from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import threading, traceback, queue
from selenium.webdriver.chrome.service import Service


app = FastAPI()

# Driver-pool configuration: a small fixed-size pool of reusable headless
# Chrome drivers. `lock` serializes pool check-out/check-in; the queue caps
# how many drivers are kept alive between requests.
POOL_SIZE = 2
driver_pool = queue.Queue(maxsize=POOL_SIZE)
lock = threading.Lock()

class ScrapeRequest(BaseModel):
    """Request body for POST /scrape.

    Attributes:
        category_path: Path appended to the selected site's base URL.
        site: Site key, e.g. 'zara' or 'lc' (defaults to 'zara').
    """
    # Path segment appended directly to the site's base URL.
    category_path: str
    site: str = Field(default="zara", description="نام سایت مانند 'zara' یا 'lc'")

def create_driver():
    """Build and return a headless Chrome WebDriver tuned for HTML-only scraping.

    The driver is configured to skip heavy static assets (images, fonts, CSS)
    via the Chrome DevTools Protocol so category pages load as fast as possible.
    """
    opts = Options()
    for flag in (
        '--headless',
        '--no-sandbox',
        '--disable-dev-shm-usage',
        '--disable-gpu',
        '--window-size=1280,720',
        '--user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.5735.106 Safari/537.36',
    ):
        opts.add_argument(flag)
    # 'eager' returns from get() once the DOM is ready, without waiting for
    # every sub-resource to finish loading.
    opts.page_load_strategy = 'eager'

    # Explicit path to the chromedriver binary.
    chrome_service = Service("/usr/bin/chromedriver")
    driver = webdriver.Chrome(service=chrome_service, options=opts)

    # Block non-essential resources (images, stylesheets, fonts) at the
    # network layer to cut page-load time and bandwidth.
    driver.execute_cdp_cmd("Network.enable", {})
    driver.execute_cdp_cmd("Network.setBlockedURLs", {
        "urls": ["*.png","*.jpg","*.jpeg","*.gif","*.webp","*.css","*.woff","*.woff2","*.ttf"]
    })
    return driver

# Pre-warm the pool: create POOL_SIZE drivers at import time so the first
# requests don't pay browser start-up cost.
for _ in range(POOL_SIZE):
    driver_pool.put(create_driver())

def get_driver():
    """Check a WebDriver out of the pool, replacing it if it has died.

    Falls back to creating a fresh driver when the pool is empty (burst load)
    or when the pooled driver's browser session is no longer responsive.

    Returns:
        A usable Chrome WebDriver. Callers must return it via release_driver().
    """
    with lock:
        try:
            drv = driver_pool.get_nowait()
        except queue.Empty:
            # Pool exhausted: spin up an extra driver on demand.
            drv = create_driver()
    # Health check: reading .title raises if the browser session is gone.
    try:
        drv.title
        return drv
    except Exception:
        # The session is dead. Dispose of the broken driver explicitly —
        # the original code leaked it here — then hand out a replacement.
        try:
            drv.quit()
        except Exception:
            pass  # best-effort cleanup; the process is already defunct
        return create_driver()

def release_driver(drv):
    """Hand a WebDriver back to the pool, or dispose of it if the pool is full.

    All pool mutations happen under `lock`, so checking capacity before the
    put is race-free here.
    """
    with lock:
        if driver_pool.full():
            # Pool already at capacity — this driver is surplus; shut it down.
            drv.quit()
        else:
            driver_pool.put_nowait(drv)

@app.post("/scrape")
def scrape_site(request_data: ScrapeRequest):
    """Fetch the raw HTML of a category page on a supported site.

    Returns:
        200 with {success, url, html, length} on success,
        400 for an unknown site key,
        500 with error details if navigation/scraping fails.
    """
    html = ""
    drv = None
    try:
        base_urls = {
            "zara": "https://www.zara.com/",
            "lc": "https://www.lcw.com/"
        }

        base_url = base_urls.get(request_data.site.lower())
        if not base_url:
            return JSONResponse(status_code=400, content={
                "success": False,
                "error": f"نام سایت '{request_data.site}' پشتیبانی نمی‌شود"
            })

        url = base_url + request_data.category_path
        drv = get_driver()
        # Set the implicit wait BEFORE navigating: it is a driver-wide timeout
        # for element lookups, and the original code set it after get()/before
        # page_source, where it had no effect on this request at all.
        drv.implicitly_wait(0.3)
        drv.get(url)
        html = drv.page_source

        return JSONResponse(content={
            "success": True,
            "url": url,
            "html": html,
            "length": len(html)
        })
    except Exception as e:
        # NOTE(review): returning format_exc() exposes internal file paths and
        # stack details to clients — log it server-side and drop it from the
        # response in production.
        return JSONResponse(status_code=500, content={
            "success": False,
            "html": html,
            "error": str(e),
            "traceback": traceback.format_exc()
        })
    finally:
        # Always return the driver; get_driver() health-checks on checkout,
        # so a crashed session handed back here is replaced on next use.
        if drv:
            release_driver(drv)
