"""基于Playwright的商品搜索爬虫模块"""

import asyncio
import time
import logging
from typing import List, Optional, Dict, Any
from urllib.parse import quote

from playwright.async_api import async_playwright, Browser, BrowserContext, Page
from bs4 import BeautifulSoup

from .models import ProductInfo, SearchResult
from .utils import (
    clean_text, extract_price, extract_sales, is_guangzhou_location,
    validate_url, filter_guangzhou_products, sort_by_sales as sort_products_by_sales
)
from config.settings import Settings


class PlaywrightSpider:
    """Playwright-based product search spider.

    Drives a Chromium browser via Playwright to run keyword searches on
    1688 / Taobao / JD and scrape product data from the result pages.
    """

    def __init__(self, platform: str = 'taobao', headless: bool = True, verbose: bool = False):
        """Initialize the spider.

        Args:
            platform: Platform name ('1688', 'taobao', 'jd').
            headless: Whether to run the browser in headless mode.
            verbose: Whether to produce verbose output.
        """
        self.settings = Settings()
        self.platform = platform
        # Unknown platform names fall back to the taobao configuration.
        self.platform_config = self.settings.PLATFORMS.get(platform, self.settings.PLATFORMS['taobao'])
        self.headless = headless
        self.verbose = verbose
        self.logger = self._setup_logger()

        # Playwright handles; created lazily by _init_browser().
        self.playwright = None
        self.browser: Optional[Browser] = None
        self.context: Optional[BrowserContext] = None
        self.page: Optional[Page] = None

    def _setup_logger(self) -> logging.Logger:
        """Create (or reuse) the module logger with a stream handler."""
        logger = logging.getLogger(__name__)
        logger.setLevel(getattr(logging, self.settings.LOG_LEVEL))

        # Guard against stacking duplicate handlers when several
        # spider instances are created in the same process.
        if not logger.handlers:
            handler = logging.StreamHandler()
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
            )
            handler.setFormatter(formatter)
            logger.addHandler(handler)

        return logger

    async def _init_browser(self) -> None:
        """Start Playwright and open browser/context/page (idempotent)."""
        if self.playwright is None:
            self.logger.info("初始化Playwright浏览器...")
            self.playwright = await async_playwright().start()

            # Launch Chromium with basic anti-automation-detection flags.
            self.browser = await self.playwright.chromium.launch(
                headless=self.headless,
                args=[
                    '--no-sandbox',
                    '--disable-blink-features=AutomationControlled',
                    '--disable-web-security',
                    '--disable-features=VizDisplayCompositor',
                    '--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
                ]
            )

            # Create a context with a desktop viewport and user agent.
            self.context = await self.browser.new_context(
                viewport={'width': 1920, 'height': 1080},
                user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
            )

            # Single page reused for all navigation.
            self.page = await self.context.new_page()

            # Extra anti-detection: mask common automation fingerprints.
            await self.page.add_init_script("""
                Object.defineProperty(navigator, 'webdriver', {
                    get: () => undefined,
                });
                
                Object.defineProperty(navigator, 'plugins', {
                    get: () => [1, 2, 3, 4, 5],
                });
                
                Object.defineProperty(navigator, 'languages', {
                    get: () => ['zh-CN', 'zh', 'en'],
                });
                
                window.chrome = {
                    runtime: {},
                };
            """)

    async def _close_browser(self) -> None:
        """Tear down page, context, browser and Playwright, in that order."""
        if self.page:
            await self.page.close()
            self.page = None

        if self.context:
            await self.context.close()
            self.context = None

        if self.browser:
            await self.browser.close()
            self.browser = None

        if self.playwright:
            await self.playwright.stop()
            self.playwright = None

    async def _navigate_to_search(self, keyword: str, page: int = 1,
                                  sort_by_sales: bool = True) -> bool:
        """Navigate to the search result page for *keyword*.

        Visits the platform home page first to establish a session, then
        opens the search URL and checks for captcha/login interstitials.

        Args:
            keyword: Search keyword.
            page: 1-based result page number.
            sort_by_sales: Whether the search URL should request
                sales-descending ordering.

        Returns:
            False when a captcha page is detected or navigation fails,
            True otherwise (a login overlay is skipped on a best-effort
            basis and does not fail navigation).
        """
        try:
            # Visit the home page first to pick up session cookies.
            home_urls = {
                '1688': 'https://www.1688.com',
                'taobao': 'https://www.taobao.com',
                'jd': 'https://www.jd.com'
            }

            home_url = home_urls.get(self.platform, 'https://www.taobao.com')
            self.logger.info(f"先访问{self.platform}首页建立会话: {home_url}")

            await self.page.goto(home_url, wait_until='domcontentloaded', timeout=30000)
            await asyncio.sleep(2)

            # Build the search URL, honoring the caller's sort preference
            # (previously the flag was dropped here and the default used).
            search_url = self._get_search_url(keyword, page, sort_by_sales)

            self.logger.info(f"访问搜索页面: {search_url}")
            await self.page.goto(search_url, wait_until='domcontentloaded', timeout=30000)

            # Detect captcha / login interstitials from title and URL.
            title = await self.page.title()
            current_url = self.page.url

            if "验证" in title or "captcha" in current_url.lower():
                self.logger.warning("检测到验证码页面，需要人工处理")
                return False

            if "login" in current_url.lower() or "登录" in title:
                self.logger.warning("检测到登录页面，尝试跳过")
                # Best effort: try common "close/skip" buttons on the overlay.
                try:
                    skip_selectors = ['.close', '.skip', '[class*="close"]', '[class*="skip"]', '.login-skip']
                    for selector in skip_selectors:
                        try:
                            await self.page.click(selector, timeout=2000)
                            await asyncio.sleep(1)
                            break
                        except Exception:
                            continue
                except Exception as e:
                    self.logger.debug(f"尝试跳过登录失败: {e}")

            return True

        except Exception as e:
            self.logger.error(f"导航到搜索页面时出错: {e}")
            return False

    def _get_search_url(self, keyword: str, page: int, sort_by_sales: bool = True) -> str:
        """Build the platform-specific search URL for *keyword* and *page*."""
        encoded_keyword = quote(keyword)

        if self.platform == 'taobao':
            # Taobao: the "s" parameter is the result offset (44 items/page).
            start = (page - 1) * 44
            url = f"https://s.taobao.com/search?q={encoded_keyword}&s={start}"
            if sort_by_sales:
                url += '&sort=sale-desc'
            return url
        elif self.platform == 'jd':
            # JD search URL.
            url = f"https://search.jd.com/Search?keyword={encoded_keyword}&page={page}"
            if sort_by_sales:
                url += '&psort=3'  # sort by sales
            return url
        else:
            # 1688 search URL (sortType=va_rmdesc already sorts by sales).
            return f"https://s.1688.com/selloffer/offer_search.htm?keywords={encoded_keyword}&beginPage={page}&sortType=va_rmdesc&tab=offer"

    async def _wait_for_products_load(self) -> bool:
        """Wait for a product list to appear, trying each configured selector."""
        selectors = self.platform_config['selectors']['product_list']

        self.logger.info(f"等待商品列表加载，尝试选择器: {selectors}")

        for selector in selectors:
            try:
                self.logger.debug(f"尝试选择器: {selector}")
                await self.page.wait_for_selector(selector, timeout=10000)

                # Selector matched; confirm it actually yields elements.
                products = await self.page.query_selector_all(selector)
                if products:
                    self.logger.info(f"成功找到 {len(products)} 个商品元素，使用选择器: {selector}")
                    return True

            except Exception as e:
                self.logger.debug(f"选择器 {selector} 失败: {e}")
                continue

        self.logger.warning("所有选择器都失败，商品列表可能未加载")
        return False

    async def _extract_products(self) -> List[ProductInfo]:
        """Extract all products from the current page.

        Tries each configured product-list selector in order and stops at
        the first one that yields at least one valid product.
        """
        products = []
        selectors = self.platform_config['selectors']['product_list']

        for selector in selectors:
            try:
                product_elements = await self.page.query_selector_all(selector)
                if product_elements:
                    self.logger.info(f"使用选择器 {selector} 找到 {len(product_elements)} 个商品")

                    for element in product_elements:
                        try:
                            product = await self._extract_single_product(element)
                            if product and product.title:  # require a title
                                products.append(product)
                        except Exception as e:
                            self.logger.debug(f"提取单个商品信息失败: {e}")
                            continue

                    if products:
                        break  # got products; skip the remaining selectors

            except Exception as e:
                self.logger.debug(f"使用选择器 {selector} 提取商品失败: {e}")
                continue

        self.logger.info(f"总共提取到 {len(products)} 个有效商品")
        return products

    async def _extract_single_product(self, element) -> Optional[ProductInfo]:
        """Extract one ProductInfo from a product list element.

        Returns None when no title can be found or extraction fails.
        """
        try:
            # Title is mandatory; bail out early without one.
            # (The old inner_html()/BeautifulSoup step was dead code — the
            # soup object was never used — so it has been removed.)
            title = await self._extract_text_from_element(element, self.platform_config['selectors']['title'])
            if not title:
                return None

            # Remaining fields are optional and default to empty values.
            link = await self._extract_link_from_element(element, self.platform_config['selectors']['link'])
            price = await self._extract_text_from_element(element, self.platform_config['selectors']['price'])
            sales = await self._extract_text_from_element(element, self.platform_config['selectors']['sales'])
            location = await self._extract_text_from_element(element, self.platform_config['selectors']['location'])
            supplier = await self._extract_text_from_element(element, self.platform_config['selectors']['supplier'])
            image = await self._extract_image_from_element(element, self.platform_config['selectors']['image'])
            min_order = await self._extract_text_from_element(element, self.platform_config['selectors']['min_order'])

            return ProductInfo(
                title=clean_text(title),
                link=self._normalize_url(link),
                price=extract_price(price) if price else 0.0,
                sales=extract_sales(sales) if sales else 0,
                location=clean_text(location) if location else "",
                supplier=clean_text(supplier) if supplier else "",
                image=image if image else "",
                min_order=clean_text(min_order) if min_order else ""
            )

        except Exception as e:
            self.logger.debug(f"提取单个商品信息时出错: {e}")
            return None

    async def _extract_text_from_element(self, element, selectors: List[str]) -> Optional[str]:
        """Return the first non-empty text matched by *selectors*, or None."""
        for selector in selectors:
            try:
                sub_element = await element.query_selector(selector)
                if sub_element:
                    text = await sub_element.inner_text()
                    if text and text.strip():
                        return text.strip()
            except Exception:
                continue
        return None

    async def _extract_link_from_element(self, element, selectors: List[str]) -> Optional[str]:
        """Return the first href matched by *selectors*, or None."""
        for selector in selectors:
            try:
                sub_element = await element.query_selector(selector)
                if sub_element:
                    href = await sub_element.get_attribute('href')
                    if href:
                        return href
            except Exception:
                continue
        return None

    async def _extract_image_from_element(self, element, selectors: List[str]) -> Optional[str]:
        """Return the first image URL matched by *selectors*, or None."""
        for selector in selectors:
            try:
                sub_element = await element.query_selector(selector)
                if sub_element:
                    # Lazy-loaded images may carry the URL in data-* attrs.
                    for attr in ['src', 'data-src', 'data-original']:
                        img_url = await sub_element.get_attribute(attr)
                        if img_url:
                            return img_url
            except Exception:
                continue
        return None

    def _normalize_url(self, url: Optional[str]) -> str:
        """Normalize a scraped URL to an absolute https URL ('' if empty)."""
        if not url:
            return ""

        # Resolve protocol-relative and root-relative paths.
        if url.startswith('//'):
            return f"https:{url}"
        elif url.startswith('/'):
            base_urls = {
                '1688': 'https://www.1688.com',
                'taobao': 'https://www.taobao.com',
                'jd': 'https://www.jd.com'
            }
            base_url = base_urls.get(self.platform, 'https://www.taobao.com')
            return f"{base_url}{url}"

        return url

    async def search_products(self, keyword: str, max_pages: int = 1,
                            filter_guangzhou: bool = False, sort_by_sales: bool = True) -> SearchResult:
        """Search products.

        Args:
            keyword: Search keyword.
            max_pages: Maximum number of result pages to scrape.
            filter_guangzhou: Keep only products shipped from Guangzhou.
            sort_by_sales: Request sales-descending ordering from the
                platform and sort the aggregated results by sales.

        Returns:
            SearchResult: aggregated results; empty on unrecoverable error.
        """
        all_products = []

        try:
            await self._init_browser()

            for page in range(1, max_pages + 1):
                self.logger.info(f"开始搜索第 {page} 页...")

                # Navigate to the search page (passes the sort preference
                # through so sort_by_sales=False is actually honored).
                if not await self._navigate_to_search(keyword, page, sort_by_sales):
                    self.logger.error(f"第 {page} 页导航失败")
                    continue

                # Wait for the product list to render.
                if not await self._wait_for_products_load():
                    self.logger.error(f"第 {page} 页商品加载失败")
                    continue

                # Scrape the product data.
                products = await self._extract_products()

                if products:
                    all_products.extend(products)
                    self.logger.info(f"第 {page} 页成功获取 {len(products)} 个商品")
                else:
                    self.logger.warning(f"第 {page} 页未获取到商品")

                # Pause between pages to reduce detection risk.
                if page < max_pages:
                    await asyncio.sleep(2)

            # Post-filtering by shipping location.
            if filter_guangzhou:
                all_products = filter_guangzhou_products(all_products)
                self.logger.info(f"广州地区筛选后剩余 {len(all_products)} 个商品")

            # Sort the aggregated results by sales, descending.
            if sort_by_sales:
                all_products = sort_products_by_sales(all_products, reverse=True)

            return SearchResult(
                keyword=keyword,
                total_count=len(all_products),
                products=all_products,
                platform=self.platform
            )

        except Exception as e:
            self.logger.error(f"搜索过程中出错: {e}")
            # Return an empty result rather than propagating the error.
            return SearchResult(
                keyword=keyword,
                total_count=0,
                products=[],
                platform=self.platform
            )
        finally:
            await self._close_browser()

    def search_products_sync(self, keyword: str, max_pages: int = 1,
                           filter_guangzhou: bool = False, sort_by_sales: bool = True) -> SearchResult:
        """Synchronous wrapper around search_products()."""
        return asyncio.run(self.search_products(keyword, max_pages, filter_guangzhou, sort_by_sales))

    def __enter__(self):
        """Context manager entry."""
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context manager exit.

        No cleanup needed here: the browser is opened and closed inside
        the async search_products() call itself.
        """
        pass


# Backward-compatibility alias for the old spider class name.
ProductSpider = PlaywrightSpider