from typing import Tuple, Optional, Dict, Any, List
import asyncio
import logging
import re
from urllib.parse import urljoin
from playwright.async_api import async_playwright

# Module-level logger named after this module, per standard logging convention.
logger = logging.getLogger(__name__)

class WebCrawler:
    """Playwright-based asynchronous web crawler.

    Fetches pages (plain or rule-driven), extracts <meta> metadata and
    structured field data, and supports paginated table scraping with
    progress checkpointing.
    """

    def __init__(self, headless: bool = True, timeout: int = 30000) -> None:
        """
        Initialize the web crawler.

        Args:
            headless: Whether to run the browser in headless mode.
            timeout: Page-load timeout in milliseconds.
        """
        self.headless = headless
        self.timeout = timeout
        # Both are created lazily by init_browser(); None until then.
        self.browser = None
        self.playwright = None

    async def init_browser(self) -> None:
        """Start the Playwright driver and launch a Chromium browser instance."""
        self.playwright = await async_playwright().start()
        # Sandbox flags allow running inside containers/CI without extra privileges.
        launch_flags = ['--no-sandbox', '--disable-setuid-sandbox']
        self.browser = await self.playwright.chromium.launch(
            headless=self.headless, args=launch_flags
        )

    async def close(self) -> None:
        """关闭浏览器和Playwright"""
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

    async def fetch_page(
        self, 
        url: str, 
        wait_until: str = 'networkidle',
        extract_metadata: bool = True
    ) -> Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]]]:
        """
        Fetch a web page and optionally its <meta> metadata.

        Args:
            url: Target URL.
            wait_until: Navigation wait condition ('load', 'domcontentloaded', 'networkidle').
            extract_metadata: Whether to also extract metadata from <meta> tags.

        Returns:
            Tuple of (page title, HTML content, metadata dict); (None, None, None)
            when navigation or extraction fails.
        """
        if not self.browser:
            await self.init_browser()
        
        context = await self.browser.new_context()
        page = await context.new_page()
        
        try:
            # Apply the configured timeout and navigate.
            page.set_default_timeout(self.timeout)
            await page.goto(url, wait_until=wait_until)
            
            title = await page.title()
            content = await page.content()
            
            metadata = await self._extract_metadata(page) if extract_metadata else None
            return title, content, metadata
            
        except Exception as e:
            logger.error(f"抓取页面失败 {url}: {e}")
            return None, None, None
        finally:
            # Fix: close the context exactly once, on every path, and never let a
            # failure inside close() mask the real result of the fetch.
            try:
                await context.close()
            except Exception:
                pass
    
    async def _extract_metadata(self, page) -> Dict[str, Any]:
        """Collect common metadata (description, keywords, author, publish time) from <meta> tags."""
        # (result key, CSS selector) pairs, probed in order.
        meta_sources = [
            ('description', 'meta[name="description"]'),
            ('keywords', 'meta[name="keywords"]'),
            ('author', 'meta[name="author"]'),
            ('publish_time', 'meta[property="article:published_time"]'),
        ]
        metadata: Dict[str, Any] = {}
        try:
            for key, css in meta_sources:
                node = await page.query_selector(css)
                if node:
                    metadata[key] = await node.get_attribute('content')
        except Exception as e:
            # Best-effort: a failure aborts the remaining probes but keeps what we have.
            logger.warning(f"提取元数据失败: {e}")
        return metadata

    async def _process_page_elements(self, page, extraction_rules: Dict[str, Any]) -> None:
        """处理页面元素（移除不需要的元素，检查必需元素）"""
        # 移除不需要的元素
        remove_elements = extraction_rules.get('remove_elements', [])
        for selector in remove_elements:
            try:
                elements = await page.query_selector_all(selector)
                for element in elements:
                    await element.evaluate('node => node.remove()')
            except Exception as e:
                logger.warning(f"移除元素失败 {selector}: {e}")
        
        # 检查必需元素是否存在
        required_elements = extraction_rules.get('required_elements', [])
        for selector in required_elements:
            element = await page.query_selector(selector)
            if not element:
                logger.warning(f"未找到必需元素: {selector}")

    def _has_pagination_config(self, rule: Dict[str, Any]) -> bool:
        """Return True when the rule's table_data selector config carries a 'pagination' section."""
        table_cfg = rule.get('selectors', {}).get('table_data', {})
        if not isinstance(table_cfg, dict):
            # A plain selector string cannot carry pagination settings.
            return False
        return 'pagination' in table_cfg

    async def _recreate_page_for_data_extraction(self, context, url: str, wait_until: str, timeout: int, additional_wait: int):
        """Open a fresh page in *context*, navigate to *url*, optionally pause; returns the page or None on failure."""
        try:
            fresh_page = await context.new_page()
            fresh_page.set_default_timeout(timeout)
            await fresh_page.goto(url, wait_until=wait_until, timeout=timeout)
            if additional_wait > 0:
                # additional_wait is in milliseconds; asyncio.sleep wants seconds.
                await asyncio.sleep(additional_wait / 1000)
            return fresh_page
        except Exception as e:
            logger.error(f"重新创建页面失败: {e}")
            return None

    async def fetch_page_with_rule(
        self, 
        url: str, 
        rule: Dict[str, Any]
    ) -> Tuple[Optional[str], Optional[str], Optional[Dict[str, Any]], Optional[Dict[str, Any]]]:
        """
        Fetch a page using a crawl rule.

        Args:
            url: Target URL.
            rule: Crawl rule with optional 'wait_conditions', 'selectors'
                  and 'extraction_rules' sections.

        Returns:
            Tuple of (page title, HTML content, metadata, structured data);
            all None when navigation fails.
        """
        if not self.browser:
            await self.init_browser()
        
        context = await self.browser.new_context()
        page = await context.new_page()
        
        try:
            # Wait/timeout settings come from the rule, falling back to defaults.
            wait_conditions = rule.get('wait_conditions', {})
            wait_until = wait_conditions.get('type', 'networkidle')
            timeout = wait_conditions.get('timeout', self.timeout)
            additional_wait = wait_conditions.get('additional_wait', 0)
            
            logger.info(f"开始导航到页面: {url}")
            logger.info(f"等待条件: {wait_until}, 超时: {timeout}ms")
            
            page.set_default_timeout(timeout)
            await page.goto(url, wait_until=wait_until)
            logger.info("页面导航成功")
            
            # Optional settle time after navigation (milliseconds).
            if additional_wait > 0:
                logger.info(f"额外等待 {additional_wait}ms")
                await asyncio.sleep(additional_wait / 1000)
            
            # Strip/verify elements as configured before extraction.
            extraction_rules = rule.get('extraction_rules', {})
            await self._process_page_elements(page, extraction_rules)
            logger.info("页面元素处理完成")
            
            title = None
            content = None
            metadata = None
            structured_data = None
            
            try:
                title = await self._extract_title_with_rule(page, rule)
                content = await self._get_content_with_pagination(page, rule)
                
                if self._has_pagination_config(rule):
                    # Pagination is handled on the original page during structured
                    # data extraction; no page recreation is needed here.
                    logger.info("检测到分页配置，将在原页面基础上进行数据提取")
                
                # Guard against the page having been closed during pagination.
                metadata = await self._extract_metadata(page) if not page.is_closed() else None
                structured_data = await self._extract_structured_data(page, rule, url) if not page.is_closed() else None
                    
            except Exception as e:
                # Extraction failures are non-fatal: return whatever was gathered.
                logger.error(f"数据提取过程中发生错误: {e}")
            
            return title, content, metadata, structured_data
            
        except Exception as e:
            logger.error(f"使用规则抓取页面失败 {url}: {e}")
            return None, None, None, None
        finally:
            # Fix: was a duplicated close with a bare `except:`; close exactly once
            # on every path and only swallow Exception, not SystemExit/KeyboardInterrupt.
            try:
                await context.close()
            except Exception:
                pass

    async def _extract_title_with_rule(self, page, rule: Dict[str, Any]) -> Optional[str]:
        """Resolve the page title via the rule's 'title' selectors, falling back to the document title."""
        configured = rule.get('selectors', {}).get('title', 'title')
        
        # A comma-separated string becomes an ordered list of candidate selectors.
        if isinstance(configured, str):
            configured = [part.strip() for part in configured.split(',')]
        
        for css in configured:
            try:
                node = await page.query_selector(css)
                if not node:
                    continue
                # The literal 'title' selector means the document title itself.
                if css == 'title':
                    return await page.title()
                return await node.inner_text()
            except Exception as e:
                logger.warning(f"提取标题失败 {css}: {e}")
        
        # Every selector failed or matched nothing: fall back to the document title.
        return await page.title()

    async def _extract_structured_data(self, page, rule: Dict[str, Any], base_url: str = None) -> Dict[str, Any]:
        """根据规则提取结构化数据"""
        structured_data = {}
        selectors = rule.get('selectors', {})
        extraction_rules = rule.get('extraction_rules', {})
        
        for field_name, selector_config in selectors.items():
            if field_name == 'title':  # 标题已经单独处理
                continue
                
            try:
                if isinstance(selector_config, str):
                    # 简单的选择器字符串
                    print("简单的选择器字符串")
                    data = await self._extract_field_data(page, selector_config, field_name, extraction_rules, None, base_url)
                elif isinstance(selector_config, dict):
                    # 复杂的选择器配置
                    print("使用复杂的选择器配置")
                    selector = selector_config.get('selector', '')
                    data = await self._extract_field_data(page, selector, field_name, extraction_rules, selector_config, base_url)
                else:
                    continue
                
                if data:
                    structured_data[field_name] = data
                    
            except Exception as e:
                logger.warning(f"提取字段 {field_name} 失败: {e}")
        
        return structured_data

    async def _extract_field_data(
        self, 
        page, 
        selector: str, 
        field_name: str, 
        extraction_rules: Dict[str, Any],
        selector_config: Dict[str, Any] = None,
        base_url: str = None
    ) -> Any:
        """提取特定字段的数据"""
        if not selector:
            return None
        
        # 支持多个选择器
        selectors = [s.strip() for s in selector.split(',')]
        
        for sel in selectors:
            try:
                # 根据字段类型决定是获取单个元素还是多个元素
                if field_name.endswith('s') or field_name in ['links', 'images', 'table_data']:
                    # 特殊处理table_data的分页情况
                    if field_name == 'table_data' and selector_config and 'pagination' in selector_config:
                        logger.info("开始处理表格分页数据获取")
                        return await self._extract_table_data_with_pagination(
                            page, sel, selector_config, extraction_rules, base_url
                        )
                    else:
                        # 获取多个元素
                        elements = await page.query_selector_all(sel)
                        if elements:
                            return await self._process_multiple_elements(
                                elements, field_name, extraction_rules, selector_config, base_url
                            )
                else:
                    # 获取单个元素
                    element = await page.query_selector(sel)
                    if element:
                        return await self._process_single_element(
                            element, field_name, extraction_rules, selector_config
                        )
            except Exception as e:
                logger.warning(f"处理选择器失败 {sel}: {e}")
        
        return None

    async def _process_single_element(
        self, 
        element, 
        field_name: str, 
        extraction_rules: Dict[str, Any],
        selector_config: Dict[str, Any] = None
    ) -> str:
        """Return a single element's value: its datetime attribute for date-like fields, otherwise its text."""
        try:
            looks_like_date = (
                field_name in ['publish_date', 'date'] or 'date' in field_name.lower()
            )
            if looks_like_date:
                # Prefer a machine-readable datetime attribute over the visible text.
                dt = await element.get_attribute('datetime')
                if dt:
                    return dt
            
            value = await element.inner_text()
            
            if extraction_rules.get('text_cleanup', False):
                # Collapse newlines/tabs, then squeeze whitespace runs into single spaces.
                value = value.strip().replace('\n', ' ').replace('\t', ' ')
                value = re.sub(r'\s+', ' ', value)
            
            return value
            
        except Exception as e:
            logger.warning(f"处理单个元素失败: {e}")
            return ""

    def _normalize_url(self, href: str, base_url: str) -> str:
        """Resolve *href* against *base_url*; absolute and empty hrefs are returned untouched."""
        if not href:
            return href
        
        if href.startswith('http://') or href.startswith('https://'):
            # Already a fully-qualified URL.
            return href
        
        # urljoin handles relative paths as well as host-less absolute paths.
        return urljoin(base_url, href)

    async def _process_multiple_elements(
        self, 
        elements, 
        field_name: str, 
        extraction_rules: Dict[str, Any],
        selector_config: Dict[str, Any] = None,
        base_url: str = None
    ) -> List[Any]:
        """
        Convert a list of matched elements into field-specific result structures.

        Args:
            elements: Playwright element handles matched for this field.
            field_name: Field key; 'images', 'links' and 'table_data' get
                dedicated handling, anything else falls back to plain text.
            extraction_rules: Extraction tuning options from the crawl rule.
            selector_config: Optional per-field selector configuration.
            base_url: Base URL for absolutizing relative hrefs; when None,
                hrefs are kept as found.

        Returns:
            A list of dicts (images/links/table rows) or stripped text strings;
            elements that fail to process are skipped with a warning.
        """
        results = []
        # NOTE(review): fetched but never used below — candidate for removal.
        extract_attributes = extraction_rules.get('extract_attributes', {})
        
        for element in elements:
            try:
                if field_name == 'images':
                    # Image elements: keep src plus (possibly empty) alt text.
                    src = await element.get_attribute('src')
                    alt = await element.get_attribute('alt')
                    if src:
                        results.append({'src': src, 'alt': alt or ''})
                        
                elif field_name == 'links':
                    # Anchor elements: keep href, visible text and title attribute.
                    href = await element.get_attribute('href')
                    text = await element.inner_text()
                    title = await element.get_attribute('title')
                    if href:
                        # Absolutize relative hrefs when a base URL is available.
                        normalized_href = self._normalize_url(href, base_url) if base_url else href
                        
                        # Heuristic PDF detection: '.pdf' suffix or 'pdf' anywhere in the URL.
                        is_pdf = href.lower().endswith('.pdf') or 'pdf' in href.lower()
                        
                        results.append({
                            'href': normalized_href, 
                            'text': text.strip() if text else '',
                            'title': title or '',
                            'is_pdf': is_pdf,
                            'original_href': href  # keep the raw link for debugging
                        })
                        
                elif field_name == 'table_data':
                    # Table rows: collect each cell's text plus any links inside cells.
                    cells = await element.query_selector_all('td, th')
                    row_data = []
                    row_links = []
                    
                    for cell in cells:
                        cell_text = await cell.inner_text()
                        cell_data = cell_text.strip() if cell_text else ''
                        row_data.append(cell_data)
                        
                        # Gather links embedded in this cell.
                        links = await cell.query_selector_all('a[href]')
                        cell_links = []
                        for link in links:
                            href = await link.get_attribute('href')
                            link_text = await link.inner_text()
                            if href:
                                normalized_href = self._normalize_url(href, base_url) if base_url else href
                                is_pdf = href.lower().endswith('.pdf') or 'pdf' in href.lower()
                                cell_links.append({
                                    'href': normalized_href,
                                    'text': link_text.strip() if link_text else '',
                                    'is_pdf': is_pdf,
                                    'original_href': href
                                })
                        
                        if cell_links:
                            row_links.extend(cell_links)
                    
                    # Only rows with at least one cell are kept.
                    if row_data:
                        table_row = {
                            'cells': row_data,
                            'links': row_links if row_links else []
                        }
                        results.append(table_row)
                        
                else:
                    # Generic case: keep non-empty stripped text.
                    text = await element.inner_text()
                    if text and text.strip():
                        results.append(text.strip())
                        
            except Exception as e:
                logger.warning(f"处理元素失败: {e}")
        
        return results

    async def _extract_table_data_with_pagination(
        self, 
        page, 
        table_selector: str, 
        selector_config: Dict[str, Any],
        extraction_rules: Dict[str, Any],
        base_url: str = None
    ) -> List[Any]:
        """
        Extract table rows across multiple pages, supporting a start page and batched runs.

        Args:
            page: Playwright page already positioned on page 1 of the listing.
            table_selector: CSS selector matching the table rows to extract.
            selector_config: Selector config whose 'pagination' section supplies
                next_button / start_page / end_page / max_pages / page_delay.
            extraction_rules: Extraction tuning options from the crawl rule.
            base_url: Base URL for absolutizing relative hrefs in row links.

        Returns:
            All extracted rows across the processed pages; empty list when
            navigation to the start page fails. Falls back to single-page
            extraction when no next-button selector is configured.
        """
        all_results = []
        pagination_config = selector_config.get('pagination', {})
        next_button_selector = pagination_config.get('next_button', '')
        start_page = pagination_config.get('start_page', 1)
        end_page = pagination_config.get('end_page', 10)
        max_pages = pagination_config.get('max_pages', end_page)
        page_delay = pagination_config.get('page_delay', 5)  # delay between pages, in seconds
        
        # Without a next-page button there is nothing to paginate: single-page fallback.
        if not next_button_selector:
            logger.info("没有配置分页按钮，回退到单页提取")
            elements = await page.query_selector_all(table_selector)
            return await self._process_multiple_elements(
                elements, 'table_data', extraction_rules, selector_config, base_url
            )
        
        logger.info(f"开始分批次抓取: 第{start_page}页到第{end_page}页 (最大{max_pages}页)")
        
        # When the batch starts past page 1, click through to the start page first.
        if start_page > 1:
            success = await self._navigate_to_start_page(page, next_button_selector, start_page)
            if not success:
                logger.error(f"无法导航到起始页{start_page}")
                return []
        
        current_page = start_page
        target_end_page = min(end_page, max_pages)
        # Abort after this many consecutive failures to avoid spinning forever.
        consecutive_errors = 0
        max_consecutive_errors = 5
        
        while current_page <= target_end_page and consecutive_errors < max_consecutive_errors:
            try:
                logger.info(f"正在处理第{current_page}页表格数据 (进度: {current_page-start_page+1}/{target_end_page-start_page+1})")
                
                # Bail out if the page was closed underneath us.
                if page.is_closed():
                    logger.error(f"第{current_page}页处理时页面已关闭")
                    break
                
                # Wait until the table rows are visible before extracting.
                try:
                    logger.info(f"第{current_page}页尝试等待表格元素: {table_selector}")
                    await page.wait_for_selector(table_selector, state='visible', timeout=20000)
                    await asyncio.sleep(1)  # give in-page JavaScript time to finish
                    logger.info(f"第{current_page}页表格元素加载成功")
                except Exception as e:
                    logger.warning(f"第{current_page}页等待表格元素失败: {e}")
                    consecutive_errors += 1
                    if consecutive_errors >= max_consecutive_errors:
                        logger.error("连续错误次数过多，终止分页处理")
                        break
                    # NOTE: this retries the SAME page (no next-page click happens)
                    # until it loads or the consecutive-error budget runs out.
                    continue
                
                # Extract the current page's rows.
                elements = await page.query_selector_all(table_selector)
                if elements:
                    page_results = await self._process_multiple_elements(
                        elements, 'table_data', extraction_rules, selector_config, base_url
                    )
                    if page_results:
                        all_results.extend(page_results)
                        logger.info(f"第{current_page}页成功提取{len(page_results)}行数据，累计{len(all_results)}行")
                        consecutive_errors = 0  # a good page resets the error budget
                        
                        # Checkpoint progress to disk so a later run can resume.
                        await self._save_progress(current_page, len(all_results))
                    else:
                        logger.warning(f"第{current_page}页未提取到有效数据")
                        consecutive_errors += 1
                else:
                    logger.warning(f"第{current_page}页未找到表格元素: {table_selector}")
                    consecutive_errors += 1
                
                # Stop once the batch's last page has been processed.
                if current_page >= target_end_page:
                    logger.info(f"已完成批次抓取任务: 第{start_page}页到第{target_end_page}页")
                    break
                
                # Throttle between pages to be polite to the target site.
                if page_delay > 0:
                    logger.info(f"页面间延迟{page_delay}秒...")
                    await asyncio.sleep(page_delay)
                
                # Advance to the next page; stop when the button is gone/disabled.
                next_success = await self._click_next_page_button(page, next_button_selector, current_page)
                if not next_success:
                    logger.info("无法进入下一页，分页处理结束")
                    break
                
                current_page += 1
                
            except Exception as e:
                logger.error(f"处理第{current_page}页时发生错误: {e}")
                consecutive_errors += 1
                if consecutive_errors >= max_consecutive_errors:
                    logger.error("连续错误过多，终止处理")
                    break
        
        logger.info(f"分页表格数据提取完成：处理了{current_page-1}页，共获取{len(all_results)}行数据")
        return all_results

    async def _click_next_page_button(self, page, next_button_selector: str, current_page: int) -> bool:
        """Locate and click the next-page control; return True when navigation was triggered."""
        try:
            button = await page.query_selector(next_button_selector)
            if button is None:
                logger.info(f"第{current_page}页未找到下一页按钮: {next_button_selector}")
                return False
            
            # Probe clickability before attempting the click.
            disabled = await button.is_disabled()
            visible = await button.is_visible()
            
            if disabled:
                logger.info(f"第{current_page}页下一页按钮已禁用")
                return False
            if not visible:
                logger.info(f"第{current_page}页下一页按钮不可见")
                return False
            
            try:
                await button.scroll_into_view_if_needed()
                await asyncio.sleep(0.5)
                
                # Click via JavaScript so overlapping elements cannot intercept the event.
                await page.evaluate('(element) => element.click()', button)
                logger.info(f"成功点击下一页按钮，即将跳转到第{current_page + 1}页")
                
                # Wait for the navigation to settle, then pause twice more so
                # late-loading content has time to render.
                await page.wait_for_load_state('networkidle', timeout=20000)
                await asyncio.sleep(2)
                await asyncio.sleep(2)
                
                return True
            except Exception as e:
                logger.error(f"点击下一页按钮时发生错误: {e}")
                return False
                
        except Exception as e:
            logger.error(f"查找下一页按钮时发生错误: {e}")
            return False

    async def _navigate_to_start_page(self, page, next_button_selector: str, target_page: int) -> bool:
        """Click 'next' repeatedly until the crawler sits on *target_page* (1-based); True on success."""
        current_page = 1
        logger.info(f"正在导航到第{target_page}页...")
        
        while current_page < target_page:
            try:
                btn = await page.query_selector(next_button_selector)
                if btn is None:
                    logger.error(f"导航过程中未找到下一页按钮 (当前页: {current_page})")
                    return False
                
                # A disabled button means we ran out of pages before the target.
                if await btn.is_disabled():
                    logger.error(f"导航过程中下一页按钮已禁用 (当前页: {current_page})")
                    return False
                
                # Scroll into view and click via JavaScript to avoid overlay issues.
                await btn.scroll_into_view_if_needed()
                await asyncio.sleep(0.5)
                await page.evaluate('(element) => element.click()', btn)
                
                await page.wait_for_load_state('networkidle', timeout=15000)
                await asyncio.sleep(1)
                
                current_page += 1
                if current_page % 10 == 0:
                    # Progress heartbeat every ten pages.
                    logger.info(f"导航进度: {current_page}/{target_page}")
                
            except Exception as e:
                logger.error(f"导航到第{target_page}页时发生错误 (当前页: {current_page}): {e}")
                return False
        
        logger.info(f"成功导航到第{target_page}页")
        return True

    async def _save_progress(self, current_page: int, total_records: int) -> None:
        """保存抓取进度到文件"""
        try:
            import json
            import os
            
            progress_file = "crawl_progress.json"
            progress_data = {
                "last_page": current_page,
                "total_records": total_records,
                "timestamp": __import__('datetime').datetime.now().isoformat(),
                "status": "in_progress"
            }
            
            with open(progress_file, 'w', encoding='utf-8') as f:
                json.dump(progress_data, f, ensure_ascii=False, indent=2)
                
        except Exception as e:
            logger.warning(f"保存进度信息失败: {e}")

    async def _get_content_with_pagination(self, page, rule: Dict[str, Any]) -> str:
        """
        Return the page's full HTML, honouring the pagination configuration.

        NOTE(review): pagination traversal is intentionally skipped here to avoid
        page-closed races; paginated tables are handled by
        _extract_table_data_with_pagination instead.

        Args:
            page: Playwright page object.
            rule: Crawl rule whose selectors may carry a table_data pagination config.

        Returns:
            The current page's HTML, or "" when the page is closed / unrecoverable.
        """
        try:
            if page.is_closed():
                logger.error("页面已关闭，无法获取分页内容")
                return ""
            
            selectors = rule.get('selectors', {})
            table_data_config = selectors.get('table_data', {})
            
            # No dict-style table config or no pagination section: single page content.
            if not isinstance(table_data_config, dict) or 'pagination' not in table_data_config:
                logger.info("没有分页配置，直接返回当前页面内容")
                return await page.content()
            
            logger.info("检测到分页配置，但暂时跳过分页处理")
            return await page.content()
                    
        except Exception as e:
            logger.error(f"获取分页内容时发生严重错误: {e}")
            try:
                # Last-ditch attempt to salvage the current page's HTML.
                if not page.is_closed():
                    return await page.content()
            except Exception:
                # Fix: was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt.
                pass
            return ""

    async def _merge_paginated_content(self, all_content_parts: List[str], table_data_config: Dict[str, Any]) -> str:
        """合并分页内容"""
        try:
            from bs4 import BeautifulSoup
        except ImportError:
            logger.warning("未安装beautifulsoup4，使用简单合并策略")
            return all_content_parts[0]  # 返回第一页内容
        
        try:
            table_selector = table_data_config.get('selector', '')
            if not table_selector:
                return all_content_parts[0]
            
            # 解析第一页作为基础
            base_soup = BeautifulSoup(all_content_parts[0], 'html.parser')
            
            # 获取基础页面的表格容器
            # 从 "#table > table > tbody > tr" 中提取表格容器选择器
            table_container_selector = table_selector.rsplit(' > tr', 1)[0] if ' > tr' in table_selector else table_selector
            base_table_container = base_soup.select_one(table_container_selector)
            
            if not base_table_container:
                logger.warning(f"在基础页面中未找到表格容器: {table_container_selector}")
                return all_content_parts[0]
            
            # 获取基础页面的所有表格行
            base_rows = base_soup.select(table_selector)
            logger.info(f"基础页面有{len(base_rows)}行数据")
            
            # 合并其他页面的表格行
            total_added_rows = 0
            for i, content in enumerate(all_content_parts[1:], 2):
                try:
                    page_soup = BeautifulSoup(content, 'html.parser')
                    page_rows = page_soup.select(table_selector)
                    
                    if page_rows:
                        # 将其他页面的行添加到基础表格中
                        for row in page_rows:
                            # 深度复制整个行元素及其所有子元素
                            cloned_row = BeautifulSoup(str(row), 'html.parser').find()
                            if cloned_row:
                                base_table_container.append(cloned_row)
                                total_added_rows += 1
                        
                        logger.info(f"从第{i}页添加了{len(page_rows)}行数据")
                    else:
                        logger.warning(f"第{i}页未找到表格数据")
                        
                except Exception as e:
                    logger.error(f"合并第{i}页内容失败: {e}")
            
            logger.info(f"内容合并完成，总共添加了{total_added_rows}行数据")
            return str(base_soup)
            
        except Exception as e:
            logger.error(f"合并分页内容失败: {e}")
            return all_content_parts[0]  # 失败时返回第一页内容


class AsyncWebCrawler:
    """Async context-manager wrapper around WebCrawler.

    Launches the browser on ``__aenter__`` and guarantees cleanup on
    ``__aexit__``, even when the body raises.
    """
    
    def __init__(self, headless: bool = True, timeout: int = 30000):
        # Stored so the crawler can be created lazily when the context opens.
        self.headless = headless
        self.timeout = timeout
        self.crawler = None
    
    async def __aenter__(self):
        # Assign before init so __aexit__ can still close a half-started crawler.
        self.crawler = WebCrawler(headless=self.headless, timeout=self.timeout)
        await self.crawler.init_browser()
        return self.crawler
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.crawler is not None:
            await self.crawler.close()

