"""
百度网盘信息提取器

专门处理百度网盘链接和提取码的识别、提取操作
"""

import asyncio
import sys
import re
from typing import Dict, Any, Optional, Tuple
from playwright.async_api import Page
from pathlib import Path

# 添加项目根目录到Python路径
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

try:
    from src.browser.page_handler import BasePageHandler
    from src.utils.logger import get_logger
except ImportError:
    try:
        from browser.page_handler import BasePageHandler
        from utils.logger import get_logger
    except ImportError:
        from page_handler import BasePageHandler
        from logger import get_logger


class BaiduNetdiskExtractor(BasePageHandler):
    """Extracts Baidu Netdisk (pan.baidu.com) share links and extraction codes.

    Pipeline: locate the share URL and its 4-character extraction code on the
    current page, combine them into a single ``<url>?pwd=<code>`` link, open
    that link in a new browser tab, and verify the new tab's URL.

    Note: ``self.logger`` and ``wait_for_page_load()`` are assumed to be
    provided by ``BasePageHandler`` — confirm against that class.
    """

    def __init__(self, page: Page):
        super().__init__(page)

        # CSS selectors used to locate anchor elements that may carry the
        # netdisk share link, ordered from site-specific to generic.
        self.netdisk_link_selectors = [
            # User-provided selectors (generalized)
            '[id*="post"] .entry-content p:nth-child(1) a',
            '.entry-content.u-text-format p:nth-child(1) a',
            '.entry-content p:first-child a',
            # Original site-specific selector
            '#post-35656 > div:nth-child(2) > div > div.entry-content.u-text-format.u-clearfix > p:nth-child(1) > a',
            # Generic selectors
            'a[href*="pan.baidu.com"]',
            'a[href*="baidu.com/s/"]',
            'a:has-text("pan.baidu.com")',
            'a:has-text("百度网盘")',
            'a:has-text("网盘链接")',
            '.entry-content a[href*="baidu"]',
            'p a[href*="pan.baidu.com"]',
            '.entry-content p a',
            '.u-text-format a',
            '.post-content a[href*="baidu"]'
        ]

        # CSS selectors used to locate elements containing the extraction code
        # ("提取码" = extraction code, "密码" = password).
        self.extract_code_selectors = [
            # User-provided selectors (generalized)
            '[id*="post"] .entry-content p:nth-child(2)',
            '.entry-content.u-text-format p:nth-child(2)',
            '.entry-content p:nth-child(2)',
            # Original site-specific selector
            '#post-35656 > div:nth-child(2) > div > div.entry-content.u-text-format.u-clearfix > p:nth-child(2)',
            # Generic selectors
            'p:has-text("提取码")',
            '*:has-text("提取码:")',
            '*:has-text("提取码：")',
            '*:has-text("密码:")',
            '*:has-text("密码：")',
            '.entry-content p:has-text("提取码")',
            'div:has-text("提取码")',
            '.u-text-format p:has-text("提取码")',
            '.post-content p:has-text("提取码")'
        ]

        # Regex patterns matching Baidu Netdisk share URLs.
        self.netdisk_url_patterns = [
            r'https?://pan\.baidu\.com/s/[A-Za-z0-9_-]+',
            r'https?://[^/]*baidu\.com/s/[A-Za-z0-9_-]+',
            r'pan\.baidu\.com/s/[A-Za-z0-9_-]+'
        ]

        # Regex patterns capturing the 4-character alphanumeric extraction
        # code following the "提取码"/"密码" labels.
        self.extract_code_patterns = [
            r'提取码[：:]\s*([A-Za-z0-9]{4})',
            r'提取码[：:]?\s*([A-Za-z0-9]{4})',
            r'密码[：:]\s*([A-Za-z0-9]{4})',
            r'密码[：:]?\s*([A-Za-z0-9]{4})',
            r'提取码.*?([A-Za-z0-9]{4})',
            r'密码.*?([A-Za-z0-9]{4})'
        ]

    async def handle(self) -> Dict[str, Any]:
        """Run the full Baidu Netdisk extraction pipeline on the current page.

        Returns:
            Dict: result payload. ``success`` is True only when the link and
            code were extracted, combined, opened in a new tab, and the tab's
            URL verified. Intermediate failures return the data gathered so
            far (e.g. ``netdisk_url`` without ``extract_code``).
        """
        try:
            # Wait for the page to finish loading (inherited helper).
            await self.wait_for_page_load()

            self.logger.info("🔍 开始提取百度网盘信息...")

            # Step 1: extract the netdisk share URL.
            netdisk_url = await self.extract_netdisk_url()
            if not netdisk_url:
                return {
                    "success": False,
                    "error": "未找到百度网盘链接",
                    "page_type": "netdisk_extraction"
                }

            self.logger.info(f"🔗 成功提取百度网盘链接: {netdisk_url}")

            # Step 2: extract the extraction code.
            extract_code = await self.extract_code()
            if not extract_code:
                return {
                    "success": False,
                    "error": "未找到提取码",
                    "page_type": "netdisk_extraction",
                    "netdisk_url": netdisk_url
                }

            self.logger.info(f"🔑 成功提取提取码: {extract_code}")

            # Step 3 (task 3.3.3): combine the URL with the code and open
            # the resulting link in a new tab.
            complete_url = await self.create_complete_netdisk_url(netdisk_url, extract_code)
            if not complete_url:
                return {
                    "success": False,
                    "error": "链接拼接失败",
                    "page_type": "netdisk_extraction",
                    "netdisk_url": netdisk_url,
                    "extract_code": extract_code
                }

            self.logger.info(f"🔗 成功拼接完整链接: {complete_url}")

            # Open the combined link in a new tab.
            new_page = await self.open_complete_url_in_new_tab(complete_url)
            if not new_page:
                return {
                    "success": False,
                    "error": "无法在新标签页打开拼接链接",
                    "page_type": "netdisk_extraction",
                    "netdisk_url": netdisk_url,
                    "extract_code": extract_code,
                    "complete_url": complete_url
                }

            # Verify the new tab actually landed on the expected URL.
            success = await self.verify_new_tab_url(new_page, complete_url)

            return {
                "success": success,
                "message": "百度网盘信息提取和链接拼接完成" if success else "链接拼接完成但验证失败",
                "page_type": "netdisk_extraction",
                "netdisk_url": netdisk_url,
                "extract_code": extract_code,
                "complete_url": complete_url,
                "new_page": new_page,
                "action_taken": "complete_url_opened" if success else "url_verification_failed"
            }

        except Exception as e:
            self.logger.error(f"提取百度网盘信息时出错: {str(e)}")
            return {
                "success": False,
                "error": f"提取失败: {str(e)}",
                "page_type": "netdisk_extraction"
            }

    async def extract_netdisk_url(self) -> Optional[str]:
        """Extract the Baidu Netdisk share URL from the page.

        Tries, in order: the configured CSS selectors, a regex scan of the
        raw page HTML, and finally a regex scan of individual text elements.

        Returns:
            str: the share URL (protocol-completed), or None if not found.
        """
        try:
            # Debug aid: enumerate every anchor on the page and log the
            # Baidu-related ones, so selector failures can be diagnosed.
            all_links = await self.page.query_selector_all('a')
            self.logger.info(f"页面中共找到 {len(all_links)} 个链接")

            baidu_links = []
            for i, link in enumerate(all_links):
                href = await link.get_attribute('href')
                text = await link.inner_text() if link else ""
                text = text.strip() if text else ""
                if href:
                    # Check for Baidu-related keywords in the href or text.
                    if ('baidu' in href.lower() or 'pan.' in href.lower() or
                        'baidu' in text.lower() or 'pan.' in text.lower() or
                        '网盘' in text or '百度' in text):
                        baidu_links.append(f"链接{i}: {href} (文本: {text})")
                        self.logger.info(f"找到百度相关链接{i}: {href} (文本: {text})")
                    # Log the first 10 links as a reference sample.
                    elif i < 10:
                        self.logger.debug(f"链接{i}: {href} (文本: {text})")

            if not baidu_links:
                self.logger.info("页面中没有找到任何百度相关链接")
                # Dump fragments of the raw HTML around any occurrence of the
                # netdisk domain to help debug selector mismatches.
                page_content = await self.page.content()
                if 'pan.baidu.com' in page_content:
                    self.logger.info("页面内容中包含 'pan.baidu.com'")
                    matches = re.findall(r'.{0,50}pan\.baidu\.com.{0,50}', page_content, re.IGNORECASE)
                    for match in matches[:3]:  # show at most 3 matches
                        self.logger.info(f"找到内容片段: {match}")
                else:
                    self.logger.info("页面内容中不包含 'pan.baidu.com'")

            # Method 1: look up anchor elements via the configured selectors.
            for selector in self.netdisk_link_selectors:
                try:
                    self.logger.debug(f"尝试选择器: {selector}")
                    elements = await self.page.query_selector_all(selector)
                    self.logger.debug(f"选择器 {selector} 找到 {len(elements)} 个元素")

                    for element in elements:
                        href = await element.get_attribute('href')
                        if href:
                            self.logger.debug(f"检查链接: {href}")
                            # Validate the href against the URL patterns.
                            for pattern in self.netdisk_url_patterns:
                                if re.search(pattern, href, re.IGNORECASE):
                                    if href.startswith('http'):
                                        self.logger.info(f"通过选择器 {selector} 找到网盘链接: {href}")
                                        return href
                                    else:
                                        # Complete a protocol-relative or bare URL.
                                        full_url = f"https://{href}" if not href.startswith('//') else f"https:{href}"
                                        self.logger.info(f"通过选择器 {selector} 找到网盘链接（已补全协议）: {full_url}")
                                        return full_url
                except Exception as e:
                    self.logger.debug(f"选择器 {selector} 查找失败: {str(e)}")
                    continue

            # Method 2: regex scan over the raw page HTML.
            page_content = await self.page.content()
            for pattern in self.netdisk_url_patterns:
                matches = re.findall(pattern, page_content)
                if matches:
                    url = matches[0]
                    if not url.startswith('http'):
                        url = f"https://{url}"
                    self.logger.info(f"通过正则表达式提取到网盘链接: {url}")
                    return url

            # Method 3: scan the text content of common container elements.
            text_elements = await self.page.query_selector_all('p, div, span, a')
            for element in text_elements:
                try:
                    text_content = await element.text_content()
                    if text_content and ('pan.baidu.com' in text_content or 'baidu.com/s/' in text_content):
                        for pattern in self.netdisk_url_patterns:
                            matches = re.findall(pattern, text_content)
                            if matches:
                                url = matches[0]
                                if not url.startswith('http'):
                                    url = f"https://{url}"
                                self.logger.info(f"从文本内容中提取到网盘链接: {url}")
                                return url
                except Exception:
                    continue

            self.logger.warning("未能从页面中提取到百度网盘链接")
            return None

        except Exception as e:
            self.logger.error(f"提取百度网盘链接时出错: {str(e)}")
            return None

    async def extract_code(self) -> Optional[str]:
        """Extract the 4-character extraction code from the page.

        Tries, in order: the configured CSS selectors, a regex scan of the
        raw page HTML, and a scan of text elements mentioning "提取码"/"密码".

        Returns:
            str: the extraction code, or None if not found.
        """
        try:
            # Method 1: look up candidate elements via the configured selectors.
            for selector in self.extract_code_selectors:
                try:
                    elements = await self.page.query_selector_all(selector)
                    for element in elements:
                        text_content = await element.text_content()
                        if text_content:
                            code = self._extract_code_from_text(text_content)
                            if code:
                                self.logger.info(f"通过选择器 {selector} 找到提取码: {code}")
                                return code
                except Exception as e:
                    self.logger.debug(f"选择器 {selector} 查找失败: {str(e)}")
                    continue

            # Method 2: regex scan over the raw page HTML.
            page_content = await self.page.content()
            code = self._extract_code_from_text(page_content)
            if code:
                self.logger.info(f"从页面内容中提取到提取码: {code}")
                return code

            # Method 3: scan individual text elements that mention the
            # "提取码" / "密码" labels.
            text_elements = await self.page.query_selector_all('p, div, span, h1, h2, h3, h4, h5, h6')
            for element in text_elements:
                try:
                    text_content = await element.text_content()
                    if text_content and ('提取码' in text_content or '密码' in text_content):
                        code = self._extract_code_from_text(text_content)
                        if code:
                            self.logger.info(f"从文本元素中提取到提取码: {code}")
                            return code
                except Exception:
                    continue

            self.logger.warning("未能从页面中提取到提取码")
            return None

        except Exception as e:
            self.logger.error(f"提取提取码时出错: {str(e)}")
            return None

    def _extract_code_from_text(self, text: str) -> Optional[str]:
        """Extract the extraction code from a text snippet.

        Args:
            text: text that may contain an extraction-code label.

        Returns:
            str: the 4-character alphanumeric code, or None if not found.
        """
        if not text:
            return None

        # Try each pattern; the capture group is already constrained to
        # 4 alphanumeric characters, the explicit check below is a
        # belt-and-braces validation.
        for pattern in self.extract_code_patterns:
            matches = re.findall(pattern, text, re.IGNORECASE)
            if matches:
                code = matches[0].strip()
                if len(code) == 4 and code.isalnum():
                    return code

        return None

    async def create_complete_netdisk_url(self, netdisk_url: str, extract_code: str) -> Optional[str]:
        """Combine a netdisk share URL and its extraction code into one link.

        Args:
            netdisk_url: the Baidu Netdisk share URL.
            extract_code: the extraction code.

        Returns:
            str: the combined link with a ``pwd=<code>`` query parameter,
            or None if either input is empty.
        """
        try:
            if not netdisk_url or not extract_code:
                self.logger.error("链接或提取码为空，无法拼接")
                return None

            # Ensure the URL has a protocol.
            if not netdisk_url.startswith('http'):
                netdisk_url = 'https://' + netdisk_url

            # Bug fix: join with '&' when the URL already carries a query
            # string; always appending '?' produced invalid URLs such as
            # 'https://...?a=1?pwd=abcd'.
            separator = '&' if '?' in netdisk_url else '?'
            complete_url = f"{netdisk_url}{separator}pwd={extract_code}"

            self.logger.info(f"🔗 链接拼接成功: {netdisk_url} + {extract_code} = {complete_url}")
            return complete_url

        except Exception as e:
            self.logger.error(f"拼接链接时出错: {str(e)}")
            return None

    async def open_complete_url_in_new_tab(self, complete_url: str) -> Optional[Page]:
        """Open the combined netdisk link in a new browser tab.

        Args:
            complete_url: the combined link (URL + ``pwd`` parameter).

        Returns:
            Page: the newly opened page, or None if navigation failed.
        """
        try:
            self.logger.info(f"🌐 在新标签页中打开完整链接: {complete_url}")

            # Create a new tab in the same browser context.
            context = self.page.context
            new_page = await context.new_page()

            # Navigate; DOM-ready is enough to consider the tab open.
            await new_page.goto(complete_url, wait_until="domcontentloaded", timeout=30000)

            # Best-effort wait for network quiet. 'networkidle' may never
            # fire on chatty pages; since navigation already succeeded, a
            # timeout here must not discard the usable tab (previously it
            # caused the whole method to return None).
            try:
                await new_page.wait_for_load_state("networkidle", timeout=10000)
            except Exception as wait_error:
                self.logger.debug(f"等待networkidle超时，继续处理: {str(wait_error)}")

            self.logger.info(f"✅ 成功在新标签页中打开链接: {new_page.url}")
            return new_page

        except Exception as e:
            self.logger.error(f"在新标签页中打开完整链接失败: {str(e)}")
            return None

    async def verify_new_tab_url(self, new_page: Page, expected_url: str) -> bool:
        """Verify the new tab's URL shares the expected link's prefix.

        Args:
            new_page: the newly opened page.
            expected_url: the expected combined link.

        Returns:
            bool: True when the actual URL starts with the expected prefix
            (the expected URL with its ``pwd`` parameter stripped).
        """
        try:
            actual_url = new_page.url

            # Strip the pwd parameter (whether appended with '?' or '&')
            # to obtain the prefix to compare against.
            if '?pwd=' in expected_url:
                expected_prefix = expected_url.split('?pwd=')[0]
            elif '&pwd=' in expected_url:
                expected_prefix = expected_url.split('&pwd=')[0]
            else:
                expected_prefix = expected_url

            is_valid = actual_url.startswith(expected_prefix)

            if is_valid:
                self.logger.info(f"✅ 链接验证成功: {actual_url} 匹配前缀 {expected_prefix}")
            else:
                self.logger.warning(f"❌ 链接验证失败: {actual_url} 不匹配前缀 {expected_prefix}")

            return is_valid

        except Exception as e:
            self.logger.error(f"验证新标签页链接时出错: {str(e)}")
            return False

    async def open_netdisk_url_in_new_tab(self, netdisk_url: str) -> Optional[Page]:
        """Open the bare (code-less) netdisk share URL in a new tab.

        Args:
            netdisk_url: the Baidu Netdisk share URL.

        Returns:
            Page: the newly opened page, or None if navigation failed.
        """
        try:
            self.logger.info(f"🌐 在新标签页中打开百度网盘链接: {netdisk_url}")

            # Create a new tab in the same browser context and navigate.
            context = self.page.context
            new_page = await context.new_page()
            await new_page.goto(netdisk_url, wait_until="domcontentloaded", timeout=60000)

            # Fixed grace period for late-loading content.
            await asyncio.sleep(3)

            self.logger.info("✅ 成功在新标签页中打开百度网盘链接")
            return new_page

        except Exception as e:
            self.logger.error(f"在新标签页中打开百度网盘链接失败: {str(e)}")
            return None

    async def get_page_info(self) -> Dict[str, Any]:
        """Summarize the current page for diagnostics.

        Returns:
            Dict: url, title, and whether netdisk-link / extraction-code
            selectors match anything; empty dict on failure.
        """
        try:
            return {
                "url": self.page.url,
                "title": await self.page.title(),
                "has_netdisk_link": await self._has_netdisk_link(),
                "has_extract_code": await self._has_extract_code()
            }
        except Exception as e:
            self.logger.error(f"获取页面信息失败: {str(e)}")
            return {}

    async def _has_netdisk_link(self) -> bool:
        """Return True if any netdisk-link selector matches an element."""
        for selector in self.netdisk_link_selectors:
            try:
                element = await self.page.query_selector(selector)
                if element:
                    return True
            except Exception:
                continue
        return False

    async def _has_extract_code(self) -> bool:
        """Return True if any extraction-code selector matches an element."""
        for selector in self.extract_code_selectors:
            try:
                element = await self.page.query_selector(selector)
                if element:
                    return True
            except Exception:
                continue
        return False