"""
数据源抽象模块
定义了关键词采集的通用接口，便于扩展新的数据源
"""
from abc import ABC, abstractmethod
import requests
import logging
from typing import List, Dict, Any, Optional
import json
import time
from playwright.sync_api import sync_playwright
from .utils import get_user_agent


class DataSource(ABC):
    """Abstract base class that all keyword data sources implement."""

    def __init__(self, name: str, config: Dict[str, Any]):
        """Set up common state shared by every data source.

        Args:
            name: Human-readable name of the data source.
            config: Configuration dictionary for this source.
        """
        self.name = name
        self.config = config
        # Per-source child logger so log lines identify which source emitted them.
        self.logger = logging.getLogger(f"{__name__}.{name}")
        self.request_config = config.get('request_config', {})

    @abstractmethod
    def collect_keywords(self, keyword: str) -> List[str]:
        """Collect keywords derived from a seed keyword.

        Args:
            keyword: Seed keyword to expand.

        Returns:
            The collected keyword list.
        """

    def get_source_info(self) -> Dict[str, Any]:
        """Describe this data source.

        Returns:
            Dict with the source's name, concrete class name, and enabled flag.
        """
        return {
            'name': self.name,
            'type': type(self).__name__,
            'enabled': self.is_enabled(),
        }

    def is_enabled(self) -> bool:
        """Return True unless the config explicitly disables this source."""
        return self.config.get('enabled', True)


class BaiduSuggestionsSource(DataSource):
    """Baidu search-suggestion (sugrec) data source."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__("百度建议", config)

    def collect_keywords(self, keyword: str) -> List[str]:
        """Fetch long-tail keywords from Baidu's suggestion API.

        Args:
            keyword: Seed keyword to query with.

        Returns:
            Suggested keywords; empty list on any failure (errors are logged,
            never raised, so one failing source does not abort a batch run).
        """
        suggestions: List[str] = []

        try:
            # Baidu suggestion API; the response is JSONP-wrapped JSON.
            url = f"https://www.baidu.com/sugrec?pre=1&p=3&ie=utf-8&json=1&prod=pc&from=pc_web&sugsid=62325,63145,63327,63948,64050,64359,64363,64397,64437,64450,64459,64561,64571,64598,64642,64671,64712,64741,64743,64738,64702,64829,64814,64833,64881,64889,64905,64914&wd={keyword}&csor=10&cb=jQuery1102047227305657596097_1756555479001&_=1756555479002"
            headers = {
                'User-Agent': get_user_agent(),
                'Referer': 'https://www.baidu.com'
            }

            response = requests.get(url, headers=headers,
                                    timeout=self.request_config.get('timeout', 10))

            if response.status_code == 200:
                suggestions = self._parse_jsonp_suggestions(response.text)

            self.logger.info(f"从百度获取到 {len(suggestions)} 个建议关键词: {keyword}")

        except Exception as e:
            self.logger.error(f"获取百度建议失败: {keyword}, 错误: {e}")

        return suggestions

    @staticmethod
    def _parse_jsonp_suggestions(content: str) -> List[str]:
        """Extract suggestion strings from a JSONP-wrapped response body.

        Args:
            content: Raw response text, e.g. ``jQuery...([{"q": "..."}, ...])``.

        Returns:
            The non-empty 'q' values, in response order.
        """
        # Locate the bracketed suggestion array inside the JSONP wrapper.
        start = content.find('[')
        end = content.rfind(']') + 1
        if start == -1 or end == 0:
            return []
        data = json.loads(content[start:end])
        # Fix: the original required len(data) > 1, which silently discarded
        # responses containing exactly one suggestion. Also skip entries whose
        # 'q' is missing or blank, matching the stated filtering intent.
        return [s['q'] for s in data
                if isinstance(s, dict) and s.get('q', '').strip()]


class BaiduRelatedSource(DataSource):
    """Baidu related-search data source, scraped with Playwright."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__("百度相关搜索", config)

    def collect_keywords(self, keyword: str) -> List[str]:
        """Scrape the related-search links from a Baidu results page.

        Args:
            keyword: Seed keyword to search for.

        Returns:
            Related keywords; empty list on any failure (errors are logged).
        """
        related_keywords: List[str] = []

        try:
            with sync_playwright() as p:
                # Fix: run headless as the original comment intended
                # ("改为无头模式，提高性能") and as ZhihuTopicsSource already does;
                # the flag was left at False, contradicting both.
                browser = p.chromium.launch(
                    headless=True,
                    args=['--no-sandbox', '--disable-dev-shm-usage', '--disable-blink-features=AutomationControlled']
                )

                # Context mimicking a real zh-CN desktop browser to reduce
                # the chance of bot detection.
                context = browser.new_context(
                    user_agent=get_user_agent(),
                    viewport={'width': 1920, 'height': 1080},
                    locale='zh-CN',
                    timezone_id='Asia/Shanghai',
                    extra_http_headers={
                        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
                    }
                )

                page = context.new_page()

                # Hide the webdriver flag so the page's scripts do not detect
                # an automated browser.
                page.add_init_script("""
                    Object.defineProperty(navigator, 'webdriver', {
                        get: () => undefined,
                    });
                """)

                # Load the search results page.
                page.goto(f"https://www.baidu.com/s?wd={keyword}", wait_until='domcontentloaded')
                page.wait_for_selector("body", timeout=15000)

                # Fixed delay to let dynamic content render and to look less
                # like an automated visit.
                page.wait_for_timeout(2000)

                related_keywords = self._extract_related_keywords(page, keyword)

                browser.close()

            self.logger.info(f"从百度相关搜索获取到 {len(related_keywords)} 个关键词: {keyword}")

        except Exception as e:
            self.logger.error(f"获取百度相关搜索失败: {keyword}, 错误: {e}")

        return related_keywords

    def _extract_related_keywords(self, page, keyword: str) -> List[str]:
        """Collect candidate related keywords from the loaded results page.

        Args:
            page: Playwright page already navigated to the results.
            keyword: Original seed keyword (used to drop echoes of itself).

        Returns:
            De-duplicated list of valid related keywords, in page order.
        """
        candidates: List[str] = []

        # Anchors inside the container labelled "相关搜索" (related searches).
        # NOTE(review): originally several fallback selector strategies were
        # kept as commented-out dead code; they have been removed.
        xpath_patterns = [
            "//div[contains(text(), '相关搜索')]/parent::div//a",
        ]

        for xpath in xpath_patterns:
            try:
                for element in page.query_selector_all(f"xpath={xpath}"):
                    text = element.text_content().strip()
                    if self._is_valid_related_keyword(text, keyword):
                        candidates.append(text)
            except Exception as e:  # fix: narrowed from a bare except
                self.logger.debug(f"XPath策略失败: {e}")
                continue

        # De-duplicate while preserving first-seen order; candidates were
        # already validated above, so no second validation pass is needed.
        seen = set()
        unique_keywords: List[str] = []
        for kw in candidates:
            if kw not in seen:
                seen.add(kw)
                unique_keywords.append(kw)

        return unique_keywords

    def _is_valid_related_keyword(self, text: str, original_keyword: str) -> bool:
        """Heuristically decide whether *text* is a usable related keyword.

        Args:
            text: Candidate link text.
            original_keyword: Seed keyword to reject echoes of.

        Returns:
            True when the text looks like a genuine related keyword.
        """
        if not text or not isinstance(text, str):
            return False

        text = text.strip()

        # Reject implausible lengths (single characters, huge text blobs).
        if len(text) < 2 or len(text) > 50:
            return False

        # Reject the seed keyword itself.
        if text == original_keyword:
            return False

        # Reject navigation, ads, and other boilerplate page chrome.
        invalid_patterns = [
            '百度', '搜索', '登录', '注册', '更多', '下一页', '上一页',
            '首页', '帮助', '关于', '设置', '反馈', '意见', '建议',
            '广告', '推广', '赞助', '热门', '推荐', '相关', '相似'
        ]
        return not any(pattern in text for pattern in invalid_patterns)


class Search360Source(DataSource):
    """360 search suggestion data source."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__("360搜索", config)

    def collect_keywords(self, keyword: str) -> List[str]:
        """Query the 360 suggestion API for keywords related to the seed.

        Args:
            keyword: Seed keyword to query with.

        Returns:
            Suggested keywords; empty list on any failure (errors are logged).
        """
        results: List[str] = []

        try:
            # 360 suggestion endpoint; response is wrapped in a JSONP callback.
            url = f'http://sug.so.360.cn/suggest?callback=suggest_so&encodein=utf-8&encodeout=utf-8&format=json&fields=word,obdata&word={keyword}'

            headers = {
                'User-Agent': get_user_agent(),
                'Referer': 'https://www.so.com',
                'Accept': 'application/json, text/javascript, */*; q=0.01',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
                'X-Requested-With': 'XMLHttpRequest'
            }

            resp = requests.get(url, headers=headers,
                                timeout=self.request_config.get('timeout', 10))

            if resp.status_code == 200:
                body = resp.text.strip()
                prefix = 'suggest_so('

                # Unwrap the "suggest_so(...)" JSONP envelope, then pull the
                # 'word' field out of each result entry.
                if body.startswith(prefix) and body.endswith(')'):
                    payload = json.loads(body[len(prefix):-1])
                    entries = payload.get('result')
                    if isinstance(entries, list):
                        results = [entry['word'] for entry in entries if 'word' in entry]

            self.logger.info(f"从360搜索获取到 {len(results)} 个建议关键词: {keyword}")

        except Exception as e:
            self.logger.error(f"获取360搜索建议失败: {keyword}, 错误: {e}")

        return results


class ZhihuTopicsSource(DataSource):
    """Zhihu topic data source, scraped with Playwright."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__("知乎话题", config)

    def collect_keywords(self, keyword: str) -> List[str]:
        """Scrape Zhihu topic search results for related keywords.

        Zhihu's API now requires a login, so the public search page is
        scraped instead.

        Args:
            keyword: Seed keyword to search for.

        Returns:
            Topic names; empty list on any failure (errors are logged).
        """
        topics: List[str] = []

        try:
            with sync_playwright() as p:
                browser = p.chromium.launch(
                    headless=True,
                    args=['--no-sandbox', '--disable-dev-shm-usage', '--disable-blink-features=AutomationControlled']
                )

                context = browser.new_context(
                    user_agent=get_user_agent(),
                    viewport={'width': 1920, 'height': 1080},
                    locale='zh-CN',
                    timezone_id='Asia/Shanghai'
                )

                page = context.new_page()

                # Hide the webdriver flag so the page's scripts do not detect
                # an automated browser.
                page.add_init_script("""
                    Object.defineProperty(navigator, 'webdriver', {
                        get: () => undefined,
                    });
                """)

                # Topic search page for the seed keyword.
                search_url = f"https://www.zhihu.com/search?q={keyword}&type=topic"
                page.goto(search_url, wait_until='domcontentloaded')

                # Wait for the search container; fall back to <body> if the
                # markup changed. Fix: narrowed from a bare `except:` which
                # also swallowed KeyboardInterrupt/SystemExit.
                try:
                    page.wait_for_selector(".SearchMain", timeout=15000)
                except Exception:
                    page.wait_for_selector("body", timeout=15000)

                # Delay to let dynamically-loaded results render.
                page.wait_for_timeout(3000)

                # Zhihu's markup changes often; try selectors in order and
                # stop at the first one that yields topics.
                selectors = [
                    ".SearchMain .TopicItem .TopicItem-title",
                    ".SearchMain .TopicItem h3",
                    ".SearchMain .TopicItem a",
                    ".SearchMain .TopicItem .ContentItem-title",
                    "[data-za-detail-view-path*='topic']"
                ]

                for selector in selectors:
                    try:
                        for element in page.query_selector_all(selector):
                            text = element.text_content().strip()
                            if text and len(text) > 1 and text not in topics:
                                topics.append(text)
                        if topics:  # first working selector wins
                            break
                    except Exception:  # fix: narrowed from a bare except
                        continue

                # Last resort: harvest any links that point at a topic page.
                if not topics:
                    try:
                        for link in page.query_selector_all("a[href*='/topic/']"):
                            text = link.text_content().strip()
                            if text and len(text) > 1 and text not in topics:
                                topics.append(text)
                    except Exception:  # fix: narrowed from a bare except
                        pass

                browser.close()

            # Final sanity filter against whitespace-padded short entries.
            topics = [t for t in topics if len(t.strip()) > 1]

            self.logger.info(f"从知乎获取到 {len(topics)} 个话题关键词: {keyword}")

        except Exception as e:
            self.logger.error(f"获取知乎话题失败: {keyword}, 错误: {e}")

        return topics


class DataSourceManager:
    """数据源管理器"""
    
    def __init__(self, config: Dict[str, Any]):
        """
        初始化数据源管理器
        
        Args:
            config: 配置字典
        """
        self.config = config
        self.logger = logging.getLogger(__name__)
        self.sources = self._initialize_sources()
    
    def _initialize_sources(self) -> List[DataSource]:
        """初始化所有数据源"""
        sources = []
        
        # 百度建议
        baidu_config = {
            'enabled': True,
            'request_config': self.config.get('request_config', {})
        }
        sources.append(BaiduSuggestionsSource(baidu_config))
        
        # 百度相关搜索
        baidu_related_config = {
            'enabled': False,
            'request_config': self.config.get('request_config', {})
        }
        sources.append(BaiduRelatedSource(baidu_related_config))
        
        # 360搜索
        search360_config = {
            'enabled': True,
            'request_config': self.config.get('request_config', {})
        }
        sources.append(Search360Source(search360_config))
        
        # 知乎话题
        zhihu_config = {
            'enabled': False,
            'request_config': self.config.get('request_config', {})
        }
        sources.append(ZhihuTopicsSource(zhihu_config))
        
        self.logger.info(f"初始化了 {len(sources)} 个数据源")
        return sources
    
    def add_source(self, source: DataSource):
        """添加新的数据源"""
        self.sources.append(source)
        self.logger.info(f"添加了新数据源: {source.name}")
    
    def remove_source(self, source_name: str):
        """移除数据源"""
        self.sources = [s for s in self.sources if s.name != source_name]
        self.logger.info(f"移除了数据源: {source_name}")
    
    def get_enabled_sources(self) -> List[DataSource]:
        """获取所有启用的数据源"""
        return [s for s in self.sources if s.is_enabled()]
    
    def collect_from_all_sources(self, keyword: str, with_source_info: bool = False) -> List[str] | List[tuple]:
        """
        从所有启用的数据源采集关键词
        
        Args:
            keyword: 种子关键词
            with_source_info: 是否返回带来源信息的结果
        
        Returns:
            如果 with_source_info=False: 关键词列表
            如果 with_source_info=True: (关键词, 数据源名称) 元组列表
        """
        all_keywords = []
        enabled_sources = self.get_enabled_sources()
        
        for source in enabled_sources:
            try:
                self.logger.info(f"正在从 {source.name} 获取关键词: {keyword}")
                keywords = source.collect_keywords(keyword)
                
                if with_source_info:
                    # 返回带来源信息的结果
                    for kw in keywords:
                        all_keywords.append((kw, source.name))
                else:
                    # 返回普通关键词列表（保持向后兼容）
                    all_keywords.extend(keywords)
                
                self.logger.info(f"{source.name} 获取结果: {len(keywords)} 个关键词")
            except Exception as e:
                self.logger.error(f"从 {source.name} 获取关键词失败: {e}")
        
        return all_keywords
    
    def get_source_info(self) -> List[Dict[str, Any]]:
        """获取所有数据源信息"""
        return [source.get_source_info() for source in self.sources]


# 数据源工厂函数
def create_data_source(source_type: str, config: Dict[str, Any]) -> Optional[DataSource]:
    """
    创建数据源实例
    
    Args:
        source_type: 数据源类型
        config: 配置字典
    
    Returns:
        数据源实例或None
    """
    source_map = {
        'baidu_suggestions': BaiduSuggestionsSource,
        'baidu_related': BaiduRelatedSource,
        'search360': Search360Source,
        'zhihu_topics': ZhihuTopicsSource
    }
    
    if source_type in source_map:
        return source_map[source_type](config)
    else:
        logging.warning(f"未知的数据源类型: {source_type}")
        return None
