"""
解析器模块

负责解析HTML内容，提取结构化数据
"""

import importlib
import importlib.util
import logging
import os
import re
from urllib.parse import urljoin, urlparse

import yaml
from bs4 import BeautifulSoup
from lxml import etree

# Module-level logger for the parser subsystem.
logger = logging.getLogger('parser')

class BaseParser:
    """Common interface for all parsers.

    Subclasses declare the domains they handle via ``self.domains``
    and implement :meth:`parse`.
    """

    def __init__(self):
        """Set up the parser with an empty domain whitelist."""
        # Domains this parser handles; an empty list means "all domains".
        self.domains = []

    def can_parse(self, url):
        """Return True when this parser handles *url*'s domain.

        Args:
            url: URL to check.

        Returns:
            bool: True if ``self.domains`` is empty (accept everything)
            or the URL's netloc ends with one of the configured domains.
        """
        if not self.domains:
            return True

        netloc = urlparse(url).netloc
        for candidate in self.domains:
            if netloc.endswith(candidate):
                return True
        return False

    def parse(self, html, url):
        """Parse *html* fetched from *url* into a result dict.

        Args:
            html: HTML content.
            url: page URL.

        Raises:
            NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError("子类必须实现parse方法")


class RuleParser(BaseParser):
    """Rule-driven parser that extracts fields via per-domain rules.

    Rules are loaded from ``config/rules.yaml`` and map a domain to a
    ``{field: rule}`` dict. A rule is either a string (prefixed with
    ``xpath:``, ``css:`` or ``regex:``; unprefixed strings are treated
    as XPath) or a dict with one extractor key (``xpath``/``css``/
    ``regex``) plus optional post-processing (``replace``, ``strip``).
    """

    def __init__(self, config):
        """Initialize the rule parser and load its rules.

        Args:
            config: configuration object (stored; not read here).
        """
        super().__init__()
        self.config = config
        # domain -> {field: rule} mapping loaded from the YAML rules file.
        self.rules = {}

        # Load parse rules from the config directory.
        self._load_rules()

        logger.info(f"规则解析器初始化完成，加载了 {len(self.rules)} 个域名规则")

    def _load_rules(self):
        """Load parse rules from ``config/rules.yaml`` beside the package root."""
        rules_file = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'config', 'rules.yaml')

        if not os.path.exists(rules_file):
            logger.warning(f"规则文件不存在: {rules_file}")
            return

        try:
            with open(rules_file, 'r', encoding='utf-8') as f:
                # safe_load: never construct arbitrary objects from the file.
                self.rules = yaml.safe_load(f) or {}

            logger.info(f"成功加载规则文件: {rules_file}")
        except Exception as e:
            logger.error(f"加载规则文件失败: {e}")

    def get_domain_rules(self, url):
        """Return the rule dict matching *url*'s domain, or None.

        Matching order: exact netloc, then parent-domain suffix
        ("a.example.com" matches a rule for "example.com"), then the
        last two labels (naive heuristic; does not handle e.g. "co.uk").

        Args:
            url: page URL.

        Returns:
            Rule dict or None when no domain matches.
        """
        domain = urlparse(url).netloc

        # Exact match.
        if domain in self.rules:
            return self.rules[domain]

        # Sub-domain match: a parent-domain rule covers its subdomains.
        for rule_domain, rules in self.rules.items():
            if domain.endswith('.' + rule_domain):
                return rules

        # Naive "main domain" match on the last two labels.
        main_domain = '.'.join(domain.split('.')[-2:])
        if main_domain in self.rules:
            return self.rules[main_domain]

        return None

    def parse(self, html, url):
        """Parse *html* according to the rules for *url*'s domain.

        Args:
            html: HTML content.
            url: page URL (selects the rule set).

        Returns:
            Dict of extracted fields, or None when no rules match or
            parsing raises.
        """
        # Pick the rule set for this domain.
        rules = self.get_domain_rules(url)
        if not rules:
            logger.warning(f"未找到匹配的解析规则: {url}")
            return None

        try:
            result = {}

            # Build both tree representations once; rules may use either.
            soup = BeautifulSoup(html, 'html.parser')
            dom = etree.HTML(html)

            # Apply each field's rule.
            for field, rule in rules.items():
                if isinstance(rule, str):
                    # Simple rule: dispatch on prefix; default is XPath.
                    if rule.startswith('xpath:'):
                        result[field] = self.extract_by_xpath(dom, rule[6:])
                    elif rule.startswith('css:'):
                        result[field] = self.extract_by_css(soup, rule[4:])
                    elif rule.startswith('regex:'):
                        result[field] = self.extract_by_regex(html, rule[6:])
                    else:
                        result[field] = self.extract_by_xpath(dom, rule)
                elif isinstance(rule, dict):
                    # Compound rule: one extractor plus optional post-processing.
                    if 'xpath' in rule:
                        result[field] = self.extract_by_xpath(dom, rule['xpath'])
                    elif 'css' in rule:
                        result[field] = self.extract_by_css(soup, rule['css'])
                    elif 'regex' in rule:
                        result[field] = self.extract_by_regex(html, rule['regex'])

                    # Post-processing. Use .get(): a dict rule with no
                    # recognized extractor key must not raise KeyError here.
                    if 'replace' in rule and result.get(field):
                        for pattern, repl in rule['replace'].items():
                            result[field] = re.sub(pattern, repl, result[field])

                    if 'strip' in rule and rule['strip'] and result.get(field):
                        result[field] = result[field].strip()

            # Report the real field count (the old "- 2" assumed two
            # bookkeeping keys that are never added to the result).
            logger.info(f"成功解析: {url}, 提取了 {len(result)} 个字段")
            return result

        except Exception as e:
            logger.error(f"解析失败: {url}, 错误: {e}")
            return None

    @staticmethod
    def _node_text(node):
        """Best-effort text for one XPath result item (string, element, other)."""
        if isinstance(node, str):
            return node
        if hasattr(node, 'text'):
            return node.text
        return str(node)

    def extract_by_xpath(self, dom, xpath):
        """Extract data with an XPath expression.

        Args:
            dom: lxml etree DOM object.
            xpath: XPath expression.

        Returns:
            A single string, multiple matches joined with newlines, or
            None when nothing matches or the expression is invalid.
        """
        try:
            results = dom.xpath(xpath)

            if not results:
                return None

            if isinstance(results, list):
                if len(results) == 1:
                    return self._node_text(results[0])
                texts = [self._node_text(item) for item in results]
                return '\n'.join(texts) if texts else None

            # Non-list result (e.g. XPath functions like count()/string()).
            return str(results)

        except Exception as e:
            logger.error(f"XPath提取失败: {xpath}, 错误: {e}")
            return None

    def extract_by_css(self, soup, css):
        """Extract data with a CSS selector.

        Supports Scrapy-style suffixes: ``sel::text`` (text content, the
        default) and ``sel::attr(name)`` (attribute value).

        Args:
            soup: BeautifulSoup object.
            css: CSS selector, optionally suffixed as above.

        Returns:
            A single string, multiple matches joined with newlines, or
            None when nothing matches or the selector is invalid.
        """
        try:
            attr_match = re.search(r'::attr\(([^)]+)\)$', css)
            text_match = re.search(r'::text$', css)

            if attr_match:
                # Attribute extraction: strip the ::attr(...) suffix.
                attr_name = attr_match.group(1)
                elements = soup.select(css[:attr_match.start()])
                if not elements:
                    return None
                values = [e.get(attr_name, '') for e in elements]
            else:
                # Text extraction (explicit ::text or the default).
                selector = css[:text_match.start()] if text_match else css
                elements = soup.select(selector)
                if not elements:
                    return None
                values = [e.get_text().strip() for e in elements]

            return values[0] if len(values) == 1 else '\n'.join(values)

        except Exception as e:
            logger.error(f"CSS提取失败: {css}, 错误: {e}")
            return None

    def extract_by_regex(self, html, regex):
        """Extract data with a regular expression (compiled with DOTALL).

        When the pattern has capture groups, only the first group of
        each match is kept.

        Args:
            html: HTML content.
            regex: regular expression source.

        Returns:
            A single string, multiple matches joined with newlines, or
            None when nothing matches or the pattern is invalid.
        """
        try:
            matches = re.compile(regex, re.DOTALL).findall(html)

            if not matches:
                return None

            if isinstance(matches[0], tuple):
                # Grouped pattern: findall yields tuples; keep group 1.
                firsts = [m[0] for m in matches if m]
                if len(matches) == 1:
                    return firsts[0] if firsts else None
                return '\n'.join(firsts)

            return matches[0] if len(matches) == 1 else '\n'.join(matches)

        except Exception as e:
            logger.error(f"正则提取失败: {regex}, 错误: {e}")
            return None

    def extract_links(self, soup, url):
        """Collect every ``<a href>`` target as an absolute URL.

        Relative links are resolved against the page URL with urljoin,
        which correctly handles root-relative, document-relative and
        protocol-relative forms (the old manual concatenation produced
        URLs like ``.../page.html/rel.html`` for relative hrefs).

        Args:
            soup: BeautifulSoup object.
            url: page URL used as the resolution base.

        Returns:
            List of absolute URL strings.
        """
        return [urljoin(url, a['href']) for a in soup.find_all('a', href=True)]

    def extract_pagination_links(self, soup, url):
        """Collect raw hrefs that look like pagination links.

        Any href containing "page" matches; this subsumes the previous
        additional "pageno" check. Links are returned as-is (not
        resolved to absolute URLs).

        Args:
            soup: BeautifulSoup object.
            url: page URL (unused; kept for interface compatibility).

        Returns:
            List of href strings.
        """
        return [a['href'] for a in soup.find_all('a', href=True) if 'page' in a['href']]


class ParserManager:
    """Coordinates the rule parser and dynamically loaded parser plugins."""

    def __init__(self, config):
        """Initialize the parser manager.

        Args:
            config: configuration object, forwarded to the rule parser.
        """
        self.config = config
        self.parsers = []

        # The rule parser is always present and acts as the fallback.
        self.rule_parser = RuleParser(config)
        self.parsers.append(self.rule_parser)

        # Load custom parser plugins from the parsers/ directory.
        self._load_plugins()

        logger.info(f"解析器管理器初始化完成，共加载 {len(self.parsers)} 个解析器")

    def _load_plugins(self):
        """Load custom parser plugins from the sibling ``parsers`` directory.

        Each non-dunder ``.py`` file is imported; every BaseParser
        subclass found in it is instantiated and registered.
        """
        plugins_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'parsers')

        if not os.path.exists(plugins_dir):
            logger.warning(f"插件目录不存在: {plugins_dir}")
            return

        # Scan the plugin directory for candidate modules.
        for filename in os.listdir(plugins_dir):
            if filename.endswith('.py') and not filename.startswith('__'):
                try:
                    # Build the module name and path.
                    module_name = f"parsers.{filename[:-3]}"
                    module_path = os.path.join(plugins_dir, filename)

                    # Load the module from its file location.
                    spec = importlib.util.spec_from_file_location(module_name, module_path)
                    if spec is None or spec.loader is None:
                        # Guard: spec_from_file_location can return None,
                        # which would otherwise raise an opaque AttributeError.
                        logger.error(f"加载解析器插件失败: {filename}, 错误: 无法创建模块规范")
                        continue
                    module = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(module)

                    # Find and instantiate parser classes defined in the module.
                    for attr_name in dir(module):
                        attr = getattr(module, attr_name)
                        if (isinstance(attr, type) and
                            issubclass(attr, BaseParser) and
                            attr is not BaseParser):
                            parser = attr()
                            self.parsers.append(parser)
                            # Log the actual source file instead of a placeholder.
                            logger.info(f"加载解析器插件: {attr_name} from {filename}")

                except Exception as e:
                    logger.error(f"加载解析器插件失败: {filename}, 错误: {e}")

    def get_parser(self, url):
        """Pick a suitable parser for *url*.

        Custom plugin parsers are preferred; the rule parser is used
        only when it actually has rules for the URL's domain.

        Args:
            url: URL to be parsed.

        Returns:
            A parser instance, or None when nothing can handle the URL.
        """
        # Custom parsers take precedence over the generic rule parser.
        for parser in self.parsers:
            if parser is not self.rule_parser and parser.can_parse(url):
                logger.info(f"使用自定义解析器: {parser.__class__.__name__} 解析 {url}")
                return parser

        # Fall back to the rule parser only if it has matching rules.
        if self.rule_parser.get_domain_rules(url):
            logger.info(f"使用规则解析器解析 {url}")
            return self.rule_parser

        logger.warning(f"未找到合适的解析器: {url}")
        return None

    def parse(self, html, url):
        """Parse HTML content with the best-matching parser.

        Args:
            html: HTML content.
            url: page URL.

        Returns:
            Parse result dict, or None when no parser matches.
        """
        parser = self.get_parser(url)
        if parser:
            return parser.parse(html, url)
        return None
