"""
找法网解析器

专门用于解析找法网(findlaw.cn)的案例详情页
"""

import re
import logging
from urllib.parse import urlparse
from bs4 import BeautifulSoup
from lxml import etree

from crawler.parser import BaseParser

logger = logging.getLogger('findlaw_parser')

class FindlawParser(BaseParser):
    """Parser for case-detail pages on findlaw.cn (找法网)."""

    def __init__(self):
        """Initialize the parser and register the domains it handles."""
        super().__init__()
        # Domains this parser accepts (exact netloc match — see can_parse).
        self.domains = ['findlaw.cn', 'm.findlaw.cn']
        logger.info("找法网解析器初始化完成")

    def can_parse(self, url):
        """
        Return True if this parser can handle *url*.

        A URL qualifies when its netloc exactly matches one of
        ``self.domains`` and its path contains ``/case/``.

        NOTE(review): exact netloc matching rejects subdomains such as
        'www.findlaw.cn' or 'china.findlaw.cn' — confirm that is intended.

        Parameters:
            url: the URL to check

        Returns:
            bool: whether this parser supports the URL
        """
        domain = urlparse(url).netloc
        return domain in self.domains and '/case/' in url

    def parse(self, html, url):
        """
        Parse a findlaw.cn case-detail page.

        Parameters:
            html: raw HTML of the page
            url:  the page URL (stored in the result)

        Returns:
            dict with key 'url' and, when the corresponding section is
            found, 'title', 'intro', 'analysis', 'judgment', 'laws';
            or None if any unexpected error occurs.
        """
        try:
            result = {'url': url}

            soup = BeautifulSoup(html, 'html.parser')

            # Case title is the <h1> inside div.article.
            title_element = soup.select_one('div.article h1')
            if title_element:
                result['title'] = title_element.get_text().strip()

            # Section headings on the page mapped to result keys.
            # (Headings are the literal Chinese strings used by the site;
            # they must not be translated.)
            sections = {
                'intro': '案情介绍',      # case introduction
                'analysis': '案情分析',   # case analysis
                'judgment': '判决结果',   # judgment result
                'laws': '相关法规',       # related statutes
            }
            for key, heading in sections.items():
                text = self._extract_section(soup, heading)
                if text:
                    result[key] = text

            # Link extraction intentionally omitted (per user requirement).
            logger.info(f"成功解析找法网案例: {url}")
            return result

        except Exception as e:
            # Best-effort parser: log and signal failure with None so the
            # crawler can continue with other pages.
            logger.error(f"解析找法网案例失败: {url}, 错误: {e}")
            return None

    def _extract_section(self, soup, section_title):
        """
        Extract the text of the content block titled *section_title*.

        Parameters:
            soup: BeautifulSoup document of the page
            section_title: exact heading text of the block

        Returns:
            The block's paragraphs joined with newlines, '' when the block
            exists but has no text, or None when the heading/block is
            missing or extraction fails.
        """
        try:
            # The heading is a <p class="color_b"> whose text equals the title.
            title_p = soup.find('p', class_='color_b', string=section_title)
            if not title_p:
                return None

            # The whole block is the enclosing <li class="dj-content-li">.
            parent_li = title_p.find_parent('li', class_='dj-content-li')
            if not parent_li:
                return None

            # Collect every non-empty paragraph after the heading.
            # NOTE(review): [1:] assumes the heading is the first <p> in
            # the <li> — confirm against the site's markup.
            paragraphs = (
                p.get_text().strip()
                for p in parent_li.find_all('p')[1:]
            )
            return '\n'.join(text for text in paragraphs if text)

        except Exception as e:
            logger.error(f"提取区块失败: {section_title}, 错误: {e}")
            return None
