"""题目解析服务 - 使用 LegalBERT 解析和结构化真题"""
import logging
import re
from typing import Dict, List, Optional, Any
from bs4 import BeautifulSoup

from app.models.legalbert import classify_text, extract_entities
from app.config import settings

logger = logging.getLogger(__name__)


class QuestionParser:
    """Parses scraped web pages into structured exam-question records.

    Combines regex-based extraction (question stem, options, correct answer,
    explanation) with LegalBERT-based enrichment (subject classification,
    named-entity recognition, knowledge points, exam year).

    Each extracted question is a dict with keys: ``content``, ``options``,
    ``correct_answer``, ``explanation``, ``source_url`` — plus, after
    enrichment, ``classification``, ``entities``, ``subject``,
    ``knowledge_points`` and ``year``.
    """

    def __init__(self):
        # Patterns for the question stem; each captures text up to the first
        # option/answer/explanation marker ("选项"/"答案"/"解析") or end of text.
        self.question_patterns = [
            r'题目[：:]\s*(.+?)(?=选项|答案|解析|$)',
            r'问题[：:]\s*(.+?)(?=选项|答案|解析|$)',
            r'(.+?)[？?]\s*(?=选项|答案|解析|$)',
        ]
        # Patterns for a single option, e.g. "A、..." or "选项A：...".
        self.option_patterns = [
            r'[A-Z][、．.。]\s*(.+?)(?=[A-Z][、．.。]|答案|解析|$)',
            r'选项[A-Z][：:]\s*(.+?)(?=选项[A-Z]|答案|解析|$)',
        ]
        # Patterns for the correct answer letters, e.g. "答案：AB".
        self.answer_patterns = [
            r'答案[：:]\s*([A-Z]+)',
            r'正确答案[：:]\s*([A-Z]+)',
            r'参考答案[：:]\s*([A-Z]+)',
        ]
        # Patterns for the explanation section ("解析"/"详解").
        self.explanation_patterns = [
            r'解析[：:]\s*(.+?)(?=题目|问题|$)',
            r'详解[：:]\s*(.+?)(?=题目|问题|$)',
        ]

    def parse_web_content(self, content: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Parse a scraped page dict into a list of question dicts.

        Args:
            content: Page dict; may contain ``url``, ``title``, ``content``
                (plain text) and ``html`` keys. All keys are optional.

        Returns:
            Extracted questions, deduplicated by stem and enriched via
            LegalBERT. Returns an empty list on failure (errors are logged,
            never raised).
        """
        questions: List[Dict[str, Any]] = []
        # FIX: use .get() — a page dict without a 'url' key used to raise
        # KeyError and abort the whole parse.
        source_url = content.get('url', '')

        try:
            # Prefer structured HTML parsing when markup is available.
            if 'html' in content:
                questions.extend(self._parse_html(content['html'], source_url))

            # Also run the plain-text parser when extracted text is available.
            if 'content' in content:
                questions.extend(self._parse_text(content['content'], source_url))

            # FIX: the HTML and plain-text passes can extract the same
            # question twice (the HTML pass even falls back to whole-page
            # text parsing); keep only the first occurrence of each stem.
            seen = set()
            unique: List[Dict[str, Any]] = []
            for q in questions:
                stem = q.get('content', '')
                if stem not in seen:
                    seen.add(stem)
                    unique.append(q)
            questions = unique

            # FIX: the original rebound the loop variable, which only worked
            # because the helper mutates the dict in place; assign the
            # returned dicts back explicitly.
            questions = [self._enhance_with_legalbert(q) for q in questions]

        except Exception as e:
            logger.error(f"解析网页内容失败: {str(e)}", exc_info=True)

        return questions

    def _parse_html(self, html: str, source_url: str) -> List[Dict[str, Any]]:
        """Extract questions from raw HTML.

        Looks for containers whose CSS class suggests a question block;
        falls back to parsing the whole page text when none are found.
        Errors are logged and an empty list is returned.
        """
        questions: List[Dict[str, Any]] = []

        try:
            soup = BeautifulSoup(html, 'html.parser')

            # Containers whose class name hints at question content
            # (English "question" or Chinese "题目"/"问题").
            question_containers = soup.find_all(['div', 'p', 'li'], class_=re.compile(r'question|题目|问题', re.I))

            for container in question_containers:
                block_text = container.get_text(separator='\n', strip=True)
                parsed = self._parse_text_block(block_text, source_url)
                if parsed:
                    questions.append(parsed)

            # No tagged containers: fall back to the whole page's text.
            if not questions:
                page_text = soup.get_text(separator='\n', strip=True)
                questions.extend(self._parse_text(page_text, source_url))

        except Exception as e:
            logger.error(f"HTML 解析失败: {str(e)}")

        return questions

    def _parse_text(self, text: str, source_url: str) -> List[Dict[str, Any]]:
        """Extract questions from plain text.

        Splits the text into blank-line-separated paragraphs and walks them
        as a small state machine: a paragraph that looks like a question
        start opens a new record; subsequent paragraphs are classified as
        option, answer or explanation and attached to the open record.
        """
        questions: List[Dict[str, Any]] = []
        paragraphs = text.split('\n\n')
        current_question: Optional[Dict[str, Any]] = None

        for para in paragraphs:
            para = para.strip()
            if not para:
                continue

            if self._is_question_start(para):
                # New question begins: flush the previous one, if any.
                if current_question:
                    questions.append(current_question)
                current_question = {
                    'content': para,
                    'options': [],
                    'correct_answer': '',
                    'explanation': '',
                    'source_url': source_url
                }
            elif current_question:
                option = self._extract_option(para)
                if option:
                    current_question['options'].append(option)
                elif self._is_answer(para):
                    current_question['correct_answer'] = self._extract_answer(para)
                elif self._is_explanation(para):
                    # NOTE: stores the whole paragraph, marker included.
                    current_question['explanation'] = para

        # Flush the last open question.
        if current_question:
            questions.append(current_question)

        return questions

    def _parse_text_block(self, text: str, source_url: str) -> Optional[Dict[str, Any]]:
        """Parse one self-contained text block into a question dict.

        Returns None when no question stem could be extracted.
        """
        question: Dict[str, Any] = {
            'content': '',
            'options': [],
            'correct_answer': '',
            'explanation': '',
            'source_url': source_url
        }

        # Question stem: first matching pattern wins.
        for pattern in self.question_patterns:
            match = re.search(pattern, text, re.DOTALL)
            if match:
                question['content'] = match.group(1).strip()
                break

        # Options: first pattern that yields any matches wins.
        for pattern in self.option_patterns:
            matches = re.findall(pattern, text)
            if matches:
                question['options'] = [m.strip() for m in matches]
                break

        # Correct answer letters.
        for pattern in self.answer_patterns:
            match = re.search(pattern, text, re.I)
            if match:
                question['correct_answer'] = match.group(1).strip()
                break

        # Explanation text.
        for pattern in self.explanation_patterns:
            match = re.search(pattern, text, re.DOTALL)
            if match:
                question['explanation'] = match.group(1).strip()
                break

        return question if question['content'] else None

    def _is_question_start(self, text: str) -> bool:
        """Return True when the paragraph looks like the start of a question
        (numbered prefix like "1、" / "(1)" or a "题目："/"问题：" marker)."""
        patterns = [
            r'^\d+[、．.。]',
            r'^题目[：:]',
            r'^问题[：:]',
            r'^[（(]\d+[）)]',
        ]
        return any(re.match(pattern, text) for pattern in patterns)

    def _extract_option(self, text: str) -> Optional[str]:
        """Return the option body of a paragraph like "A、..." or None."""
        match = re.match(r'^[A-Z][、．.。]\s*(.+)$', text.strip())
        if match:
            return match.group(1).strip()
        return None

    def _is_answer(self, text: str) -> bool:
        """Return True when the paragraph contains an answer marker."""
        return bool(re.search(r'答案[：:]', text, re.I))

    def _extract_answer(self, text: str) -> str:
        """Return the answer letters after "答案：", or '' when absent."""
        match = re.search(r'答案[：:]\s*([A-Z]+)', text, re.I)
        if match:
            return match.group(1).strip()
        return ''

    def _is_explanation(self, text: str) -> bool:
        """Return True when the paragraph contains an explanation marker."""
        return bool(re.search(r'解析[：:]|详解[：:]', text, re.I))

    def _enhance_with_legalbert(self, question: Dict[str, Any]) -> Dict[str, Any]:
        """Enrich a question dict in place with LegalBERT-derived metadata.

        Adds ``classification``, ``entities``, ``subject``,
        ``knowledge_points`` and ``year``. On any failure the error is
        logged and the question is returned unmodified (best-effort).

        Args:
            question: Question dict as produced by the parsing methods.

        Returns:
            The same dict, possibly enriched.
        """
        try:
            content = question.get('content', '')
            if not content:
                return question

            # 1. Text classification (subject / difficulty signals).
            classification = classify_text(content, num_labels=8)  # 8 subjects
            question['classification'] = classification

            # 2. Named-entity recognition (statutes, cases, institutions).
            entities = extract_entities(content)
            question['entities'] = entities

            # 3. Subject label.
            question['subject'] = self._extract_subject(content, classification)

            # 4. Knowledge points.
            question['knowledge_points'] = self._extract_knowledge_points(content, entities)

            # 5. Exam year.
            question['year'] = self._extract_year(content)

        except Exception as e:
            logger.error(f"LegalBERT 增强解析失败: {str(e)}")

        return question

    def _extract_subject(self, content: str, classification: Dict[str, float]) -> str:
        """Map question content to a subject label.

        Keyword matching takes precedence; the classifier output is a
        placeholder fallback. Returns '综合' (general) when nothing matches.
        """
        # Subject -> keyword list; first subject with any keyword hit wins.
        subject_keywords = {
            '民法': ['民法', '民事', '合同', '物权', '侵权'],
            '刑法': ['刑法', '犯罪', '刑罚', '罪名'],
            '民诉': ['民诉', '民事诉讼', '诉讼程序'],
            '刑诉': ['刑诉', '刑事诉讼', '刑事程序'],
            '行政': ['行政', '行政诉讼', '行政许可'],
            '商经': ['商法', '经济法', '公司法', '证券'],
            '三国': ['国际法', '国际私法', '国际经济法'],
        }

        for subject, keywords in subject_keywords.items():
            if any(keyword in content for keyword in keywords):
                return subject

        # TODO: map the classifier's top label to a subject name here.
        if classification:
            pass

        return '综合'

    def _extract_knowledge_points(self, content: str, entities: List[Dict]) -> List[str]:
        """Collect knowledge-point tags from NER entities and keyword hits.

        Returns a deduplicated list (order not guaranteed).
        """
        knowledge_points: List[str] = []

        # Statute and case entities count as knowledge points.
        for entity in entities:
            if entity.get('label') in ['LAW', 'CASE']:
                knowledge_points.append(entity.get('text', ''))

        # Common legal-topic keywords found directly in the content.
        knowledge_keywords = [
            '合同', '物权', '侵权', '婚姻', '继承',
            '犯罪', '刑罚', '罪名', '程序',
            '管辖', '证据', '执行', '仲裁'
        ]

        for keyword in knowledge_keywords:
            if keyword in content:
                knowledge_points.append(keyword)

        return list(set(knowledge_points))  # de-duplicate

    def _extract_year(self, content: str) -> Optional[int]:
        """Return the 4-digit exam year found in the content, or None.

        Only years in the plausible range 2015–2025 are accepted.
        """
        match = re.search(r'(\d{4})年', content)
        if match:
            year = int(match.group(1))
            if 2015 <= year <= 2025:
                return year
        return None

