"""法考真题爬虫服务"""
import hashlib
import json
import re
import time
from datetime import datetime
from typing import Any, Dict, List, Optional
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup


class LegalExamScraper:
    """Scraper for Chinese national legal exam (法考) past questions.

    For each requested year the scraper tries a list of candidate sites in
    order and normalises every question into a plain dict. When no site is
    reachable it falls back to generated mock data so downstream code can be
    developed and tested offline. Running totals are kept in ``self.stats``.
    """

    def __init__(self) -> None:
        self.session = requests.Session()
        # Present a regular desktop-browser User-Agent to avoid trivial blocking.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })
        # Candidate sites, tried in order until one yields questions.
        self.base_urls = [
            'https://www.zhumavip.com',  # Zhuma fakao
            'https://www.fakao365.com',  # Fakao 365
        ]
        self.scraped_questions: List[Dict[str, Any]] = []
        # Aggregate statistics for this scraper instance.
        self.stats: Dict[str, Any] = {
            'total_scraped': 0,
            'by_year': {},
            'by_subject': {},
            'errors': []
        }

    def scrape_recent_years(self, years: Optional[List[int]] = None) -> List[Dict[str, Any]]:
        """Scrape past exam questions for the given years.

        Args:
            years: Years to scrape; defaults to the most recent 5 years
                (current year inclusive).

        Returns:
            All questions scraped across the requested years.
        """
        if years is None:
            current_year = datetime.now().year
            years = list(range(current_year - 4, current_year + 1))

        print(f"开始爬取 {years} 年的法考真题...")

        all_questions: List[Dict[str, Any]] = []
        for year in years:
            try:
                questions = self._scrape_year(year)
                all_questions.extend(questions)
                self.stats['by_year'][year] = len(questions)
                print(f"✅ {year}年: 爬取 {len(questions)} 道题目")
                time.sleep(2)  # Throttle so we do not hammer the sites.
            except Exception as e:
                # A failing year must not abort the whole run; record and continue.
                error_msg = f"{year}年爬取失败: {str(e)}"
                print(f"❌ {error_msg}")
                self.stats['errors'].append(error_msg)

        self.stats['total_scraped'] = len(all_questions)
        return all_questions

    def _scrape_year(self, year: int) -> List[Dict[str, Any]]:
        """Scrape one year, trying each configured site in order."""
        questions: List[Dict[str, Any]] = []

        for base_url in self.base_urls:
            try:
                year_questions = self._scrape_from_site(base_url, year)
                questions.extend(year_questions)
                if year_questions:
                    break  # First successful site wins; skip the rest.
            except Exception as e:
                print(f"从 {base_url} 爬取 {year} 年失败: {e}")
                continue

        # Fall back to mock data (development/testing) when nothing was scraped.
        if not questions:
            questions = self._generate_mock_questions(year)

        return questions

    def _scrape_from_site(self, base_url: str, year: int) -> List[Dict[str, Any]]:
        """Fetch and parse one site's question page for the given year.

        NOTE(review): the URL pattern and parsing below are a framework only;
        they must be adapted to each real site's actual structure.
        """
        url = f"{base_url}/exam/{year}"

        try:
            response = self.session.get(url, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')
            return self._parse_questions(soup, year)
        except requests.RequestException as e:
            print(f"请求失败: {e}")
            return []

    def _parse_questions(self, soup: BeautifulSoup, year: int) -> List[Dict[str, Any]]:
        """Extract all question dicts from a parsed page.

        The CSS class names below are example selectors; adjust them to the
        markup of the target site.
        """
        questions: List[Dict[str, Any]] = []

        question_containers = soup.find_all('div', class_='question-item')

        for container in question_containers:
            try:
                question = self._extract_question_data(container, year)
                if question:
                    questions.append(question)
            except Exception as e:
                # Skip malformed entries instead of failing the whole page.
                print(f"解析题目失败: {e}")
                continue

        return questions

    def _make_question_id(self, year: int, content: str) -> str:
        """Return a stable ID for a scraped question.

        Uses MD5 of the content rather than the builtin ``hash()``: string
        hashing is salted per process (PYTHONHASHSEED), so ``hash()`` would
        yield a different ID for the same question on every run, breaking
        deduplication and persistence.
        """
        digest = hashlib.md5(content.encode('utf-8')).hexdigest()
        return f"scraped_{year}_{int(digest[:8], 16) % 100000}"

    def _extract_question_data(self, container, year: int) -> Optional[Dict[str, Any]]:
        """Build one normalised question dict from a question container element.

        Returns None when the container lacks a stem or options.
        """
        try:
            # Question stem.
            content_elem = container.find('div', class_='question-content')
            content = content_elem.get_text(strip=True) if content_elem else ""

            # Answer options.
            options = []
            option_elems = container.find_all('div', class_='option')
            for elem in option_elems:
                option_text = elem.get_text(strip=True)
                if option_text:
                    options.append(option_text)

            # Correct answer.
            answer_elem = container.find('div', class_='correct-answer')
            correct_answer = answer_elem.get_text(strip=True) if answer_elem else ""

            # Explanation / analysis text.
            explanation_elem = container.find('div', class_='explanation')
            explanation = explanation_elem.get_text(strip=True) if explanation_elem else ""

            # Subject label.
            subject_elem = container.find('span', class_='subject')
            subject = subject_elem.get_text(strip=True) if subject_elem else "未知"

            if not content or not options:
                return None

            return {
                "question_id": self._make_question_id(year, content),
                "content": content,
                "options": options,
                "correct_answer": correct_answer,
                "explanation": explanation,
                "type": "single_choice",
                "difficulty": "medium",  # Default; refine later via analysis.
                "category": subject,
                "subject": self._map_subject(subject),
                "year": year,
                "exam_type": "客观题",
                "source": "scraped",
                "knowledge_points": self._extract_knowledge_points(explanation),
                "difficulty_score": None,  # Assessed later.
                "wrong_count": 0,
                "correct_count": 0
            }
        except Exception as e:
            print(f"提取题目数据失败: {e}")
            return None

    def _map_subject(self, subject: str) -> str:
        """Map a Chinese subject label to its English identifier."""
        subject_map = {
            "民法": "civil_law",
            "民诉": "civil_procedure",
            "民事诉讼法": "civil_procedure",
            "刑法": "criminal_law",
            "刑诉": "criminal_procedure",
            "刑事诉讼法": "criminal_procedure",
            "行政": "administrative",
            "行政法": "administrative",
            "商经": "commercial",
            "商法": "commercial",
            "经济法": "commercial",
            "三国": "international",
            "国际法": "international",
            "综合": "comprehensive"
        }
        return subject_map.get(subject, "comprehensive")

    def _extract_knowledge_points(self, explanation: str) -> List[str]:
        """Extract knowledge-point keywords from the explanation text.

        Simple substring matching for now; could be replaced by NLP later.
        """
        knowledge_keywords = [
            "合同", "侵权", "物权", "债权", "婚姻", "继承",
            "犯罪", "刑罚", "正当防卫", "紧急避险",
            "管辖", "证据", "审判", "执行",
            "行政许可", "行政处罚", "行政复议",
            "公司", "合伙", "破产", "票据",
            "国际法", "国际私法", "国际经济法"
        ]

        found_points = []
        for keyword in knowledge_keywords:
            if keyword in explanation:
                found_points.append(keyword)

        return found_points[:3]  # Return at most 3 knowledge points.

    def _generate_mock_questions(self, year: int) -> List[Dict[str, Any]]:
        """Generate placeholder questions for development when no site is reachable."""
        print(f"⚠️ 无法访问真实网站，为 {year} 年生成模拟题目数据")

        subjects = ["civil_law", "civil_procedure", "criminal_law", "criminal_procedure",
                   "administrative", "commercial", "international", "comprehensive"]
        difficulties = ["easy", "medium", "hard"]

        mock_questions = []
        # One question per (subject, difficulty) pair, with deterministic IDs.
        for i, subject in enumerate(subjects):
            for j, difficulty in enumerate(difficulties):
                question_id = f"mock_{year}_{subject}_{difficulty}_{i*3+j+1}"
                mock_questions.append({
                    "question_id": question_id,
                    "content": f"[{year}年真题] 这是{subject}科目的{difficulty}难度题目，题目内容待补充。",
                    "options": [
                        "选项A：选项内容待补充",
                        "选项B：选项内容待补充",
                        "选项C：选项内容待补充",
                        "选项D：选项内容待补充"
                    ],
                    "correct_answer": "选项A",
                    "explanation": "解析内容待补充",
                    "type": "single_choice",
                    "difficulty": difficulty,
                    "category": self._get_category_name(subject),
                    "subject": subject,
                    "year": year,
                    "exam_type": "客观题",
                    "source": "mock",
                    "knowledge_points": [f"{subject}_知识点"],
                    "difficulty_score": {"easy": 30, "medium": 60, "hard": 90}[difficulty],
                    "wrong_count": 0,
                    "correct_count": 0
                })

        return mock_questions

    def _get_category_name(self, subject: str) -> str:
        """Map an English subject identifier back to its Chinese name."""
        category_map = {
            "civil_law": "民法",
            "civil_procedure": "民诉",
            "criminal_law": "刑法",
            "criminal_procedure": "刑诉",
            "administrative": "行政",
            "commercial": "商经",
            "international": "三国",
            "comprehensive": "综合"
        }
        return category_map.get(subject, "综合")

    def save_to_file(self, questions: List[Dict[str, Any]], filepath: str) -> None:
        """Save questions to a JSON file (UTF-8, human-readable)."""
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(questions, f, ensure_ascii=False, indent=2)
        print(f"✅ 已保存 {len(questions)} 道题目到 {filepath}")

    def get_stats(self) -> Dict[str, Any]:
        """Return the running scrape statistics."""
        return self.stats


# Module-level singleton instance; constructing it at import time creates
# the shared requests.Session used for all scraping.
legal_exam_scraper = LegalExamScraper()


