"""LegalBERT服务集成"""
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
import torch
import hashlib
import json
from transformers import AutoTokenizer, AutoModel
from app.core.config import settings
from app.core.database import RedisClient


class LegalBERTService(ABC):
    """Abstract interface for LegalBERT-based legal-text analysis services."""

    @abstractmethod
    async def analyze_legal_text(self, text: str) -> Dict[str, Any]:
        """Analyze a piece of legal text and return structured analysis."""
        ...

    @abstractmethod
    async def extract_entities(self, text: str) -> List[Dict[str, Any]]:
        """Extract legal entities (provisions, parties, actions, venues)."""
        ...

    @abstractmethod
    async def classify_legal_document(self, text: str) -> Dict[str, Any]:
        """Classify a legal document into a legal-domain category."""
        ...

    @abstractmethod
    async def analyze_question_structure(self, question_content: str) -> Dict[str, Any]:
        """Analyze the structure of an exam question (type, keywords, difficulty)."""
        ...

    @abstractmethod
    async def extract_legal_reasoning_chain(self, question_content: str, user_notes: Optional[str] = None) -> Dict[str, Any]:
        """Extract the syllogistic reasoning chain (major premise / minor premise / conclusion)."""
        ...

    @abstractmethod
    async def detect_question_traps(self, question_content: str) -> List[Dict[str, Any]]:
        """Detect common traps embedded in a question's wording."""
        ...

    @abstractmethod
    async def analyze_option_similarity(self, question_content: str, options: List[str]) -> Dict[str, Any]:
        """Analyze pairwise similarity and differences between answer options."""
        ...

    @abstractmethod
    async def extract_key_facts(self, case_text: str) -> List[Dict[str, Any]]:
        """Extract the key facts from a case description."""
        ...


class LegalBERTServiceImpl(LegalBERTService):
    """Concrete LegalBERT service backed by a HuggingFace transformer.

    Falls back to a keyword-based "simulation mode" when the model cannot be
    loaded, and optionally caches analysis results in Redis.
    """

    def __init__(self):
        # Model identifier comes from application settings.
        self.model_name = settings.LEGAL_BERT_MODEL_NAME
        self.tokenizer = None
        self.model = None

        # Resolve inference device: "auto" prefers CUDA when available.
        if settings.LEGAL_BERT_DEVICE == "auto":
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
        else:
            self.device = settings.LEGAL_BERT_DEVICE

        self.max_length = settings.LEGAL_BERT_MAX_LENGTH
        self.enable_cache = settings.LEGAL_BERT_ENABLE_CACHE
        self.question_bank_enabled = False

        # Redis cache is best-effort; the service works without it.
        # (Was a bare `except:`, which would also swallow KeyboardInterrupt.)
        try:
            self.redis = RedisClient.get_client()
        except Exception:
            self.redis = None

        self._load_model()

    def _load_model(self):
        """Load tokenizer and model; on failure fall back to simulation mode."""
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModel.from_pretrained(self.model_name)
            self.model.to(self.device)
            self.model.eval()
            print(f"✓ LegalBERT模型加载成功，使用设备: {self.device}")
            # Try to wire up the legal question-bank datasets as well.
            self._load_question_datasets()
        except Exception as e:
            print(f"✗ LegalBERT模型加载失败: {e}")
            print("使用模拟模式")
            self.model = None

    def _load_question_datasets(self):
        """Mark the legal question-bank dataset interface as available.

        Actual datasets (e.g. ``datasets.load_dataset("WNJXYK/LawQA")`` or
        ``"CSHaitao/LexEval"``) are not downloaded here because that requires
        network access and storage; only the flag is set.
        """
        try:
            self.question_bank_enabled = True
            print("✓ 法律题库数据集接口已就绪（可通过Hugging Face加载）")
        except Exception as e:
            print(f"⚠ 法律题库数据集加载跳过: {e}")
            self.question_bank_enabled = False

    async def analyze_legal_text(self, text: str) -> Dict[str, Any]:
        """Analyze legal text: embeddings (if model is loaded) plus concepts.

        Returns a dict with a truncated echo of the text, optional
        ``embeddings``, and an ``analysis`` sub-dict; on model error the dict
        carries an ``error`` key instead.
        """
        if self.model is None:
            # Simulation mode: canned analysis without a model.
            return {
                "text": text[:100] + "..." if len(text) > 100 else text,
                "analysis": {
                    "legal_concepts": ["合同", "纠纷", "管辖"],
                    "sentiment": "neutral",
                    "complexity": "medium"
                }
            }

        try:
            inputs = self.tokenizer(
                text,
                return_tensors="pt",
                truncation=True,
                max_length=self.max_length,
                padding=True
            ).to(self.device)

            with torch.no_grad():
                outputs = self.model(**inputs)
                # Mean-pool the last hidden state into one sentence vector.
                embeddings = outputs.last_hidden_state.mean(dim=1).cpu().numpy()

            return {
                "text": text[:100] + "..." if len(text) > 100 else text,
                "embeddings": embeddings.tolist(),
                "analysis": {
                    "legal_concepts": self._extract_concepts(text),
                    "sentiment": "neutral",
                    "complexity": "medium"
                }
            }
        except Exception as e:
            print(f"LegalBERT分析错误: {e}")
            return {
                "text": text,
                "error": str(e),
                "analysis": {}
            }

    async def extract_entities(self, text: str) -> List[Dict[str, Any]]:
        """Extract legal entities via keyword matching.

        Simplified extraction (a real NER model could replace this). Each hit
        records the first occurrence position of the keyword.
        """
        entities = []

        # Common legal-entity keywords grouped by entity type.
        legal_keywords = {
            "法律条文": ["法", "条例", "规定", "条", "款", "项"],
            "主体": ["当事人", "原告", "被告", "申请人", "被申请人"],
            "行为": ["起诉", "上诉", "申请", "执行", "履行"],
            "地点": ["人民法院", "仲裁委员会", "住所地", "履行地"]
        }

        for entity_type, keywords in legal_keywords.items():
            for keyword in keywords:
                # Single scan: `find` both tests membership and gives the
                # position (the original scanned the text three times).
                start = text.find(keyword)
                if start != -1:
                    entities.append({
                        "type": entity_type,
                        "text": keyword,
                        "start": start,
                        "end": start + len(keyword)
                    })

        return entities

    async def classify_legal_document(self, text: str) -> Dict[str, Any]:
        """Classify a legal document by keyword-hit counts per category."""
        categories = {
            "合同纠纷": ["合同", "协议", "约定", "履行"],
            "侵权纠纷": ["侵权", "损害", "赔偿"],
            "劳动争议": ["劳动", "工资", "劳动合同"],
            "婚姻家庭": ["离婚", "财产", "抚养"],
            "刑事": ["犯罪", "刑罚", "刑事"]
        }

        # Score = number of category keywords present in the text.
        scores = {}
        for category, keywords in categories.items():
            scores[category] = sum(1 for keyword in keywords if keyword in text)

        max_category = max(scores.items(), key=lambda x: x[1])[0] if scores else "其他"

        return {
            "category": max_category,
            # Normalize by text length (per 100 chars) so long texts don't
            # get inflated confidence just from more keyword hits.
            "confidence": scores[max_category] / max(len(text) / 100, 1) if scores[max_category] > 0 else 0,
            "scores": scores
        }

    def _extract_concepts(self, text: str) -> List[str]:
        """Extract legal concepts present in the text (simplified)."""
        legal_terms = ["合同", "纠纷", "管辖", "诉讼", "仲裁", "履行", "违约", "责任"]
        return [term for term in legal_terms if term in text]

    async def generate_question(
        self,
        subject: str,
        knowledge_point: str,
        difficulty: str = "medium"
    ) -> Dict[str, Any]:
        """
        Generate a question from a knowledge point (template-based).

        A full implementation could extract relevant passages from legal
        texts, generate questions with the model, and augment with the
        question bank; currently a deterministic template is returned.

        Args:
            subject: Subject area (civil law, criminal law, ...)
            knowledge_point: The knowledge point to test
            difficulty: Difficulty level

        Returns:
            The generated question dict.
        """
        return {
            "question_id": f"bert_gen_{subject}_{knowledge_point}",
            "content": f"关于{knowledge_point}，下列说法正确的是：",
            "options": [
                f"选项A：{knowledge_point}的相关规则A",
                f"选项B：{knowledge_point}的相关规则B",
                f"选项C：{knowledge_point}的相关规则C（正确）",
                f"选项D：{knowledge_point}的相关规则D"
            ],
            "correct_answer": "选项C",
            "explanation": f"根据相关法律规定，{knowledge_point}的正确理解是选项C。",
            "type": "single_choice",
            "difficulty": difficulty,
            "category": subject,
            "knowledge_points": [knowledge_point],
            "generated_by": "LegalBERT",
            "note": "此题目由LegalBERT生成，建议结合真实题库验证"
        }

    async def analyze_question_structure(self, question_content: str) -> Dict[str, Any]:
        """
        Analyze the structure of a question (with optional Redis caching).

        Args:
            question_content: The question text

        Returns:
            Structure analysis: type, keywords, difficulty, legal domain,
            and structural flags.
        """
        # Build the cache key once (it was duplicated before).
        cache_key = None
        if self.enable_cache and self.redis:
            cache_key = f"bert:question_structure:{hashlib.md5(question_content.encode()).hexdigest()}"
            cached = self.redis.get(cache_key)
            if cached:
                return json.loads(cached)

        # Question type: concept / provision / case / comprehensive.
        question_type = self._classify_question_type(question_content)

        # Keyword buckets (time / subject / action / result words).
        keywords = self._identify_keywords(question_content)

        # Heuristic difficulty estimate.
        difficulty = self._estimate_difficulty(question_content)

        # Legal domain via the document classifier.
        legal_domain = await self.classify_legal_document(question_content)

        result = {
            "question_type": question_type,
            "keywords": keywords,
            "difficulty": difficulty,
            "legal_domain": legal_domain.get("category", "其他"),
            "has_case": "案例" in question_content or "甲" in question_content or "乙" in question_content,
            "has_provision": any(word in question_content for word in ["法", "条", "规定", "条例"]),
            "structure": {
                # Long questions are assumed to carry background information.
                "has_background": len(question_content) > 200,
                "has_condition": "如果" in question_content or "当" in question_content,
                "has_question": "?" in question_content or "？" in question_content or "正确的是" in question_content
            }
        }

        # Cache for 24 hours.
        if cache_key:
            self.redis.setex(cache_key, 86400, json.dumps(result, ensure_ascii=False))

        return result

    async def extract_legal_reasoning_chain(
        self,
        question_content: str,
        user_notes: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Extract the syllogistic reasoning chain (major premise - minor
        premise - conclusion).

        Args:
            question_content: The question text
            user_notes: The user's analysis notes (optional)

        Returns:
            Reasoning-chain analysis result.
        """
        # Major premise: legal provisions mentioned in the question.
        entities = await self.extract_entities(question_content)
        legal_provisions = [e for e in entities if e["type"] == "法律条文"]

        # Minor premise: key facts extracted from the question.
        key_facts = await self.extract_key_facts(question_content)

        # Conclusion: taken from the user's notes when they state one.
        conclusion = None
        if user_notes:
            if "结论" in user_notes or "答案" in user_notes:
                conclusion = user_notes

        reasoning_chain = {
            "major_premise": {
                "type": "法律条文",
                "provisions": [e["text"] for e in legal_provisions],
                "description": "相关法律条文和规定"
            },
            "minor_premise": {
                "type": "案件事实",
                "facts": [f["text"] for f in key_facts],
                "description": "题目中的关键事实"
            },
            "conclusion": {
                "type": "法律判断",
                "content": conclusion or "需要根据大前提和小前提进行推理",
                "description": "基于法律条文和事实得出的结论"
            },
            "reasoning_completeness": self._assess_reasoning_completeness(
                legal_provisions, key_facts, conclusion
            )
        }

        return reasoning_chain

    async def detect_question_traps(self, question_content: str) -> List[Dict[str, Any]]:
        """
        Detect common traps in the question wording.

        Args:
            question_content: The question text

        Returns:
            List of detected traps, each with type, severity, and position.
        """
        traps = []

        # 1. Absolute wording traps.
        absolute_words = ["必须", "一定", "绝对", "必然", "总是", "永远", "所有", "任何", "完全"]
        for word in absolute_words:
            pos = question_content.find(word)
            if pos != -1:
                traps.append({
                    "type": "绝对化表述",
                    "word": word,
                    "description": f"题目中包含绝对化表述'{word}'，需要谨慎判断",
                    "severity": "high",
                    "position": pos
                })

        # 2. Negative-question traps ("which is INCORRECT").
        negative_questions = ["错误的是", "不正确的是", "不符合", "不属于", "不包括", "不能"]
        for phrase in negative_questions:
            pos = question_content.find(phrase)
            if pos != -1:
                traps.append({
                    "type": "反向提问",
                    "phrase": phrase,
                    "description": f"题目是反向提问（'{phrase}'），注意选择错误的选项",
                    "severity": "high",
                    "position": pos
                })

        # 3. Conditional-restriction traps.
        # NOTE(review): the literal "在...情况下" will only match if the text
        # contains that exact ellipsis string — it was probably meant as a
        # pattern; kept as-is to preserve behavior.
        condition_words = ["在...情况下", "如果", "当", "只有", "除非", "若"]
        for word in condition_words:
            pos = question_content.find(word)
            if pos != -1:
                traps.append({
                    "type": "条件限定",
                    "word": word,
                    "description": f"题目包含条件限定'{word}'，注意条件的适用范围",
                    "severity": "medium",
                    "position": pos
                })

        # 4. Time traps (deadlines, limitation periods, ordering).
        time_words = ["之前", "之后", "期间", "时效", "期限", "届满"]
        for word in time_words:
            pos = question_content.find(word)
            if pos != -1:
                traps.append({
                    "type": "时间陷阱",
                    "word": word,
                    "description": f"题目涉及时间概念'{word}'，注意时间顺序和期限",
                    "severity": "medium",
                    "position": pos
                })

        # 5. Subject-confusion trap: multiple parties mentioned.
        subject_words = ["当事人", "原告", "被告", "申请人", "被申请人"]
        present_subjects = [w for w in subject_words if w in question_content]
        if len(present_subjects) > 1:
            traps.append({
                "type": "主体混淆",
                "description": "题目涉及多个主体，注意区分不同主体的权利义务",
                "severity": "medium",
                "subjects": present_subjects
            })

        return traps

    async def analyze_option_similarity(
        self,
        question_content: str,
        options: List[str]
    ) -> Dict[str, Any]:
        """
        Analyze pairwise similarity between answer options.

        Uses BERT embeddings with cosine similarity when the model is loaded;
        otherwise falls back to keyword overlap.

        Args:
            question_content: The question text (currently unused by the
                similarity computation itself)
            options: The option texts

        Returns:
            Similarity matrix, similar pairs (cosine > 0.8), distinct
            options, and a summary string.
        """
        if not options or len(options) < 2:
            return {
                "similarity_matrix": {},
                "similar_pairs": [],
                "distinct_options": options,
                "analysis": "选项数量不足，无法进行相似度分析"
            }

        if self.model and self.tokenizer:
            try:
                # Mean-pooled embedding per option.
                option_embeddings = []
                for option in options:
                    inputs = self.tokenizer(
                        option,
                        return_tensors="pt",
                        truncation=True,
                        max_length=128,
                        padding=True
                    ).to(self.device)

                    with torch.no_grad():
                        outputs = self.model(**inputs)
                        embedding = outputs.last_hidden_state.mean(dim=1).cpu().numpy()[0]
                        option_embeddings.append(embedding)

                import numpy as np
                similarity_matrix = {}
                similar_pairs = []

                # Upper-triangular pairwise cosine similarity.
                for i, emb1 in enumerate(option_embeddings):
                    for j, emb2 in enumerate(option_embeddings):
                        if i < j:
                            denom = float(np.linalg.norm(emb1) * np.linalg.norm(emb2))
                            # Guard against zero-vector embeddings to avoid
                            # a ZeroDivisionError / NaN.
                            similarity = float(np.dot(emb1, emb2) / denom) if denom else 0.0
                            similarity_matrix[f"option_{i}_option_{j}"] = similarity

                            if similarity > 0.8:  # high-similarity threshold
                                similar_pairs.append({
                                    "option_i": i,
                                    "option_j": j,
                                    "similarity": similarity,
                                    "description": f"选项{i+1}和选项{j+1}高度相似，需要仔细对比"
                                })

                return {
                    "similarity_matrix": similarity_matrix,
                    "similar_pairs": similar_pairs,
                    "distinct_options": [i for i in range(len(options)) if not any(
                        (p["option_i"] == i or p["option_j"] == i) for p in similar_pairs
                    )],
                    "analysis": f"检测到{len(similar_pairs)}对相似选项，建议重点对比"
                }
            except Exception as e:
                print(f"BERT相似度计算错误: {e}")

        # Fallback: keyword-overlap similarity.
        return self._analyze_option_similarity_fallback(options)

    async def extract_key_facts(self, case_text: str) -> List[Dict[str, Any]]:
        """
        Extract key facts from a case description.

        Sentences (split on "。") containing keywords are collected into
        time / subject / action / result fact categories, then deduplicated
        by sentence text.

        Args:
            case_text: The case text

        Returns:
            List of unique key-fact dicts.
        """
        # Split into sentences once, instead of once per keyword as before.
        sentences = case_text.split("。")

        facts = []
        # Time facts: importance is "high" only for limitation/period words.
        facts += self._collect_sentence_facts(
            sentences, ["年", "月", "日", "时", "期间", "期限", "时效"], "时间"
        )
        # Subject facts: skip overly long sentences (>= 100 chars).
        facts += self._collect_sentence_facts(
            sentences, ["甲", "乙", "丙", "当事人", "原告", "被告", "公司", "企业"], "主体",
            max_len=100
        )
        # Action facts.
        facts += self._collect_sentence_facts(
            sentences, ["签订", "履行", "违约", "起诉", "申请", "执行", "交付", "支付"], "行为"
        )
        # Result/consequence facts.
        facts += self._collect_sentence_facts(
            sentences, ["损失", "损害", "赔偿", "责任", "后果", "影响"], "结果"
        )

        # Deduplicate by sentence text, keeping the first occurrence.
        seen_texts = set()
        unique_facts = []
        for fact in facts:
            if fact["text"] not in seen_texts:
                seen_texts.add(fact["text"])
                unique_facts.append(fact)

        return unique_facts

    def _collect_sentence_facts(
        self,
        sentences: List[str],
        keywords: List[str],
        fact_type: str,
        max_len: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """Collect sentences containing any keyword as facts of `fact_type`.

        `max_len` (exclusive) skips sentences that are too long. Time facts
        ("时间") get "medium" importance unless they mention a period or
        limitation word; all other types are "high".
        """
        facts = []
        for keyword in keywords:
            for sentence in sentences:
                if keyword not in sentence:
                    continue
                if max_len is not None and len(sentence) >= max_len:
                    continue
                if fact_type == "时间":
                    importance = "high" if "期间" in sentence or "时效" in sentence else "medium"
                else:
                    importance = "high"
                facts.append({
                    "type": fact_type,
                    "text": sentence.strip(),
                    "keyword": keyword,
                    "importance": importance
                })
        return facts

    # --- helpers -----------------------------------------------------------

    def _classify_question_type(self, content: str) -> str:
        """Classify the question type (case / comprehensive / provision / concept)."""
        if "案例" in content or "甲" in content or "乙" in content:
            return "案例题"
        elif any(word in content for word in ["法", "条", "规定", "条例"]):
            if "案例" in content or len(content) > 300:
                return "综合题"
            return "法条题"
        else:
            return "概念题"

    def _identify_keywords(self, content: str) -> Dict[str, List[str]]:
        """Bucket keywords present in the content by semantic role."""
        time_words = ["年", "月", "日", "期间", "时效", "期限"]
        subject_words = ["当事人", "原告", "被告", "甲", "乙", "公司"]
        action_words = ["签订", "履行", "违约", "起诉", "申请"]
        result_words = ["损失", "损害", "赔偿", "责任"]

        return {
            "时间词": [w for w in time_words if w in content],
            "主体词": [w for w in subject_words if w in content],
            "行为词": [w for w in action_words if w in content],
            "结果词": [w for w in result_words if w in content]
        }

    def _estimate_difficulty(self, content: str) -> str:
        """Estimate difficulty from length and complexity indicators."""
        length = len(content)
        # Count discourse connectives and excessive length as complexity.
        complexity_indicators = sum([
            "如果" in content,
            "但是" in content,
            "然而" in content,
            "同时" in content,
            "此外" in content,
            length > 500
        ])

        if complexity_indicators >= 3 or length > 600:
            return "hard"
        elif complexity_indicators >= 1 or length > 300:
            return "medium"
        else:
            return "easy"

    def _assess_reasoning_completeness(
        self,
        provisions: List[Dict],
        facts: List[Dict],
        conclusion: Optional[str]
    ) -> Dict[str, Any]:
        """Score how complete the syllogistic reasoning chain is (0-100)."""
        has_major_premise = len(provisions) > 0
        has_minor_premise = len(facts) > 0
        has_conclusion = conclusion is not None

        # 33 + 33 + 34 so a complete chain scores exactly 100.
        completeness_score = 0
        if has_major_premise:
            completeness_score += 33
        if has_minor_premise:
            completeness_score += 33
        if has_conclusion:
            completeness_score += 34

        return {
            "score": completeness_score,
            "has_major_premise": has_major_premise,
            "has_minor_premise": has_minor_premise,
            "has_conclusion": has_conclusion,
            "level": "complete" if completeness_score == 100 else "partial" if completeness_score >= 66 else "incomplete"
        }

    def _analyze_option_similarity_fallback(self, options: List[str]) -> Dict[str, Any]:
        """Keyword-overlap fallback for option similarity (no model needed)."""
        similar_pairs = []

        # Legal-term keyword set per option.
        legal_terms = ["合同", "侵权", "责任", "义务", "权利", "违约", "赔偿"]
        option_keywords = [
            {term for term in legal_terms if term in option}
            for option in options
        ]

        # Pairs sharing >= 2 keywords count as similar; similarity is the
        # Jaccard-like ratio of shared keywords to the larger keyword set.
        for i in range(len(option_keywords)):
            for j in range(i + 1, len(option_keywords)):
                common_keywords = option_keywords[i] & option_keywords[j]
                if len(common_keywords) >= 2:
                    similar_pairs.append({
                        "option_i": i,
                        "option_j": j,
                        "similarity": len(common_keywords) / max(len(option_keywords[i]), len(option_keywords[j])),
                        "common_keywords": list(common_keywords),
                        "description": f"选项{i+1}和选项{j+1}有共同关键词：{', '.join(common_keywords)}"
                    })

        return {
            "similarity_matrix": {},
            "similar_pairs": similar_pairs,
            "distinct_options": [i for i in range(len(options)) if not any(
                (p["option_i"] == i or p["option_j"] == i) for p in similar_pairs
            )],
            "analysis": f"基于关键词分析，检测到{len(similar_pairs)}对相似选项"
        }


def get_legalbert_service() -> LegalBERTService:
    """Factory returning a fresh LegalBERT service implementation."""
    service = LegalBERTServiceImpl()
    return service

