import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import List, Dict, Tuple
import re
import json
from collections import deque


class LegalConsultationSystem:
    """Multi-turn legal consultation assistant backed by a causal language model.

    Wraps a HuggingFace causal LM with legal-domain prompt construction,
    statute detection/annotation, risk-keyword filtering, response formatting,
    and JSON-based session persistence.
    """

    def __init__(self, model_path: str):
        # Load tokenizer and model from the given checkpoint path.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            pad_token='<|endoftext|>'
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.bfloat16,  # more memory-efficient dtype
            device_map="auto",
            trust_remote_code=True
        ).eval()

        # Dialogue management: keep the most recent 5 exchanges.
        # Each exchange contributes two entries (user + assistant), hence * 2.
        self.max_history = 5
        self.dialogue_history = deque(maxlen=self.max_history * 2)

        # Legal knowledge augmentation configuration.
        self.law_database = self._load_law_database()
        self.risk_keywords = ["自杀", "暴力", "政治"]

        # Generation parameters.
        self.generation_config = {
            "max_new_tokens": 512,
            "temperature": 0.7,
            "top_p": 0.9,
            "repetition_penalty": 1.2,
            "do_sample": True
        }

    def _load_law_database(self) -> Dict:
        """Return the statute knowledge base (sample data).

        Maps law name -> {article number -> short explanation}.
        """
        return {
            "劳动合同法": {
                10: "建立劳动关系应当订立书面劳动合同",
                82: "未签劳动合同的二倍工资罚则",
                47: "经济补偿计算标准（N/2N）"
            },
            "民法典": {
                577: "违约责任的一般规定",
                584: "损失赔偿范围"
            }
        }

    def _build_prompt(self, query: str) -> Tuple[str, Dict]:
        """Build the law-augmented dialogue prompt and its tokenized inputs.

        Returns the full prompt string and the tokenizer output (on the
        model's device), including input_ids and attention_mask.
        """
        # 1. System prompt (instructions for the "lawyer" persona).
        system_prompt = """你是一名专业法律顾问，需遵守：
1. 严格引用中国大陆法律条文（格式：《法律名称》第XX条）
2. 分步骤给出可操作建议
3. 用【风险提示】标注潜在法律风险
4. 禁用不确定表述（如“可能”“大概”）

当前对话上下文："""

        # 2. Statutes related to the query, injected as bullet points.
        related_laws = self._detect_related_laws(query)
        law_prompt = "\n".join([f"- {law}" for law in related_laws])

        # 3. Serialized dialogue history.
        history_prompt = "\n".join([
            f"{role}: {content}" for role, content in self.dialogue_history
        ])

        # 4. Assemble the full prompt.
        full_prompt = (
            f"{system_prompt}\n{law_prompt}\n{history_prompt}\n"
            f"用户：{query}\n律师："
        )

        # 5. Tokenize (also produces the attention_mask).
        inputs = self.tokenizer(
            full_prompt,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=2048
        ).to(self.model.device)

        return full_prompt, inputs

    def _detect_related_laws(self, text: str) -> List[str]:
        """Keyword-based retrieval of statutes related to *text*."""
        detected_laws = []

        # Labor-law keywords.
        labor_keywords = ["劳动合同", "解雇", "工资", "赔偿"]
        if any(kw in text for kw in labor_keywords):
            detected_laws.extend([
                "《劳动合同法》第10条（书面劳动合同要求）",
                "《劳动合同法》第82条（二倍工资罚则）",
                "《劳动合同法》第47条（经济补偿标准）"
            ])

        # Civil-law keywords.
        civil_keywords = ["违约", "赔偿", "合同"]
        if any(kw in text for kw in civil_keywords):
            detected_laws.extend([
                "《民法典》第577条（违约责任）",
                "《民法典》第584条（损失赔偿）"
            ])

        return detected_laws

    def _postprocess(self, response: str) -> str:
        """Post-process a raw model response."""
        # 1. Annotate cited statutes with short explanations.
        response = self._add_law_explanations(response)

        # 2. Filter risky content.
        if self._risk_detection(response):
            return "该问题涉及敏感内容，建议咨询专业律师获取详细帮助。"

        # 3. Pretty-print the final text.
        return self._format_response(response)

    def _add_law_explanations(self, text: str) -> str:
        """Append a short explanation after each known statute citation.

        Uses a single-pass ``re.sub`` with a replacement callback so each
        citation is annotated exactly once. (The previous find-then-replace
        loop re-matched already-annotated citations when the same statute
        appeared more than once, duplicating the explanation.)
        """
        pattern = r"《(.+?)》第(\d+)条"

        def _annotate(match) -> str:
            law = match.group(1)
            article = int(match.group(2))
            explanation = self.law_database.get(law, {}).get(article)
            if explanation is None:
                # Unknown statute: leave the citation untouched.
                return match.group(0)
            return f"{match.group(0)}（{explanation}）"

        return re.sub(pattern, _annotate, text)

    def _risk_detection(self, text: str) -> bool:
        """Return True if *text* contains any configured risk keyword."""
        return any(kw in text for kw in self.risk_keywords)

    def _format_response(self, text: str) -> str:
        """Format the response text for display."""
        # Put numbered steps on their own lines.
        text = re.sub(r"(\d+\. )", r"\n\1", text)
        # Emphasize the risk-warning marker.
        text = re.sub(r"【风险提示】", "\n【重要风险提示】", text)
        return text.strip()

    def consult(self, query: str) -> str:
        """Handle one user query and return the processed reply.

        On any internal failure, returns an error message string instead of
        raising (the public API never throws).
        """
        try:
            # 1. Build the augmented prompt.
            full_prompt, inputs = self._build_prompt(query)

            # 2. Generate the reply.
            outputs = self.model.generate(
                input_ids=inputs.input_ids,
                attention_mask=inputs.attention_mask,
                **self.generation_config
            )

            # 3. Decode only the newly generated tokens, then post-process.
            response = self.tokenizer.decode(
                outputs[0][inputs.input_ids.shape[-1]:],
                skip_special_tokens=True
            )
            processed_response = self._postprocess(response)

            # 4. Update the dialogue history (user turn + assistant turn).
            self.dialogue_history.extend([
                ("用户", query),
                ("律师", processed_response)
            ])

            return processed_response

        except Exception as e:
            return f"咨询系统错误：{str(e)}"

    def save_session(self, filename: str):
        """Persist the dialogue history to *filename* as JSON."""
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(list(self.dialogue_history), f, ensure_ascii=False, indent=2)

    def load_session(self, filename: str):
        """Load a previously saved dialogue history from *filename*.

        NOTE: JSON round-trips tuples as lists; downstream unpacking
        (``for role, content in ...``) handles both.
        """
        with open(filename, 'r', encoding='utf-8') as f:
            history = json.load(f)
            self.dialogue_history.extend(history)


# ----------------- 使用示例 -----------------
if __name__ == "__main__":
    # Spin up the consultation system from a local model checkpoint.
    legal_system = LegalConsultationSystem(
        model_path="../DeepSeek-R1-Distill-Qwen-1.5B"
    )

    # (query, scenario label) pairs simulating a multi-turn consultation.
    demo_queries = (
        ("公司没有和我签劳动合同已经工作10个月怎么办？", "劳动权益咨询"),
        ("我的月薪是8000元，能主张多少赔偿？", "赔偿计算"),
        ("如果公司拒绝执行仲裁裁决怎么办？", "执行程序咨询"),
    )

    for user_query, label in demo_queries:
        print(f"\n[场景] {label}")
        print(f"[用户] {user_query}")
        answer = legal_system.consult(user_query)
        print(f"[律师] {answer}")

    # Persist the session transcript.
    legal_system.save_session("legal_session.json")