import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import List, Dict
import re


class LegalAssistant:
    """Legal Q&A assistant wrapping a causal LM with a labor-law prompt.

    Builds a lawyer-persona prompt around the user's question, generates an
    answer with the underlying model, then annotates any cited statutes
    (《…》第N条) with short explanations from a built-in lookup table.

    NOTE(review): the prompt and statute database are specific to Chinese
    labor law (《劳动合同法》); extend ``_init_law_database`` for other laws.
    """

    def __init__(self, model_path: str):
        # Load tokenizer and model; trust_remote_code is needed for model
        # families (e.g. Qwen derivatives) that ship custom code with weights.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_path,
            trust_remote_code=True,
            pad_token='<|endoftext|>'  # explicit pad token so padding works
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            torch_dtype=torch.float16,
            device_map="auto",
            trust_remote_code=True
        ).eval()  # inference-only mode

        # Fallback: some tokenizers ignore the pad_token kwarg above.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Reserved for multi-turn support; currently unused by generation.
        self.dialogue_history: List[str] = []
        self.law_db = self._init_law_database()

    def _init_law_database(self) -> Dict[str, Dict[int, str]]:
        """Return the statute lookup table: law name -> {article no: summary}."""
        return {
            "劳动合同法": {
                10: "建立劳动关系应当订立书面劳动合同",
                82: "未签订劳动合同的二倍工资罚则",
                47: "经济补偿的计算标准"
            }
        }

    def _legal_enhanced_prompt(self, query: str) -> str:
        """Wrap ``query`` in the lawyer-persona instruction template."""
        prompt = f"""你是一名专业律师，请根据中国法律回答下列问题：
问题：{query}
要求：
1. 引用《劳动合同法》相关条款
2. 分步骤给出建议
3. 语言简洁明确

请结合以下法律条款回答："""
        return prompt

    def generate_response(self, query: str) -> str:
        """Generate a lawyer-style answer for ``query``.

        Returns the decoded continuation with statute citations annotated,
        or an error string of the form ``生成错误：...`` on failure.
        """
        try:
            legal_prompt = self._legal_enhanced_prompt(query)

            # BUGFIX: the template already embeds the query; appending
            # "用户：{query}" again duplicated it. Only open the answer turn.
            full_prompt = f"{legal_prompt}\n律师："

            # Tokenize with an explicit attention mask (padding=True) so
            # generate() does not have to infer mask positions.
            inputs = self.tokenizer(
                full_prompt,
                return_tensors="pt",
                padding=True,
                truncation=True,
                max_length=1024
            ).to(self.model.device)

            outputs = self.model.generate(
                input_ids=inputs.input_ids,
                attention_mask=inputs.attention_mask,  # pass the mask explicitly
                max_new_tokens=512,
                # BUGFIX: without do_sample=True, generation is greedy and
                # temperature/top_p are silently ignored.
                do_sample=True,
                temperature=0.9,
                top_p=0.9,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.pad_token_id
            )

            # Decode only the newly generated tokens (skip the prompt echo).
            response = self.tokenizer.decode(
                outputs[0][inputs.input_ids.shape[-1]:],
                skip_special_tokens=True
            )

            # Annotate any cited statutes with their explanations.
            return self._add_law_explanation(response)

        except Exception as e:
            # Boundary-level catch: surface the failure as a reply rather
            # than crashing an interactive session.
            return f"生成错误：{str(e)}"

    def _add_law_explanation(self, text: str) -> str:
        """Append known statute summaries after citations like 《X法》第N条.

        Uses a single re.sub pass with a replacement callback. BUGFIX: the
        previous findall + str.replace loop re-replaced already-annotated
        citations when the same article was cited more than once, producing
        nested "（…）（…）" annotations.
        """
        pattern = r"《(.+?)》第(\d+)条"

        def annotate(match: "re.Match") -> str:
            law = match.group(1)
            article = int(match.group(2))
            explanation = self.law_db.get(law, {}).get(article)
            if explanation is None:
                # Unknown law/article: leave the citation untouched.
                return match.group(0)
            return f"{match.group(0)}（{explanation}）"

        return re.sub(pattern, annotate, text)


# ----------------- Usage example -----------------
if __name__ == "__main__":
    # Initialize the assistant (make sure the model path is correct).
    assistant = LegalAssistant(
        model_path="../DeepSeek-R1-Distill-Qwen-1.5B"
    )

    # Sample queries to exercise the assistant.
    for question in (
        "公司没签劳动合同怎么办？",
        "工作8个月能要多少赔偿？",
    ):
        print(f"[用户] {question}")
        answer = assistant.generate_response(question)
        print(f"[律师] {answer}\n")