"""
教师模型（基础大模型）封装
用于处理模型加载、推理和生成软标签
"""

from typing import List, Dict, Any
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from vllm import LLM, SamplingParams

class TeacherModel:
    """Wrapper around a large "teacher" LLM used for knowledge distillation.

    Loads a base model (preferring vLLM for throughput, falling back to
    plain ``transformers``), generates answers for formatted math-question
    prompts, and converts output logits into soft labels for training a
    student model.
    """

    def __init__(self, model_name: str = "deepseek-ai/deepseek-coder-33b-instruct",
                 device: str = "cuda", temperature: float = 1.0):
        """Initialize the teacher model.

        Args:
            model_name: Model name or local path.
            device: Device used by the transformers fallback path.
            temperature: Default softmax temperature for soft-label
                generation (see ``_process_logits``).
        """
        self.model_name = model_name
        self.device = device
        # Bug fix: _process_logits read self.temperature, but the attribute
        # was never assigned anywhere, so soft-label generation always
        # raised AttributeError. Store a configurable default here.
        self.temperature = temperature
        self.tokenizer = None
        self.model = None
        self.vllm_model = None
        self._initialize_model()

    def _initialize_model(self):
        """Load the model and tokenizer, preferring vLLM over transformers."""
        try:
            # Prefer vLLM for efficient inference.
            # NOTE(review): "4bit" is not a quantization method vLLM accepts
            # (it expects values such as "awq" or "gptq"), so this constructor
            # likely always raises and we silently fall back to transformers
            # every time — confirm whether that is intended.
            self.vllm_model = LLM(
                model=self.model_name,
                tensor_parallel_size=2,  # adjust to the number of available GPUs
                quantization="4bit"      # quantize to reduce GPU memory usage
            )
        except Exception as e:
            print(f"vLLM initialization failed: {e}, falling back to transformers")
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                device_map="auto",
                torch_dtype=torch.float16
            )

    def generate_with_logits(self, prompt: str, temperature: float = 0.7) -> Dict[str, Any]:
        """Generate text for *prompt* and return it together with logits.

        Args:
            prompt: Input prompt.
            temperature: Sampling temperature.

        Returns:
            Dict with keys ``"text"`` (generated string) and ``"logits"``
            (tuple of per-step score tensors on the transformers path, or
            ``None`` on the vLLM path, which does not expose logits here).
        """
        if self.vllm_model:
            sampling_params = SamplingParams(temperature=temperature)
            outputs = self.vllm_model.generate(prompt, sampling_params)
            # vLLM does not directly expose logits; extra work is required.
            return {
                "text": outputs[0].outputs[0].text,
                "logits": None  # TODO: implement logits retrieval
            }
        else:
            inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    # Bug fix: without do_sample=True, generate() defaults to
                    # greedy decoding and silently ignores `temperature`.
                    do_sample=True,
                    temperature=temperature,
                    max_new_tokens=512,
                    return_dict_in_generate=True,
                    output_scores=True
                )
            return {
                "text": self.tokenizer.decode(outputs.sequences[0], skip_special_tokens=True),
                "logits": outputs.scores
            }

    def generate_soft_labels(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Generate an answer plus soft labels for *input_data* and persist it.

        Args:
            input_data: Question specification (grade, semester,
                knowledge_points, difficulty, question_type).

        Returns:
            The generation result dict, including a ``"soft_labels"`` entry.

        Raises:
            ValueError: If the generated text fails validation.
        """
        prompt = self._format_prompt(input_data)
        result = self.generate_with_logits(prompt)

        if not self.validate_output(result):
            raise ValueError("生成的输出不符合要求")

        # Convert logits to soft labels. Bug fix: always set the key (None
        # on the vLLM path) so downstream consumers such as _save_output
        # never hit a missing "soft_labels" key.
        if result["logits"] is not None:
            result["soft_labels"] = self._process_logits(result["logits"])
        else:
            result["soft_labels"] = None

        # Persist the result for later distillation training.
        self._save_output(result, input_data)

        return result

    def _format_prompt(self, input_data: Dict[str, Any]) -> str:
        """Fill the question-generation prompt template with *input_data*.

        Expects the keys: grade, semester, knowledge_points, difficulty,
        question_type; raises KeyError if any is missing.
        """
        template = """你是一位经验丰富的小学数学老师。请根据以下要求生成一道数学题：
        年级：{grade}
        学期：{semester}
        知识点：{knowledge_points}
        难度等级：{difficulty}
        题型：{question_type}
        """
        return template.format(**input_data)

    def _process_logits(self, logits, temperature=None) -> torch.Tensor:
        """Convert raw logits into soft labels via temperature-scaled softmax.

        Args:
            logits: A tensor, or the tuple of per-step score tensors that
                ``transformers``' ``generate(output_scores=True)`` returns.
            temperature: Softmax temperature; defaults to ``self.temperature``.
        """
        if temperature is None:
            temperature = self.temperature
        # Bug fix: generate() returns scores as a tuple of tensors; stack
        # them so the division/softmax below operate on a single tensor.
        if isinstance(logits, (tuple, list)):
            logits = torch.stack(logits, dim=0)
        return torch.softmax(logits / temperature, dim=-1)

    def _select_model_path(self, model_type=None, variant=None) -> str:
        """Map a (model_type, variant) pair to its model hub path.

        Bug fix: this previously read ``self.model_type``/``self.variant``,
        which are never assigned anywhere in this class, so every call
        raised AttributeError. The pair can now be passed explicitly and
        only falls back to the instance attributes when they exist.

        Raises:
            ValueError: If the combination is not a supported model.
        """
        if model_type is None:
            model_type = getattr(self, "model_type", None)
        if variant is None:
            variant = getattr(self, "variant", None)
        model_paths = {
            "67b_base": "deepseek-ai/deepseek-llm-67b-base",
            "67b_instruct": "deepseek-ai/deepseek-llm-67b-instruct",
            "33b_base": "deepseek-ai/deepseek-coder-33b-base",
            "33b_instruct": "deepseek-ai/deepseek-coder-33b-instruct",
            "16b_base": "deepseek-ai/deepseek-moe-16b-base",
            "16b_instruct": "deepseek-ai/deepseek-moe-16b-instruct"
        }
        model_key = f"{model_type}_{variant}"
        if model_key not in model_paths:
            raise ValueError(f"不支持的模型类型: {model_key}")
        return model_paths[model_key]

    def validate_output(self, generated_output: Dict[str, Any]) -> bool:
        """Check that the generated text contains all required sections.

        The text must mention all of 题目/答案/解析/知识点, contain at
        least four blank-line-separated sections, and include an answer
        section starting with "答案：" that is not trivially short.
        """
        required_fields = ['题目', '答案', '解析', '知识点']
        text = generated_output.get('text', '')

        # All required section labels must appear somewhere in the text.
        for field in required_fields:
            if field not in text:
                return False

        # The output should have at least four paragraph-like sections.
        sections = text.split('\n\n')
        if len(sections) < 4:
            return False

        # The answer section must exist and carry actual content.
        answer_section = None
        for section in sections:
            if section.startswith('答案：'):
                answer_section = section
                break
        if not answer_section or len(answer_section.strip()) < 5:
            return False

        return True

    def _save_output(self, result: Dict[str, Any], input_data: Dict[str, Any]) -> None:
        """Append the generated result and soft labels to disk and log it."""
        import json
        import os
        from datetime import datetime

        # Create the output directory if it does not exist yet.
        output_dir = "训练示例/教师模型输出"
        os.makedirs(output_dir, exist_ok=True)

        # Bug fix: result may lack "soft_labels" entirely (vLLM path);
        # .get avoids a KeyError and records null instead.
        soft_labels = result.get("soft_labels")
        if isinstance(soft_labels, torch.Tensor):
            soft_labels = soft_labels.tolist()

        # Append one JSON object per line (JSONL-style accumulation).
        soft_labels_file = os.path.join(output_dir, "soft_labels.json")
        with open(soft_labels_file, "a", encoding="utf-8") as f:
            output_data = {
                "input": input_data,
                "output": result["text"],
                "soft_labels": soft_labels,
                "timestamp": datetime.now().isoformat()
            }
            json.dump(output_data, f, ensure_ascii=False)
            f.write("\n")

        # Append a human-readable log entry for traceability.
        log_file = os.path.join(output_dir, "teacher_output.log")
        with open(log_file, "a", encoding="utf-8") as f:
            log_entry = f"[{datetime.now().isoformat()}] Generated output for input: {input_data.get('knowledge_points')}\n"
            f.write(log_entry)