"""
deepseek_llm.py 模块
DeepSeek大模型的本地部署实现（基于transformers）
支持DeepSeek-7B/13B-Chat等对话模型
sk-50364eedb8394834830691bc3b727df7
"""
import copy
from threading import Thread
from typing import Any, Dict, Iterator, Optional

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    TextIteratorStreamer,
)

from src.llm.base_llm import BaseLLM

class DeepSeekLLM(BaseLLM):
    """Local implementation of DeepSeek chat models via HuggingFace transformers."""

    def __init__(
        self,
        model_name_or_path: str = "deepseek-ai/DeepSeek-7B-Chat",
        device: str = "auto",
        temperature: float = 0.7,
        max_new_tokens: int = 1024,
        **kwargs
    ):
        """
        Initialize the DeepSeek model.

        Args:
            model_name_or_path: Hub model name or local path.
            device: Device to run on ("cuda" or "cpu"; "auto" autodetects).
            temperature: Sampling temperature (0-1; lower is more deterministic).
            max_new_tokens: Maximum number of new tokens to generate.
            **kwargs: Extra fields forwarded to GenerationConfig.
        """
        super().__init__()
        self.model_name_or_path = model_name_or_path
        self.temperature = temperature
        self.max_new_tokens = max_new_tokens

        # Auto-detect the device when requested.
        if device == "auto":
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
        else:
            self.device = device

        # Load tokenizer and model. trust_remote_code is required by the
        # DeepSeek repos; fp16 on GPU, fp32 on CPU.
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name_or_path,
            trust_remote_code=True
        )
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name_or_path,
            torch_dtype=torch.float16 if self.device == "cuda" else torch.float32,
            device_map="auto" if self.device == "cuda" else None,
            trust_remote_code=True
        )

        # Some chat tokenizers ship without a pad token; fall back to EOS so
        # generate() does not warn or fail when padding is needed.
        pad_token_id = self.tokenizer.pad_token_id
        if pad_token_id is None:
            pad_token_id = self.tokenizer.eos_token_id

        # Generation defaults. do_sample must be enabled for `temperature`
        # to take effect — HF silently ignores it under greedy decoding.
        self.generation_config = GenerationConfig(
            temperature=temperature,
            max_new_tokens=max_new_tokens,
            do_sample=temperature > 0,
            pad_token_id=pad_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
            **kwargs
        )

        # device_map handles GPU placement; only CPU needs an explicit move.
        if self.device == "cpu":
            self.model = self.model.to("cpu")

        print(f"DeepSeek模型加载完成，运行设备: {self.device}")

    def _build_inputs(self, prompt: str) -> torch.Tensor:
        """Apply the DeepSeek chat template to a single-user-turn prompt
        and return input ids on the model's device."""
        messages = [{"role": "user", "content": prompt}]
        return self.tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt"
        ).to(self.model.device)

    def _clone_config(self) -> GenerationConfig:
        """Deep-copy the shared GenerationConfig so per-call overrides never
        mutate it. (GenerationConfig does not reliably expose a .copy()
        method across transformers versions, so deepcopy is used.)"""
        return copy.deepcopy(self.generation_config)

    def generate(
        self,
        prompt: str,
        temperature: Optional[float] = None,
        max_new_tokens: Optional[int] = None,
        **kwargs
    ) -> str:
        """
        Generate a text response for a single prompt.

        Args:
            prompt: Input prompt text.
            temperature: Per-call temperature override (takes precedence over
                the value given at construction time).
            max_new_tokens: Per-call max-new-tokens override.
            **kwargs: Additional GenerationConfig overrides for this call.

        Returns:
            The generated response text (prompt stripped, special tokens removed).
        """
        input_ids = self._build_inputs(prompt)

        gen_config = self._clone_config()
        if temperature is not None:
            gen_config.temperature = temperature
        if max_new_tokens is not None:
            gen_config.max_new_tokens = max_new_tokens
        if kwargs:
            # GenerationConfig.update only accepts keyword arguments.
            gen_config.update(**kwargs)

        with torch.no_grad():
            outputs = self.model.generate(
                input_ids=input_ids,
                generation_config=gen_config
            )

        # Decode only the newly generated tokens (drop the echoed prompt).
        response_ids = outputs[:, input_ids.shape[1]:]
        return self.tokenizer.decode(response_ids[0], skip_special_tokens=True)

    def stream_generate(self, prompt: str, **kwargs) -> Iterator[str]:
        """
        Stream the response incrementally (for real-time display).

        Uses the standard HF streaming pattern: generate() runs in a worker
        thread and feeds a TextIteratorStreamer that this generator drains.
        (The previous implementation iterated the generate() output directly,
        which walks whole sequences — not tokens — and raised on .item().)

        Args:
            prompt: Input prompt text.
            **kwargs: Additional GenerationConfig overrides for this call.

        Yields:
            Decoded text fragments as they are produced.
        """
        input_ids = self._build_inputs(prompt)

        gen_config = self._clone_config()
        if kwargs:
            # Must be splatted: update(kwargs) positionally raises TypeError.
            gen_config.update(**kwargs)

        streamer = TextIteratorStreamer(
            self.tokenizer,
            skip_prompt=True,
            skip_special_tokens=True
        )
        worker = Thread(
            target=self.model.generate,
            kwargs={
                "input_ids": input_ids,
                "generation_config": gen_config,
                "streamer": streamer,
            },
            daemon=True,
        )
        worker.start()
        for text_chunk in streamer:
            yield text_chunk
        worker.join()
