from transformers import AutoTokenizer, AutoModelForCausalLM
import logging

class ModelLoadError(Exception):
    """Raised when loading the model or tokenizer fails.

    Wraps the underlying exception (chained via ``from``) so callers can
    catch one domain-specific error type instead of arbitrary failures.
    """

class DeepSeekModel:
    """Thin wrapper around a locally stored causal-LM checkpoint.

    Holds a tokenizer/model pair that stays ``None`` until
    :meth:`load_model` succeeds; :meth:`generate_response` then produces
    text completions from it.
    """

    def __init__(self) -> None:
        # Both remain None until load_model() succeeds; generate_response
        # guards on this.
        self.model = None
        self.tokenizer = None
        self.logger = logging.getLogger(__name__)

    def load_model(self, model_path: str = "C:/Users/ZQQL/Desktop/deepseek语音输入2/newmodel") -> None:
        """Load tokenizer and model weights from *model_path*.

        Args:
            model_path: Directory containing a HuggingFace checkpoint
                (config + weights + tokenizer files).

        Raises:
            ModelLoadError: On any underlying failure; the original
                exception is chained via ``from`` for debugging.
        """
        try:
            self.logger.info("正在加载模型...")
            # The Auto* classes resolve the concrete tokenizer/model
            # classes from the checkpoint's config file.
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            self.model = AutoModelForCausalLM.from_pretrained(model_path)
        except Exception as e:
            # logger.exception records the full traceback, not just the
            # message; lazy %-args avoid formatting when the level is off.
            self.logger.exception("模型加载失败: %s", e)
            raise ModelLoadError(f"无法加载模型: {str(e)}") from e
        else:
            # Success log in `else` keeps the try body minimal.
            self.logger.info("模型加载成功")

    def generate_response(self, prompt: str) -> str:
        """Generate a completion for *prompt*.

        Args:
            prompt: Input text to continue.

        Returns:
            The decoded model output, or the fallback string
            "请求处理失败" if generation fails (deliberate best-effort).

        Raises:
            ValueError: If :meth:`load_model` has not been called.
        """
        # `is None` rather than truthiness: model/tokenizer objects may
        # define surprising or expensive __bool__ semantics.
        if self.model is None or self.tokenizer is None:
            raise ValueError("模型未初始化")

        try:
            inputs = self.tokenizer.encode(prompt, return_tensors="pt")
            # NOTE(review): max_length caps prompt + completion at 200
            # tokens, so long prompts leave little room for output —
            # consider max_new_tokens; left unchanged to preserve behavior.
            outputs = self.model.generate(inputs, max_length=200)
            return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        except Exception as e:
            # Deliberate swallow-and-fallback, matching the original
            # contract; record the traceback instead of just the message.
            self.logger.exception("生成响应失败: %s", e)
            return "请求处理失败"