"""
模型推理模块，负责处理模型的推理请求
支持Hugging Face模型和GGUF格式模型
"""
import torch
import logging
from typing import Dict, Any, List, Optional
from transformers import GenerationConfig
from .model_loader import ModelLoader
from ..config import (
    MODEL_MAX_LENGTH,
    MODEL_MAX_NEW_TOKENS,
    MODEL_TEMPERATURE,
    MODEL_TOP_P,
    MODEL_TOP_K
)

# Configure logging.
# NOTE(review): calling basicConfig at import time configures the process-wide
# root logger as a side effect; consider leaving this to the application entry
# point — confirm no other module depends on it.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ModelInference:
    """Text-generation inference over a model provided by ModelLoader.

    Supports Hugging Face transformer models and GGUF-format models,
    dispatching on the model type reported by the loader.
    """

    def __init__(self):
        # The loader owns locating/loading the weights and tokenizer.
        self.loader = ModelLoader()
        self.model = None
        self.tokenizer = None
        self.model_type = None
        self._initialize()

    def _initialize(self):
        """Fetch the model/tokenizer pair and record the loader's model type."""
        self.model, self.tokenizer = self.loader.get_model_and_tokenizer()
        self.model_type = self.loader.model_type

    def generate_text(
        self,
        prompt: str,
        max_new_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        top_k: Optional[int] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """Generate text for a single prompt.

        Args:
            prompt: Input prompt text.
            max_new_tokens: Maximum number of new tokens to generate;
                defaults to MODEL_MAX_NEW_TOKENS when None.
            temperature: Sampling temperature (controls randomness);
                defaults to MODEL_TEMPERATURE when None.
            top_p: Nucleus-sampling parameter; defaults to MODEL_TOP_P when None.
            top_k: Top-k sampling parameter; defaults to MODEL_TOP_K when None.
            **kwargs: Extra options forwarded to the backend generator.

        Returns:
            Dict with "prompt", "generated_text", "response" (new text only,
            stripped) and "metadata" (the effective sampling parameters).
        """
        # Use explicit `is None` checks rather than `x or DEFAULT`: legitimate
        # falsy values (temperature=0.0, top_p=0.0, top_k=0 — e.g. greedy
        # decoding) must not be silently replaced by the configured defaults.
        if max_new_tokens is None:
            max_new_tokens = MODEL_MAX_NEW_TOKENS
        if temperature is None:
            temperature = MODEL_TEMPERATURE
        if top_p is None:
            top_p = MODEL_TOP_P
        if top_k is None:
            top_k = MODEL_TOP_K

        # Dispatch on the backend type reported by the loader.
        if self.model_type == "gguf":
            return self._generate_with_gguf(prompt, max_new_tokens, temperature, top_p, top_k, **kwargs)
        return self._generate_with_huggingface(prompt, max_new_tokens, temperature, top_p, top_k, **kwargs)

    def _generate_with_huggingface(
        self,
        prompt: str,
        max_new_tokens: int,
        temperature: float,
        top_p: float,
        top_k: int,
        **kwargs
    ) -> Dict[str, Any]:
        """Generate text with a Hugging Face transformers model."""
        generation_config = GenerationConfig(
            max_new_tokens=max_new_tokens,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            # Fall back to the EOS token for padding when the tokenizer has one.
            pad_token_id=self.tokenizer.eos_token_id if hasattr(self.tokenizer, 'eos_token_id') else None,
            **kwargs
        )

        # Encode the prompt, truncating to the model's maximum context length.
        inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=MODEL_MAX_LENGTH)
        inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

        # Inference only — no gradients needed.
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                generation_config=generation_config
            )

        generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Strip the decoded prompt prefix so "response" holds only the new
        # text. Decode the (possibly truncated) input ids once and reuse it,
        # rather than re-decoding inside the slice expression.
        prompt_text = self.tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True)
        response = generated_text[len(prompt_text):]

        return {
            "prompt": prompt,
            "generated_text": generated_text,
            "response": response.strip(),
            "metadata": {
                "max_new_tokens": max_new_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k
            }
        }

    def _generate_with_gguf(
        self,
        prompt: str,
        max_new_tokens: int,
        temperature: float,
        top_p: float,
        top_k: int,
        **kwargs
    ) -> Dict[str, Any]:
        """Generate text with a GGUF-format model."""
        try:
            # NOTE(review): `hasattr(model, 'eval')` is used here to tell the
            # two GGUF backends apart (tokenizer-based generate vs. direct
            # callable returning an OpenAI-style dict). This heuristic is not
            # grounded in the loader code visible here — verify against
            # ModelLoader before relying on it.
            if hasattr(self.model, 'eval'):
                # Tokenizer-based backend (per the original comment: ctransformers).
                inputs = self.tokenizer(prompt, return_tensors="pt")

                outputs = self.model.generate(
                    inputs["input_ids"],
                    max_new_tokens=max_new_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    top_k=top_k,
                    **kwargs
                )

                generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
                # Assumes the decoded output begins with the literal prompt
                # text — TODO confirm this holds for the tokenizer in use.
                response = generated_text[len(prompt):]
            else:
                # llama-cpp-python style: the model is called directly and
                # returns a dict with a "choices" list.
                generated_text = self.model(
                    prompt,
                    max_tokens=max_new_tokens,
                    temperature=temperature,
                    top_p=top_p,
                    top_k=top_k,
                    **kwargs
                )

                response = generated_text['choices'][0]['text']
                generated_text = prompt + response

            return {
                "prompt": prompt,
                "generated_text": generated_text,
                "response": response.strip(),
                "metadata": {
                    "max_new_tokens": max_new_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "top_k": top_k
                }
            }
        except Exception as e:
            logger.error(f"GGUF模型生成文本失败: {str(e)}")
            raise

    def batch_generate(
        self,
        prompts: List[str],
        **kwargs
    ) -> List[Dict[str, Any]]:
        """Generate text for each prompt in turn.

        Args:
            prompts: List of input prompt strings.
            **kwargs: Generation parameters forwarded to generate_text.

        Returns:
            One generate_text result dict per prompt, in input order.
        """
        return [self.generate_text(prompt, **kwargs) for prompt in prompts]
