"""
DeepSpeed MOE + Qwen3-Coder 推理引擎
负责模型加载、推理执行和性能优化
"""

import torch
import deepspeed
import logging
from typing import Dict, List, Optional, Union, Any
import json
import time
from transformers import AutoTokenizer, AutoModelForCausalLM
from deepspeed.moe.layer import MoE
import numpy as np

from config import DeepSpeedConfig, ModelConfig

class Qwen3CoderEngine:
    """DeepSpeed MOE inference engine for Qwen3-Coder.

    Responsible for model loading, inference execution and lightweight
    performance tracking (request count, token throughput, average latency).
    """

    def __init__(self):
        # Model / tokenizer handles, populated by load_model().
        self.model = None
        self.tokenizer = None
        self.device = None
        self.is_initialized = False
        self.model_config = None

        # Performance counters, updated by _update_stats().
        self.inference_count = 0
        self.total_tokens_generated = 0
        self.avg_latency = 0.0

        # Single module logger shared by all methods (the original re-created
        # it via logging.getLogger(__name__) inside every method).
        self._logger = logging.getLogger(__name__)
        self._logger.info("Qwen3-Coder引擎已初始化")

    @staticmethod
    def _resolve_dtype(name: str) -> torch.dtype:
        """Map a config dtype string to a torch.dtype.

        Accepts both torch attribute names ("float16", "bfloat16", "float32")
        and common aliases ("fp16", "half", "bf16", "fp32").

        Raises:
            AttributeError: if the name maps to no torch dtype attribute.
        """
        aliases = {"fp16": "float16", "half": "float16",
                   "bf16": "bfloat16", "fp32": "float32"}
        return getattr(torch, aliases.get(name, name))

    def load_model(self, model_path: Optional[str] = None) -> bool:
        """Load the Qwen3-Coder model and wrap it with DeepSpeed inference.

        Args:
            model_path: Model path; falls back to DeepSpeedConfig.MODEL_PATH
                when None.

        Returns:
            bool: True on success, False on any failure (error is logged).
        """
        try:
            model_path = model_path or DeepSpeedConfig.MODEL_PATH
            self._logger.info(f"开始加载模型: {model_path}")
            start_time = time.time()

            # Select device: current CUDA device index when available, else CPU.
            self.device = torch.cuda.current_device() if torch.cuda.is_available() else "cpu"
            self._logger.info(f"使用设备: {self.device}")

            # Load tokenizer; a dedicated tokenizer path takes precedence.
            self._logger.info("加载分词器...")
            tokenizer_path = ModelConfig.TOKENIZER_PATH or model_path
            self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)

            # Ensure a pad token exists so padded generation does not fail.
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            # Resolve the dtype ONCE and reuse it for both the HF load and the
            # DeepSpeed engine. The original applied two inconsistent rules
            # (getattr(torch, DTYPE) vs DTYPE == "fp16"), which could load
            # weights in float16 but run DeepSpeed kernels in bfloat16.
            torch_dtype = self._resolve_dtype(DeepSpeedConfig.DTYPE)

            self._logger.info("加载基础模型...")
            # NOTE(review): device_map="auto" is applied only on the CPU path
            # here — confirm this is intended; DeepSpeed normally handles GPU
            # placement itself, so passing None on GPU is consistent with that.
            base_model = AutoModelForCausalLM.from_pretrained(
                model_path,
                torch_dtype=torch_dtype,
                device_map="auto" if self.device == "cpu" else None,
                trust_remote_code=True,
            )
            base_model.eval()

            self._logger.info("初始化DeepSpeed推理引擎...")
            inference_kwargs = dict(
                dtype=torch_dtype,
                mp_size=DeepSpeedConfig.TENSOR_PARALLEL_SIZE,
                moe_experts=DeepSpeedConfig.NUM_EXPERTS,
                checkpoint=None,  # a checkpoint path can be supplied here
                replace_with_kernel_inject=DeepSpeedConfig.REPLACE_WITH_KERNEL_INJECT,
            )
            # If the model is already wrapped, point DeepSpeed at the weights.
            if hasattr(base_model, 'module'):
                inference_kwargs['checkpoint'] = model_path

            ds_engine = deepspeed.init_inference(base_model, **inference_kwargs)
            self.model = ds_engine.module

            # Keep the HF model config around for introspection/stats.
            if hasattr(base_model, 'config'):
                self.model_config = base_model.config

            load_time = time.time() - start_time
            self._logger.info(f"模型加载完成，耗时: {load_time:.2f}秒")

            # Smoke-test a short generation before declaring readiness.
            self._test_inference()

            self.is_initialized = True
            self._logger.info("Qwen3-Coder引擎初始化完成")
            return True

        except Exception as e:
            self._logger.error(f"模型加载失败: {str(e)}", exc_info=True)
            return False

    def _test_inference(self):
        """Run a short smoke-test generation to verify the loaded model works.

        Raises:
            Exception: re-raised from the underlying tokenize/generate call.
        """
        try:
            test_prompt = "Write a simple Python function to calculate fibonacci numbers."
            inputs = self.tokenizer(test_prompt, return_tensors="pt")
            # Bug fix: move the encoded inputs to the model's device. The
            # original left them on CPU, which fails when running on GPU
            # (generate() already did this, see the main inference path).
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=100,
                    temperature=0.7,
                    do_sample=True,
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.encode("\n")[0] if "\n" in self.tokenizer.get_vocab() else self.tokenizer.eos_token_id
                )

            generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            self._logger.info("推理测试成功")
            self._logger.debug(f"测试生成文本长度: {len(generated_text)}")

        except Exception as e:
            self._logger.error(f"推理测试失败: {str(e)}")
            raise

    def generate(self,
                prompt: str,
                system_prompt: Optional[str] = None,
                max_new_tokens: Optional[int] = None,
                temperature: Optional[float] = None,
                top_p: Optional[float] = None,
                top_k: Optional[int] = None,
                repetition_penalty: Optional[float] = None,
                stop_sequences: Optional[List[str]] = None,
                stream: bool = False) -> Dict[str, Any]:
        """Generate a text completion for the given prompt.

        Args:
            prompt: User prompt.
            system_prompt: System prompt; defaults to ModelConfig.SYSTEM_PROMPT.
            max_new_tokens: Maximum number of new tokens to generate.
            temperature: Sampling temperature.
            top_p: Nucleus-sampling parameter.
            top_k: Top-k sampling parameter.
            repetition_penalty: Repetition penalty factor.
            stop_sequences: Stop sequences (currently unused by this path).
            stream: Streaming flag (currently unused by this path).

        Returns:
            Dict[str, Any]: response text plus usage and model-info metadata.

        Raises:
            RuntimeError: if load_model() has not been called successfully.
        """
        if not self.is_initialized:
            raise RuntimeError("模型未初始化，请先调用load_model()")

        start_time = time.time()

        try:
            # Prepend the system prompt (explicit one wins over the default).
            if system_prompt:
                full_prompt = f"{system_prompt}\n\n{prompt}"
            else:
                full_prompt = f"{ModelConfig.SYSTEM_PROMPT}\n\n{prompt}"

            # Apply config defaults. Use `is not None` consistently so a
            # caller-supplied 0/0.0 is honored rather than silently replaced
            # (the original mixed `or` with `is not None`).
            max_new_tokens = max_new_tokens if max_new_tokens is not None else DeepSpeedConfig.MAX_NEW_TOKENS
            temperature = temperature if temperature is not None else DeepSpeedConfig.TEMPERATURE
            top_p = top_p if top_p is not None else DeepSpeedConfig.TOP_P
            top_k = top_k if top_k is not None else DeepSpeedConfig.TOP_K
            repetition_penalty = repetition_penalty if repetition_penalty is not None else DeepSpeedConfig.REPETITION_PENALTY

            # Encode the prompt and move tensors to the model's device.
            inputs = self.tokenizer(full_prompt, return_tensors="pt")
            input_length = inputs["input_ids"].shape[1]
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            generation_config = {
                "max_new_tokens": max_new_tokens,
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "repetition_penalty": repetition_penalty,
                "do_sample": True,
                "pad_token_id": self.tokenizer.eos_token_id,
                "eos_token_id": self.tokenizer.eos_token_id,
            }

            # Run inference without autograd bookkeeping.
            with torch.no_grad():
                outputs = self.model.generate(**inputs, **generation_config)

            # Decode only the newly generated tail, not the echoed prompt.
            generated_tokens = outputs[0][input_length:]
            response = self.tokenizer.decode(generated_tokens, skip_special_tokens=True)

            # Performance metrics for this call.
            latency = time.time() - start_time
            tokens_generated = len(generated_tokens)
            tokens_per_second = tokens_generated / latency if latency > 0 else 0

            self._update_stats(latency, tokens_generated)

            result = {
                "response": response,
                "usage": {
                    "prompt_tokens": input_length,
                    "completion_tokens": tokens_generated,
                    "total_tokens": input_length + tokens_generated,
                    "latency_seconds": latency,
                    "tokens_per_second": tokens_per_second
                },
                "model_info": {
                    "model_name": ModelConfig.MODEL_NAME,
                    "model_type": ModelConfig.MODEL_TYPE,
                    "context_length": ModelConfig.CONTEXT_LENGTH
                }
            }

            self._logger.info(f"推理完成: {tokens_generated} tokens, {latency:.2f}s")
            return result

        except Exception as e:
            self._logger.error(f"推理失败: {str(e)}", exc_info=True)
            raise

    def stream_generate(self, prompt: str, **kwargs) -> Any:
        """Pseudo-streaming generation.

        DeepSpeed MOE models need special handling for true token streaming;
        this runs a full non-streaming generate() and then yields the response
        word by word to simulate a stream.

        Yields:
            dict: OpenAI-style delta chunks plus usage/model-info metadata.

        Raises:
            RuntimeError: if the model is not initialized (raised on first
                iteration, since this is a generator function).
        """
        if not self.is_initialized:
            raise RuntimeError("模型未初始化")

        # Full generation first, then replay it incrementally.
        result = self.generate(prompt, **kwargs)

        words = result["response"].split()
        for i, word in enumerate(words):
            # Re-insert a separating space after every word except the last.
            yield {
                "choices": [{
                    "delta": {
                        "content": word + (" " if i < len(words) - 1 else "")
                    }
                }],
                "usage": result["usage"],
                "model_info": result["model_info"],
                "partial": True
            }

    def _update_stats(self, latency: float, tokens_generated: int):
        """Fold one inference's latency and token count into running stats."""
        self.inference_count += 1
        self.total_tokens_generated += tokens_generated

        # Incremental running mean of latency.
        if self.inference_count == 1:
            self.avg_latency = latency
        else:
            self.avg_latency = ((self.avg_latency * (self.inference_count - 1)) + latency) / self.inference_count

    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of performance and engine-state statistics."""
        return {
            "inference_count": self.inference_count,
            "total_tokens_generated": self.total_tokens_generated,
            "avg_latency": self.avg_latency,
            "tokens_per_inference": self.total_tokens_generated / max(self.inference_count, 1),
            "model_initialized": self.is_initialized,
            "device": str(self.device),
            "model_config": str(self.model_config) if self.model_config else None
        }

    def cleanup(self):
        """Release model resources and reset the engine to uninitialized."""
        # Bug fix: set the attribute to None instead of `del self.model` —
        # deleting it made any later access (e.g. get_stats, a second
        # cleanup()) raise AttributeError.
        self.model = None
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        self._logger.info("模型资源已清理")
        self.is_initialized = False

# Module-level singleton: one shared engine for the whole process.
engine = Qwen3CoderEngine()

def get_engine() -> Qwen3CoderEngine:
    """Return the process-wide shared Qwen3CoderEngine instance."""
    return engine