#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
大模型工具函数
提供模型加载、推理、优化等功能
"""

import os
import time
import json
import logging
from typing import Dict, List, Optional, Union, Tuple, Any

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import GenerationConfig

from ascend_llm_demo.utils.ascend_utils import HAS_ASCEND, optimize_model_for_ascend, get_npu_memory_usage

logger = logging.getLogger(__name__)


class ModelManager:
    """Manages the lifecycle of a causal language model: load, unload, inference.

    Supports Ascend NPU, CUDA and CPU back-ends; silently falls back to CPU
    (with a warning) when the requested accelerator is unavailable.
    """

    def __init__(self, model_path: str, device: str = "npu", device_id: int = 0):
        """
        Initialize the model manager.

        Args:
            model_path: Filesystem path (or hub id) of the pretrained model.
            device: Device type; one of "npu", "cuda", "cpu".
            device_id: Index of the accelerator device.
        """
        self.model_path = model_path
        self.device_type = device.lower()
        self.device_id = device_id
        self.device = self._get_device()

        self.model = None
        self.tokenizer = None
        self.is_loaded = False

        # Cumulative performance statistics across generate() calls.
        # NOTE: avg_tokens_per_second is computed over *total* tokens
        # (prompt + generated), unlike the per-request tokens_per_second
        # which counts generated tokens only.
        self.stats = {
            "total_tokens": 0,
            "total_time": 0,
            "avg_tokens_per_second": 0,
            "requests_count": 0
        }

    def _get_device(self) -> str:
        """Resolve the torch device string for the configured device type.

        Falls back to "cpu" (logging a warning) when the requested
        accelerator stack is not present.
        """
        if self.device_type == "npu":
            if not HAS_ASCEND:
                logger.warning("未检测到昇腾环境，将使用CPU替代")
                return "cpu"
            return f"npu:{self.device_id}"
        if self.device_type == "cuda":
            if not torch.cuda.is_available():
                logger.warning("未检测到CUDA环境，将使用CPU替代")
                return "cpu"
            return f"cuda:{self.device_id}"
        return "cpu"

    def load(self) -> bool:
        """
        Load the tokenizer and model onto the configured device.

        Returns:
            True on success, False on failure (the error is logged).
        """
        try:
            logger.info(f"开始加载模型: {self.model_path}")
            start_time = time.time()

            # Tokenizer first: cheap, and fails fast on a bad model path.
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_path,
                trust_remote_code=True
            )

            # fp16 halves memory footprint and speeds up accelerator inference.
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_path,
                trust_remote_code=True,
                torch_dtype=torch.float16,
                device_map=self.device
            )

            # Device-specific tuning.
            if "npu" in self.device:
                optimize_model_for_ascend(self.model)
            elif "cuda" in self.device:
                # Usually redundant with torch_dtype=float16 above, but kept
                # as a safety net for models that ignore torch_dtype.
                if hasattr(self.model, "half"):
                    self.model.half()

            # Inference only: disable dropout etc.
            self.model.eval()

            load_time = time.time() - start_time
            logger.info(f"模型加载完成，耗时: {load_time:.2f}秒")

            self.is_loaded = True
            return True

        except Exception as e:
            logger.error(f"加载模型时出错: {str(e)}")
            self.is_loaded = False
            return False

    def unload(self) -> bool:
        """
        Unload the model and tokenizer and release device memory.

        Returns:
            True on success, False on failure (the error is logged).
        """
        try:
            if self.model is not None:
                # Move off the accelerator before dropping the reference so
                # the device allocation can actually be reclaimed.
                self.model.to("cpu")
                del self.model
                self.model = None

            if self.tokenizer is not None:
                del self.tokenizer
                self.tokenizer = None

            # Flush the caching allocator of whichever back-end we used.
            if "cuda" in self.device:
                torch.cuda.empty_cache()
            elif "npu" in self.device:
                torch.npu.empty_cache()

            self.is_loaded = False
            logger.info("模型已卸载")
            return True

        except Exception as e:
            logger.error(f"卸载模型时出错: {str(e)}")
            return False

    def generate(self,
                prompt: str,
                max_length: int = 2048,
                temperature: float = 0.7,
                top_p: float = 0.9,
                top_k: int = 40) -> Dict[str, Any]:
        """
        Generate a completion for a plain-text prompt.

        Args:
            prompt: Input prompt text.
            max_length: Maximum total sequence length (prompt + generation).
            temperature: Sampling temperature; 0 disables sampling (greedy).
            top_p: Nucleus-sampling probability mass.
            top_k: Top-k sampling cutoff.

        Returns:
            On success: {"response": str, "performance": {...}} where the
            performance dict holds token counts, wall time, throughput and
            (when available) device memory usage.
            On failure: {"error": str}.
        """
        if not self.is_loaded:
            return {"error": "模型未加载"}

        try:
            start_time = time.time()

            # FIX: use an explicit None check — `pad_token_id or eos_token_id`
            # would wrongly discard a legitimate pad_token_id of 0 (falsy).
            pad_token_id = (
                self.tokenizer.pad_token_id
                if self.tokenizer.pad_token_id is not None
                else self.tokenizer.eos_token_id
            )
            generation_config = GenerationConfig(
                max_length=max_length,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k,
                do_sample=temperature > 0,  # greedy decoding when T == 0
                pad_token_id=pad_token_id
            )

            # Encode the prompt and move tensors to the target device.
            inputs = self.tokenizer(prompt, return_tensors="pt")
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    generation_config=generation_config
                )

            input_tokens = len(inputs["input_ids"][0])
            output_tokens = len(outputs[0]) - input_tokens
            total_tokens = input_tokens + output_tokens

            # FIX: decode only the newly generated token slice. Stripping the
            # prompt from the decoded text by substring search is fragile:
            # tokenization does not round-trip exactly, so the prompt may not
            # reappear verbatim (or may appear mid-output).
            response_text = self.tokenizer.decode(
                outputs[0][input_tokens:], skip_special_tokens=True
            ).strip()

            # Update cumulative performance statistics.
            gen_time = time.time() - start_time
            tokens_per_second = output_tokens / gen_time if gen_time > 0 else 0

            self.stats["total_tokens"] += total_tokens
            self.stats["total_time"] += gen_time
            self.stats["requests_count"] += 1
            self.stats["avg_tokens_per_second"] = self.stats["total_tokens"] / self.stats["total_time"] if self.stats["total_time"] > 0 else 0

            result = {
                "response": response_text,
                "performance": {
                    "input_tokens": input_tokens,
                    "output_tokens": output_tokens,
                    "total_tokens": total_tokens,
                    "time": gen_time,
                    "tokens_per_second": tokens_per_second
                }
            }

            # Attach device memory usage when the back-end exposes it.
            if "npu" in self.device:
                memory_info = get_npu_memory_usage(self.device_id)
                if memory_info:
                    result["performance"]["memory"] = memory_info
            elif "cuda" in self.device:
                used_mem = torch.cuda.max_memory_allocated(self.device_id)
                total_mem = torch.cuda.get_device_properties(self.device_id).total_memory
                result["performance"]["memory"] = {
                    "used": used_mem,
                    "total": total_mem,
                    "unit": "Byte"
                }

            return result

        except Exception as e:
            logger.error(f"生成文本时出错: {str(e)}")
            return {"error": str(e)}

    def chat(self,
             messages: List[Dict[str, str]],
             max_length: int = 2048,
             temperature: float = 0.7,
             top_p: float = 0.9,
             top_k: int = 40) -> Dict[str, Any]:
        """
        Multi-turn chat entry point.

        Args:
            messages: Chat history, e.g.
                [{"role": "user", "content": "..."},
                 {"role": "assistant", "content": "..."}].
            max_length: Maximum total sequence length (prompt + generation).
            temperature: Sampling temperature; 0 disables sampling (greedy).
            top_p: Nucleus-sampling probability mass.
            top_k: Top-k sampling cutoff.

        Returns:
            Same dict shape as generate(): {"response", "performance"} on
            success or {"error": str} on failure.
        """
        if not self.is_loaded:
            return {"error": "模型未加载"}

        try:
            # Flatten the chat history into a single text prompt and reuse
            # the plain generation path.
            prompt = self._build_chat_prompt(messages)

            result = self.generate(
                prompt=prompt,
                max_length=max_length,
                temperature=temperature,
                top_p=top_p,
                top_k=top_k
            )

            return result

        except Exception as e:
            logger.error(f"聊天生成时出错: {str(e)}")
            return {"error": str(e)}

    def _build_chat_prompt(self, messages: List[Dict[str, str]]) -> str:
        """
        Flatten a chat history into a single prompt string.

        Roles are mapped to Chinese prefixes ("系统"/"用户"/"助手"); messages
        with an unknown role are appended without a prefix. A trailing
        assistant prefix cues the model to answer.

        Args:
            messages: Chat history entries with "role" and "content" keys.

        Returns:
            The assembled prompt string.
        """
        parts = []

        for message in messages:
            role = message.get("role", "").lower()
            content = message.get("content", "")

            if role == "system":
                parts.append(f"系统: {content}\n\n")
            elif role == "user":
                parts.append(f"用户: {content}\n\n")
            elif role == "assistant":
                parts.append(f"助手: {content}\n\n")
            else:
                parts.append(f"{content}\n\n")

        # Trailing assistant prefix: the model continues from here.
        parts.append("助手: ")

        return "".join(parts)

    def get_stats(self) -> Dict[str, Any]:
        """
        Return a snapshot of the cumulative performance statistics.

        Returns:
            A shallow copy of the stats dict (callers cannot mutate state).
        """
        return self.stats.copy()

    def reset_stats(self) -> None:
        """Reset all cumulative performance counters to zero."""
        self.stats = {
            "total_tokens": 0,
            "total_time": 0,
            "avg_tokens_per_second": 0,
            "requests_count": 0
        }