#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
昇腾大模型推理服务
提供高性能的大模型推理API服务
"""

import os
import sys
import argparse
import json
import logging
import time
from typing import List, Dict, Any, Optional, Union

import torch
import flask
from flask import Flask, request, jsonify
from flask_cors import CORS

# Import the Ascend (NPU) runtime dependencies; fall back to CPU/GPU if absent
try:
    import acl
    import torch_npu
    HAS_ASCEND = True
except ImportError:
    HAS_ASCEND = False
    print("Warning: 未检测到昇腾相关依赖，将使用CPU/GPU模式运行")

# Make the project root importable, then pull in the helper utilities
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.tokenizer import get_tokenizer
from utils.ascend_utils import init_ascend, release_ascend

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
)
logger = logging.getLogger(__name__)

# 初始化Flask应用
app = Flask(__name__)
CORS(app)  # 支持跨域请求

# 全局变量
model = None
tokenizer = None
device = None
device_id = 0
model_path = None

class AscendLLMModel:
    """LLM inference wrapper for Ascend NPU, CUDA, or CPU back ends.

    Loads a HuggingFace-style model (ChatGLM / Llama / Baichuan / generic)
    from a local path, resolves the target device, and exposes a single
    ``generate`` API that builds model-family-specific prompts from a list
    of chat messages.
    """

    def __init__(self, model_path: str, device: str = "ascend", device_id: int = 0):
        """Set up the device, tokenizer, and model.

        Args:
            model_path: Filesystem path of the pretrained model.
            device: Device type: "ascend", "cuda", or "cpu".
            device_id: Index of the device to use.

        Raises:
            ImportError: "ascend" was requested but acl/torch_npu are missing.
            RuntimeError: "cuda" was requested but CUDA is unavailable.
        """
        self.model_path = model_path
        self.device = device
        self.device_id = device_id

        # Resolve the concrete torch device string (and init ACL for Ascend).
        if device == "ascend":
            if not HAS_ASCEND:
                raise ImportError("未安装昇腾相关依赖，请先安装acl和torch_npu")

            logger.info(f"正在初始化昇腾环境，设备ID: {device_id}")
            init_ascend(device_id)
            self.device = f"npu:{device_id}"
        elif device == "cuda":
            if not torch.cuda.is_available():
                raise RuntimeError("CUDA设备不可用")
            self.device = f"cuda:{device_id}"
        else:
            self.device = "cpu"

        # Load the tokenizer first; model loading below can take much longer.
        logger.info(f"正在从 {model_path} 加载模型...")
        self.tokenizer = get_tokenizer(model_path)

        # Heuristic: a path containing "int8"/"int4" is treated as an
        # already-quantized checkpoint and is NOT cast to FP16 below.
        is_quantized = "int8" in model_path or "int4" in model_path

        lowered_path = model_path.lower()
        if "chatglm" in lowered_path:
            # ChatGLM checkpoints are published behind AutoModel, not CausalLM.
            from transformers import AutoModel
            self.model = AutoModel.from_pretrained(
                model_path,
                trust_remote_code=True,
                device_map=self.device
            )
        elif "llama" in lowered_path or "baichuan" in lowered_path:
            from transformers import AutoModelForCausalLM
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                device_map=self.device,
                trust_remote_code=True,
                low_cpu_mem_usage=True
            )
        else:
            # Generic fallback loader.
            from transformers import AutoModelForCausalLM
            self.model = AutoModelForCausalLM.from_pretrained(
                model_path,
                device_map=self.device,
                trust_remote_code=True
            )

        # Inference mode (disables dropout etc.).
        self.model.eval()

        # Extra NPU-specific optimizations.
        if device == "ascend":
            if not is_quantized:
                logger.info("正在优化昇腾模型...")
                # Enable JIT compilation on the NPU.
                torch_npu.npu.set_compile_mode(jit_compile=True)

                # Run in FP16 on the NPU for speed and memory.
                if hasattr(self.model, "half"):
                    self.model = self.model.half()

            logger.info("昇腾模型准备完成")

        logger.info("模型加载完成")

    def generate(self,
                 messages: List[Dict[str, str]],
                 max_tokens: int = 2048,
                 temperature: float = 0.7,
                 top_p: float = 0.9,
                 **kwargs) -> Dict[str, Any]:
        """Generate a chat completion for the given message history.

        Args:
            messages: Conversation as [{"role": ..., "content": ...}, ...].
            max_tokens: Maximum number of new tokens to generate.
            temperature: Sampling temperature; 0 selects greedy decoding.
            top_p: Nucleus-sampling probability mass.
            **kwargs: Extra options forwarded to ChatGLM's ``stream_chat``.

        Returns:
            Dict with "response", token "usage", and "performance" metrics,
            or {"error": ...} when ``messages`` is empty.
        """
        start_time = time.time()

        if len(messages) == 0:
            return {"error": "消息不能为空"}

        prompt = self._build_prompt(messages)
        input_ids = self.tokenizer.encode(prompt, return_tensors="pt").to(self.device)
        prompt_length = input_ids.shape[1]

        gen_params = {
            "input_ids": input_ids,
            "max_new_tokens": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "do_sample": temperature > 0,  # temperature == 0 -> greedy
            "eos_token_id": self.tokenizer.eos_token_id,
        }

        # ChatGLM exposes its own chat API.
        if "chatglm" in self.model_path.lower():
            if hasattr(self.model, "stream_chat"):
                # Drain the stream and keep only the final response.
                # BUGFIX: forward temperature/top_p, which were previously
                # dropped on this branch (only **kwargs was passed).
                response = ""
                for response, _ in self.model.stream_chat(
                        self.tokenizer, prompt,
                        temperature=temperature, top_p=top_p, **kwargs):
                    pass
                # NOTE: re-encoding the text approximates the completion
                # token count (may include special tokens).
                output_ids = self.tokenizer.encode(response)
            else:
                # BUGFIX: wrap in no_grad like the standard path, so no
                # autograd graph is built during inference.
                with torch.no_grad():
                    outputs = self.model.generate(**gen_params)
                output_ids = outputs[0].cpu().tolist()[prompt_length:]
                response = self.tokenizer.decode(output_ids)
        else:
            # Standard HuggingFace generation path.
            with torch.no_grad():
                outputs = self.model.generate(**gen_params)
                output_ids = outputs[0].cpu().tolist()[prompt_length:]
                response = self.tokenizer.decode(output_ids, skip_special_tokens=True)

        # Token accounting.
        usage = {
            "prompt_tokens": prompt_length,
            "completion_tokens": len(output_ids),
            "total_tokens": prompt_length + len(output_ids)
        }

        # Throughput metrics.
        elapsed_time = time.time() - start_time
        speed = len(output_ids) / elapsed_time if elapsed_time > 0 else 0

        logger.info(f"生成完成: {len(output_ids)}个tokens，耗时{elapsed_time:.2f}秒，速度{speed:.2f} tokens/s")

        return {
            "response": response,
            "usage": usage,
            "performance": {
                "elapsed_time": elapsed_time,
                "tokens_per_second": speed
            }
        }

    def _build_prompt(self, messages: List[Dict[str, str]]) -> str:
        """Build a model-family-specific prompt string from chat messages.

        Args:
            messages: Conversation as [{"role": ..., "content": ...}, ...].

        Returns:
            The formatted prompt string.
        """
        if "chatglm" in self.model_path.lower():
            # ChatGLM "[Round n] 问/答" dialogue format.
            prompt = ""
            for i, msg in enumerate(messages):
                role = msg["role"]
                content = msg["content"]
                if role == "user":
                    if i == 0:
                        prompt = content
                    else:
                        prompt = f"{prompt}\n[Round {i//2+1}]\n问：{content}"
                elif role == "assistant":
                    prompt = f"{prompt}\n答：{content}"
            # Odd message count means the last turn is the user's; ask for
            # the assistant's answer.
            if len(messages) % 2 == 1:
                prompt = f"{prompt}\n答："
            return prompt

        elif "llama" in self.model_path.lower():
            # Llama-2 [INST]/<<SYS>> chat format.
            prompt = ""
            for msg in messages:
                role = msg["role"]
                content = msg["content"]
                if role == "system":
                    prompt += f"<s>[INST] <<SYS>>\n{content}\n<</SYS>>\n\n"
                elif role == "user":
                    if not prompt:
                        prompt = f"<s>[INST] {content} [/INST]"
                    else:
                        prompt = f"{prompt}\n\n[INST] {content} [/INST]"
                elif role == "assistant":
                    prompt = f"{prompt} {content} </s>"
            return prompt

        else:
            # Generic "System/User/Assistant:" transcript format.
            prompt = ""
            for msg in messages:
                role = msg["role"]
                content = msg["content"]
                if role == "system":
                    prompt += f"System: {content}\n"
                elif role == "user":
                    prompt += f"User: {content}\n"
                elif role == "assistant":
                    prompt += f"Assistant: {content}\n"
            prompt += "Assistant: "
            return prompt

    def __del__(self):
        """Release the model and, if used, the Ascend device.

        BUGFIX: guarded with getattr so a partially-constructed instance
        (e.g. __init__ raised before setting attributes) does not raise a
        secondary AttributeError during garbage collection.
        """
        if hasattr(self, "model"):
            del self.model
        resolved_device = getattr(self, "device", "")
        if isinstance(resolved_device, str) and resolved_device.startswith("npu"):
            release_ascend(self.device_id)


@app.route("/api/chat", methods=["POST"])
def chat_endpoint():
    """聊天API接口"""
    global model
    
    try:
        data = request.json
        messages = data.get("messages", [])
        temperature = float(data.get("temperature", 0.7))
        max_tokens = int(data.get("max_tokens", 2048))
        top_p = float(data.get("top_p", 0.9))
        
        # 参数验证
        if not messages:
            return jsonify({"error": "消息不能为空"}), 400
            
        # 生成回复
        result = model.generate(
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p
        )
        
        return jsonify(result)
        
    except Exception as e:
        logger.error(f"处理请求时出错: {str(e)}")
        return jsonify({"error": str(e)}), 500


@app.route("/api/health", methods=["GET"])
def health_endpoint():
    """健康检查接口"""
    return jsonify({
        "status": "ok",
        "model": model_path,
        "device": device,
        "device_id": device_id
    })


def parse_arguments():
    """Parse the command-line options of the inference service.

    Returns:
        The populated argparse.Namespace (model, device, device_id,
        host, port).
    """
    parser = argparse.ArgumentParser(description="昇腾大模型推理服务")
    # Option table: (flag, keyword arguments for add_argument).
    option_specs = [
        ("--model", dict(type=str, required=True, help="模型路径")),
        ("--device", dict(type=str, default="ascend",
                          choices=["ascend", "cuda", "cpu"], help="运行设备")),
        ("--device_id", dict(type=int, default=0, help="设备ID")),
        ("--host", dict(type=str, default="0.0.0.0", help="服务主机地址")),
        ("--port", dict(type=int, default=8000, help="服务端口")),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()


def main():
    """Entry point: parse CLI args, load the model, start the HTTP service."""
    global model, tokenizer, device, device_id, model_path

    # Publish the CLI configuration into the module-level globals that the
    # health endpoint reports.
    args = parse_arguments()
    model_path = args.model
    device = args.device
    device_id = args.device_id

    try:
        # Load the model once at startup; requests share this instance.
        model = AscendLLMModel(model_path, device, device_id)

        logger.info(f"服务启动于 http://{args.host}:{args.port}")
        app.run(host=args.host, port=args.port)
    except KeyboardInterrupt:
        logger.info("服务已停止")
    except Exception as e:
        # BUGFIX: logger.exception keeps the traceback, which logger.error
        # with str(e) silently discarded.
        logger.exception(f"服务启动失败: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main() 