#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
昇腾大模型客户端
提供与昇腾大模型服务交互的命令行客户端
"""

import os
import sys
import json
import time
import logging
import argparse
from typing import Dict, List, Any, Optional, Union

import requests

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)


class AscendLLMClient:
    """Client for the Ascend LLM HTTP API.

    Maintains a multi-turn conversation history across ``chat`` calls so
    successive messages form a dialogue.
    """

    def __init__(self, api_url: str = "http://localhost:8000",
                 timeout: float = 300.0):
        """
        Initialize the client.

        Args:
            api_url: Base URL of the API service; trailing slashes are stripped.
            timeout: Per-request timeout in seconds. Without an explicit
                timeout, ``requests`` blocks forever on an unresponsive
                server. Generous default because LLM generation can be slow.
        """
        self.api_url = api_url.rstrip("/")
        self.timeout = timeout
        self.session = requests.Session()
        # Ordered list of {"role": ..., "content": ...} message dicts,
        # oldest first.
        self.conversation_history: List[Dict[str, str]] = []

    def health_check(self) -> Dict[str, Any]:
        """
        Check the API service health.

        Returns:
            The service's health payload, or
            ``{"status": "error", "reason": ...}`` on any request/JSON failure.
        """
        try:
            response = self.session.get(f"{self.api_url}/health",
                                        timeout=self.timeout)
            response.raise_for_status()
            return response.json()
        except (requests.RequestException, ValueError) as e:
            # ValueError covers a non-JSON response body from .json().
            logger.error(f"健康检查失败: {str(e)}")
            return {"status": "error", "reason": str(e)}

    def get_metrics(self) -> Dict[str, Any]:
        """
        Fetch service metrics.

        Returns:
            The metrics payload, or ``{"status": "error", "reason": ...}``
            on any request/JSON failure.
        """
        try:
            response = self.session.get(f"{self.api_url}/metrics",
                                        timeout=self.timeout)
            response.raise_for_status()
            return response.json()
        except (requests.RequestException, ValueError) as e:
            logger.error(f"获取指标失败: {str(e)}")
            return {"status": "error", "reason": str(e)}

    def chat(self, 
            message: str, 
            system_prompt: Optional[str] = None,
            max_length: int = 2048,
            temperature: float = 0.7,
            top_p: float = 0.9,
            top_k: int = 40,
            show_performance: bool = False) -> Dict[str, Any]:
        """
        Send a chat message and update the conversation history.

        Args:
            message: User message text.
            system_prompt: Optional system prompt, prepended only when no
                system message is already recorded in the history.
            max_length: Maximum generation length.
            temperature: Sampling temperature (randomness).
            top_p: Nucleus-sampling parameter.
            top_k: Top-k sampling parameter.
            show_performance: Ask the server to include performance stats.

        Returns:
            The server's JSON response, or ``{"error": ...}`` on failure.
        """
        messages: List[Dict[str, str]] = []

        # NOTE(review): system messages are never appended to
        # conversation_history below, so this guard is currently always
        # true; kept as-is to preserve behavior.
        if system_prompt and not any(msg.get("role") == "system" for msg in self.conversation_history):
            messages.append({"role": "system", "content": system_prompt})

        # Prior turns, then the new user message.
        messages.extend(self.conversation_history)
        messages.append({"role": "user", "content": message})

        try:
            request_data = {
                "messages": messages,
                "max_length": max_length,
                "temperature": temperature,
                "top_p": top_p,
                "top_k": top_k,
                "show_performance": show_performance
            }

            response = self.session.post(
                f"{self.api_url}/chat",
                json=request_data,
                timeout=self.timeout
            )
            response.raise_for_status()
            result = response.json()

            # Record the exchange only after a successful round trip so a
            # failed request does not pollute the history.
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": result.get("response", "")})

            return result

        except (requests.RequestException, ValueError) as e:
            logger.error(f"聊天请求失败: {str(e)}")
            return {"error": str(e)}

    def reset_conversation(self) -> None:
        """Clear the stored conversation history."""
        self.conversation_history = []
        logger.info("对话历史已重置")


def interactive_mode(client: AscendLLMClient, args: argparse.Namespace) -> None:
    """
    Interactive REPL mode.

    Args:
        client: API client instance.
        args: Parsed command-line options (system prompt, sampling params,
            performance flag).
    """
    print("\n=== 昇腾大模型交互式命令行 ===")
    print("输入 'q' 或 'exit' 退出，输入 'clear' 清除对话历史\n")

    # Report service status up front so connection problems are visible
    # before the user types anything.
    health = client.health_check()
    if health.get("status") != "ok":
        print(f"警告: 服务状态异常 - {health.get('reason', '未知错误')}")
        print("继续尝试连接...\n")
    else:
        print(f"服务状态: 正常")
        print(f"模型: {health.get('model_path', '未知')}")
        print(f"设备: {health.get('device', '未知')}\n")

    # Main read-eval-print loop.
    while True:
        try:
            user_input = input("用户> ")

            # Exit commands.
            if user_input.lower() in ["q", "exit", "quit"]:
                print("退出交互式命令行")
                break

            # Clear conversation history.
            if user_input.lower() == "clear":
                client.reset_conversation()
                print("对话历史已清除")
                continue

            # Ignore empty input.
            if not user_input.strip():
                continue

            start_time = time.time()
            result = client.chat(
                message=user_input,
                system_prompt=args.system_prompt,
                max_length=args.max_length,
                temperature=args.temperature,
                top_p=args.top_p,
                top_k=args.top_k,
                show_performance=args.show_performance
            )

            if "error" in result:
                print(f"错误: {result['error']}")
                continue

            print("\n助手> " + result.get("response", ""))

            # Optional performance report.
            if args.show_performance and "performance" in result:
                perf = result["performance"]
                total_time = time.time() - start_time

                print("\n--- 性能信息 ---")
                print(f"总耗时: {total_time:.2f}秒")
                print(f"模型耗时: {perf.get('time', 0):.2f}秒")
                print(f"输入tokens: {perf.get('input_tokens', 0)}")
                print(f"输出tokens: {perf.get('output_tokens', 0)}")
                print(f"生成速度: {perf.get('tokens_per_second', 0):.2f} tokens/s")

                # Memory figures arrive in bytes; convert to MB for display.
                if "memory" in perf:
                    mem = perf["memory"]
                    used_mem_mb = mem.get("used", 0) / (1024 * 1024)
                    total_mem_mb = mem.get("total", 0) / (1024 * 1024)
                    print(f"内存使用: {used_mem_mb:.2f}MB / {total_mem_mb:.2f}MB")

            print("\n")

        except EOFError:
            # BUG FIX: on stdin EOF (Ctrl-D or piped input) input() raises
            # EOFError on every call; the generic handler below used to
            # swallow it and spin forever. Treat EOF as a clean exit.
            print("\n退出交互式命令行")
            break
        except KeyboardInterrupt:
            print("\n捕获到中断信号，退出交互式命令行")
            break
        except Exception as e:
            print(f"错误: {str(e)}")

def single_query_mode(client: AscendLLMClient, args: argparse.Namespace) -> None:
    """
    One-shot query mode: send a single message and print the reply.

    Args:
        client: API client instance.
        args: Parsed command-line options (query text and sampling params).
    """
    try:
        reply = client.chat(
            message=args.query,
            system_prompt=args.system_prompt,
            max_length=args.max_length,
            temperature=args.temperature,
            top_p=args.top_p,
            top_k=args.top_k,
            show_performance=args.show_performance
        )

        # Surface server-side failures and stop.
        if "error" in reply:
            print(f"错误: {reply['error']}")
            return

        print(reply.get("response", ""))

        # Optional performance report, only when requested and present.
        if args.show_performance and "performance" in reply:
            stats = reply["performance"]

            print("\n--- 性能信息 ---")
            print(f"耗时: {stats.get('time', 0):.2f}秒")
            print(f"输入tokens: {stats.get('input_tokens', 0)}")
            print(f"输出tokens: {stats.get('output_tokens', 0)}")
            print(f"生成速度: {stats.get('tokens_per_second', 0):.2f} tokens/s")

    except Exception as e:
        print(f"错误: {str(e)}")


def parse_args(argv: Optional[List[str]] = None) -> argparse.Namespace:
    """
    Parse command-line arguments.

    Args:
        argv: Argument list to parse. ``None`` (the default) means
            ``sys.argv[1:]``, so existing ``parse_args()`` call sites are
            unchanged; passing a list makes the parser testable/embeddable.

    Returns:
        Parsed argument namespace.
    """
    parser = argparse.ArgumentParser(description="昇腾大模型客户端")

    parser.add_argument("--api_url", type=str, default="http://localhost:8000",
                        help="API服务地址，默认为http://localhost:8000")

    parser.add_argument("--interactive", action="store_true",
                        help="使用交互式模式")

    parser.add_argument("--query", type=str,
                        help="单次查询模式下的查询内容")

    parser.add_argument("--system_prompt", type=str,
                        help="系统提示")

    parser.add_argument("--max_length", type=int, default=2048,
                        help="最大生成长度")

    parser.add_argument("--temperature", type=float, default=0.7,
                        help="温度参数，控制随机性")

    parser.add_argument("--top_p", type=float, default=0.9,
                        help="top-p参数")

    parser.add_argument("--top_k", type=int, default=40,
                        help="top-k参数")

    parser.add_argument("--show_performance", action="store_true",
                        help="显示性能信息")

    return parser.parse_args(argv)


def main() -> None:
    """Entry point: build the client and dispatch to the selected mode."""
    args = parse_args()
    client = AscendLLMClient(api_url=args.api_url)

    # Dispatch with guard clauses: interactive mode wins, then one-shot
    # query; otherwise show usage examples.
    if args.interactive:
        interactive_mode(client, args)
        return
    if args.query:
        single_query_mode(client, args)
        return

    print("错误: 必须指定 --interactive 或 --query 参数")
    print("示例: python client.py --interactive")
    print("示例: python client.py --query '你好，请介绍一下自己'")

# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main() 