"""
API客户端示例，展示如何调用ROCm大模型推理API
"""
import requests
import json
import time
import argparse

# API基础URL
BASE_URL = "http://localhost:8000/api/v1"

def get_model_info(timeout=30):
    """Fetch model metadata from the inference API.

    Args:
        timeout: Seconds to wait for the HTTP response before giving up.
            The original code passed no timeout, so a dead server could
            block this call forever.

    Returns:
        The parsed JSON payload (dict) on success, or None on any
        request failure (connection error, HTTP error status, timeout).
    """
    url = f"{BASE_URL}/model-info"

    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"获取模型信息失败: {str(e)}")
        return None

def generate_text(prompt, max_new_tokens=None, temperature=None, top_p=None, top_k=None, timeout=300):
    """Generate text for a single prompt via the /generate endpoint.

    Args:
        prompt: Input prompt string.
        max_new_tokens: Optional cap on generated tokens; omitted from the
            request when None so the server default applies.
        temperature: Optional sampling temperature (omitted when None).
        top_p: Optional nucleus-sampling parameter (omitted when None).
        top_k: Optional top-k sampling parameter (omitted when None).
        timeout: Seconds to wait for the HTTP response. Generation can be
            slow, so the default is generous; without any timeout the
            original code could hang indefinitely on a dead server.

    Returns:
        The parsed JSON response (dict) on success, or None on any
        request failure.
    """
    url = f"{BASE_URL}/generate"

    # Include only the sampling parameters the caller actually set,
    # so None never overrides a server-side default.
    optional = {
        "max_new_tokens": max_new_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
    }
    data = {"prompt": prompt}
    data.update({k: v for k, v in optional.items() if v is not None})

    try:
        start_time = time.time()
        response = requests.post(url, json=data, timeout=timeout)
        response.raise_for_status()
        end_time = time.time()

        result = response.json()
        print(f"请求耗时: {end_time - start_time:.2f} 秒")
        return result
    except requests.exceptions.RequestException as e:
        print(f"生成文本失败: {str(e)}")
        return None

def batch_generate(prompts, max_new_tokens=None, temperature=None, top_p=None, top_k=None, timeout=600):
    """Generate text for multiple prompts via the /batch-generate endpoint.

    Args:
        prompts: List of input prompt strings.
        max_new_tokens: Optional cap on generated tokens; omitted from the
            request when None so the server default applies.
        temperature: Optional sampling temperature (omitted when None).
        top_p: Optional nucleus-sampling parameter (omitted when None).
        top_k: Optional top-k sampling parameter (omitted when None).
        timeout: Seconds to wait for the HTTP response. Batch generation
            is the slowest endpoint, hence the larger default; without any
            timeout the original code could hang indefinitely.

    Returns:
        The parsed JSON response (dict) on success, or None on any
        request failure.
    """
    url = f"{BASE_URL}/batch-generate"

    # Include only the sampling parameters the caller actually set,
    # so None never overrides a server-side default.
    optional = {
        "max_new_tokens": max_new_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "top_k": top_k,
    }
    data = {"prompts": prompts}
    data.update({k: v for k, v in optional.items() if v is not None})

    try:
        start_time = time.time()
        response = requests.post(url, json=data, timeout=timeout)
        response.raise_for_status()
        end_time = time.time()

        result = response.json()
        print(f"请求耗时: {end_time - start_time:.2f} 秒")
        return result
    except requests.exceptions.RequestException as e:
        print(f"批量生成文本失败: {str(e)}")
        return None

def main():
    """Command-line entry point: parse flags and dispatch to the API helpers."""
    parser = argparse.ArgumentParser(description="ROCm大模型推理API客户端示例")
    parser.add_argument("--info", action="store_true", help="获取模型信息")
    parser.add_argument("--generate", action="store_true", help="生成文本")
    parser.add_argument("--batch", action="store_true", help="批量生成文本")
    parser.add_argument("--prompt", type=str, help="输入提示文本")
    parser.add_argument("--prompts", type=str, nargs="+", help="输入提示文本列表")
    parser.add_argument("--max-tokens", type=int, help="最大生成的新token数量")
    parser.add_argument("--temperature", type=float, help="温度参数")
    parser.add_argument("--top-p", type=float, help="核采样参数")
    parser.add_argument("--top-k", type=int, help="top-k采样参数")

    opts = parser.parse_args()

    # No action flag given: show usage and bail out early.
    if not any((opts.info, opts.generate, opts.batch)):
        parser.print_help()
        return

    # --info: dump model metadata as pretty-printed JSON.
    if opts.info:
        info = get_model_info()
        if info:
            print("\n===== 模型信息 =====")
            print(json.dumps(info, indent=2, ensure_ascii=False))

    # --generate: single-prompt generation (requires --prompt).
    if opts.generate:
        if not opts.prompt:
            print("错误: 生成文本需要提供--prompt参数")
            return

        single = generate_text(
            opts.prompt,
            max_new_tokens=opts.max_tokens,
            temperature=opts.temperature,
            top_p=opts.top_p,
            top_k=opts.top_k
        )

        if single:
            print("\n===== 生成结果 =====")
            print(f"提示: {single['prompt']}")
            print(f"回复: {single['response']}")
            print(f"元数据: {json.dumps(single['metadata'], indent=2)}")

    # --batch: multi-prompt generation (requires --prompts).
    if opts.batch:
        if not opts.prompts:
            print("错误: 批量生成文本需要提供--prompts参数")
            return

        batch = batch_generate(
            opts.prompts,
            max_new_tokens=opts.max_tokens,
            temperature=opts.temperature,
            top_p=opts.top_p,
            top_k=opts.top_k
        )

        if batch:
            print("\n===== 批量生成结果 =====")
            # enumerate from 1 so the printed index matches the original "i+1".
            for idx, entry in enumerate(batch["results"], start=1):
                print(f"\n结果 {idx}:")
                print(f"提示: {entry['prompt']}")
                print(f"回复: {entry['response']}")

if __name__ == "__main__":
    main()