#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Ollama API 客户端示例

此脚本演示如何使用httpx库与Ollama本地API进行交互，包括：
- 获取可用模型列表
- 使用指定模型生成文本
- 错误处理和响应解析

使用说明：
1. 确保Ollama服务正在本地运行（默认端口11434）
2. 运行脚本：python test1_1.py
"""

import httpx
import json
import time

def get_available_models():
    """Fetch and display the models installed in the local Ollama instance.

    Queries the Ollama ``/api/tags`` endpoint on 127.0.0.1:11434, prints a
    human-readable summary of each model (name, size in GB, modification
    date) and returns the raw model entries.

    Returns:
        list[dict]: model entries as returned by the API, or an empty list
        when the request fails, the server returns an error status, or the
        response cannot be parsed as JSON.
    """
    try:
        url = "http://127.0.0.1:11434/api/tags"
        with httpx.Client() as client:
            response = client.get(url, timeout=10)
            response.raise_for_status()

            data = response.json()
            models = data.get('models', [])

            print(f"\n📦 可用Ollama模型 ({len(models)}):")
            for model in models:
                model_name = model.get('name')
                size_gb = model.get('size', 0) / (1024**3)  # bytes -> GB
                modified = model.get('modified_at', '未知')
                print(f"  - {model_name}")
                print(f"    大小: {size_gb:.1f} GB, 修改时间: {modified[:10]}")

            return models

    except httpx.HTTPStatusError as e:
        # BUG FIX: HTTPStatusError (raised by raise_for_status) is not a
        # subclass of RequestError, so a 4xx/5xx previously escaped and
        # crashed the script instead of honoring the return-[] contract.
        print(f"❌ 获取模型列表失败: HTTP {e.response.status_code}")
        return []
    except httpx.RequestError as e:
        print(f"❌ 获取模型列表失败: {e}")
        return []
    except json.JSONDecodeError:
        print("❌ 无法解析API响应为JSON格式")
        return []

def generate_text(prompt, model="qwen3:14b", stream=False, timeout=30):
    """Generate a completion via the Ollama ``/api/generate`` endpoint.

    Args:
        prompt (str): the prompt text to send to the model.
        model (str): name of the Ollama model to use.
        stream (bool): whether to request a streamed response.
        timeout (int): request timeout in seconds.

    Returns:
        dict: on success, ``{"success": True, ...}`` with the model name,
        generated text, completion flag, duration in milliseconds and
        prompt/response token counts; on failure, ``{"success": False,
        "error": ...}`` describing what went wrong.
    """
    endpoint = "http://127.0.0.1:11434/api/generate"
    request_body = {
        "model": model,
        "prompt": prompt,
        "stream": stream
    }

    try:
        print(f"\n🔄 正在调用 {model} 模型生成文本...")
        print(f"💬 提示词: {prompt}")

        started = time.time()
        with httpx.Client() as client:
            response = client.post(endpoint, json=request_body, timeout=timeout)
        elapsed = time.time() - started

        print(f"✅ 请求完成，耗时: {elapsed:.2f} 秒")
        print(f"📊 HTTP状态码: {response.status_code}")

        response.raise_for_status()
        data = response.json()

        if 'response' in data:
            # Normalize the raw API payload into a stable result shape.
            return {
                "success": True,
                "model": data.get("model", "未知"),
                "response": data["response"],
                "done": data.get("done", False),
                # total_duration is in nanoseconds; integer-divide to ms.
                "duration_ms": data.get("total_duration", 0) // 1000000,
                "prompt_tokens": data.get("prompt_eval_count", 0),
                "response_tokens": data.get("eval_count", 0)
            }

        print("⚠️  警告: 响应中缺少'response'字段")
        return {"success": False, "error": "响应格式不正确", "raw_data": data}

    except httpx.TimeoutException:
        return {"success": False, "error": "请求超时，请检查Ollama服务状态"}
    except httpx.ConnectError:
        return {"success": False, "error": "无法连接到Ollama服务，请确认服务已启动"}
    except httpx.HTTPStatusError as e:
        return {"success": False, "error": f"HTTP错误: {e.response.status_code}", "status_code": e.response.status_code}
    except json.JSONDecodeError:
        # response is always bound here: json() is the only raiser of this.
        return {"success": False, "error": "无法解析JSON响应", "raw_text": response.text}
    except Exception as e:
        # Last-resort boundary for a demo script; report instead of crash.
        return {"success": False, "error": f"发生未知错误: {e}", "error_type": type(e).__name__}

def main():
    """Entry point: list the local models, then run the generation tests."""
    banner = "=" * 50
    inner_rule = "  " + "=" * 40

    print("🌟 Ollama API 客户端示例 🌟")
    print(banner)

    # Step 1: show which models the local Ollama instance provides.
    models = get_available_models()

    # Step 2: run the text-generation test cases.
    print("\n" + banner)
    print("🚀 开始文本生成测试")
    print(banner)

    test_cases = [
        {"prompt": "你好，请简单介绍一下你自己", "model": "qwen3:14b"},
        {"prompt": "1+1等于多少？", "model": "qwen3:14b"}
    ]

    total = len(test_cases)
    for index, case in enumerate(test_cases, 1):
        print(f"\n📝 测试用例 {index}/{total}")
        result = generate_text(
            prompt=case["prompt"],
            model=case["model"],
            stream=False,
            timeout=30
        )

        # Guard clause: report the failure and move on to the next case.
        if not result["success"]:
            print(f"\n❌ 生成失败！")
            print(f"  错误信息: {result.get('error', '未知错误')}")
            if 'status_code' in result:
                print(f"  HTTP状态码: {result['status_code']}")
            continue

        print(f"\n✅ 生成成功！")
        print(f"  📊 统计信息:")
        print(f"    - 模型: {result['model']}")
        print(f"    - 耗时: {result['duration_ms']} 毫秒")
        print(f"    - 提示词 tokens: {result['prompt_tokens']}")
        print(f"    - 响应 tokens: {result['response_tokens']}")
        print(f"    - 总 tokens: {result['prompt_tokens'] + result['response_tokens']}")
        print(f"\n  📜 生成内容:")
        print(inner_rule)
        print(f"  {result['response']}")
        print(inner_rule)

    print("\n" + banner)
    print("🎉 测试完成！")
    print(banner)
    print("💡 提示: 你可以修改代码中的测试用例来尝试不同的提示词和模型")

# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
