#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
演示不同AI模型接口的使用方法
支持OpenAI API和本地模型(如Ollama)
"""

import requests
import json
import time
from typing import Dict, Any, Optional

class AIModelDemo:
    """Demo client for chat-style AI model APIs.

    Supports the OpenAI chat-completions endpoint and a local
    Ollama-compatible server, and can compare their responses.
    """

    # Shared system prompt sent to every backend (user-facing text, kept verbatim).
    _SYSTEM_PROMPT = "你是一个友善的AI助手，请用中文回答问题。"

    def __init__(self):
        # Registry of known backends; endpoint URLs live only here so the
        # call methods never duplicate a URL literal.
        self.models = {
            "openai": {
                "name": "OpenAI GPT-3.5",
                "url": "https://api.openai.com/v1/chat/completions",
                "model": "gpt-3.5-turbo"
            },
            "local": {
                "name": "本地Ollama模型",
                "url": "http://localhost:11434/api/chat",
                "model": "llama2"
            }
        }

    def _chat_messages(self, message: str) -> list:
        """Build the system+user message list shared by all backends."""
        return [
            {"role": "system", "content": self._SYSTEM_PROMPT},
            {"role": "user", "content": message}
        ]

    def call_openai(self, message: str, api_key: str, model: str = "gpt-3.5-turbo") -> Dict[str, Any]:
        """Call the OpenAI chat-completions API.

        Args:
            message: User message to send.
            api_key: OpenAI API key.
            model: Model name to request.

        Returns:
            On success: {"success": True, "response", "response_time",
            "tokens_used"}. On failure: {"success": False, "error",
            "response_time"}. This method never raises.
        """
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

        data = {
            "model": model,
            "messages": self._chat_messages(message),
            "max_tokens": 1000,
            "temperature": 0.7
        }

        try:
            print(f"正在调用OpenAI API ({model})...")
            start_time = time.time()

            # Use the configured endpoint instead of a duplicated URL literal.
            response = requests.post(
                self.models["openai"]["url"],
                headers=headers,
                json=data,
                timeout=30
            )

            response_time = time.time() - start_time

            if response.status_code == 200:
                result = response.json()
                return {
                    "success": True,
                    "response": result['choices'][0]['message']['content'].strip(),
                    "response_time": response_time,
                    "tokens_used": result.get('usage', {}).get('total_tokens', 0)
                }
            return {
                "success": False,
                "error": f"HTTP {response.status_code}: {response.text}",
                "response_time": response_time
            }

        except requests.exceptions.RequestException as e:
            return {
                "success": False,
                "error": f"网络请求错误: {str(e)}",
                "response_time": 0
            }
        except Exception as e:
            # Covers malformed JSON bodies / unexpected response shapes.
            return {
                "success": False,
                "error": f"未知错误: {str(e)}",
                "response_time": 0
            }

    def call_local_model(self, message: str, model: str = "llama2", base_url: str = "http://localhost:11434") -> Dict[str, Any]:
        """Call a local Ollama-compatible chat API.

        Args:
            message: User message to send.
            model: Model name to request.
            base_url: Base URL of the local server.

        Returns:
            On success: {"success": True, "response", "response_time",
            "model_info"}. On failure: {"success": False, "error",
            "response_time"}. This method never raises.
        """
        url = f"{base_url}/api/chat"

        data = {
            "model": model,
            "messages": self._chat_messages(message),
            "stream": False  # request a single JSON object, not a stream
        }

        try:
            print(f"正在调用本地模型 ({model})...")
            start_time = time.time()

            # Local models can be slow to generate; allow a longer timeout.
            response = requests.post(url, json=data, timeout=60)

            response_time = time.time() - start_time

            if response.status_code == 200:
                result = response.json()
                return {
                    "success": True,
                    "response": result['message']['content'].strip(),
                    "response_time": response_time,
                    "model_info": result.get('model', model)
                }
            return {
                "success": False,
                "error": f"HTTP {response.status_code}: {response.text}",
                "response_time": response_time
            }

        except requests.exceptions.ConnectionError:
            # Most common local failure mode: the server is simply not running.
            return {
                "success": False,
                "error": "无法连接到本地模型服务。请确保Ollama或其他本地服务正在运行。",
                "response_time": 0
            }
        except requests.exceptions.RequestException as e:
            return {
                "success": False,
                "error": f"网络请求错误: {str(e)}",
                "response_time": 0
            }
        except Exception as e:
            return {
                "success": False,
                "error": f"未知错误: {str(e)}",
                "response_time": 0
            }

    def test_model_availability(self) -> Dict[str, Any]:
        """Probe which backends are reachable.

        Returns:
            Mapping of backend key to status: "local" maps to a bool,
            while "openai" maps to a descriptive string because it
            cannot be probed without an API key.
        """
        availability: Dict[str, Any] = {}

        # Ollama exposes /api/tags when running; any request failure
        # (refused connection, timeout, DNS error) means "not available".
        try:
            response = requests.get("http://localhost:11434/api/tags", timeout=5)
            availability["local"] = response.status_code == 200
        except requests.exceptions.RequestException:
            availability["local"] = False

        availability["openai"] = "需要API密钥"

        return availability

    def compare_models(self, message: str, openai_api_key: Optional[str] = None) -> None:
        """Run the same message through each backend and print a summary.

        Args:
            message: Test message to send.
            openai_api_key: OpenAI API key; the OpenAI leg is skipped when None.
        """
        print(f"\n{'='*60}")
        print(f"模型比较测试")
        print(f"测试消息: {message}")
        print(f"{'='*60}")

        results = []

        # Local model first: it needs no credentials.
        print("\n1. 测试本地模型...")
        local_result = self.call_local_model(message)
        results.append(("本地模型", local_result))

        if local_result["success"]:
            print(f"✓ 响应时间: {local_result['response_time']:.2f}秒")
            print(f"✓ 响应内容: {local_result['response'][:100]}...")
        else:
            print(f"✗ 错误: {local_result['error']}")

        # OpenAI only when a key was supplied.
        if openai_api_key:
            print("\n2. 测试OpenAI模型...")
            openai_result = self.call_openai(message, openai_api_key)
            results.append(("OpenAI", openai_result))

            if openai_result["success"]:
                print(f"✓ 响应时间: {openai_result['response_time']:.2f}秒")
                print(f"✓ 令牌使用: {openai_result.get('tokens_used', 0)}")
                print(f"✓ 响应内容: {openai_result['response'][:100]}...")
            else:
                print(f"✗ 错误: {openai_result['error']}")
        else:
            print("\n2. 跳过OpenAI测试 (未提供API密钥)")

        # Summary table; a response_time of 0 marks "failed before timing".
        print(f"\n{'='*60}")
        print("测试总结:")
        for model_name, result in results:
            status = "✓ 成功" if result["success"] else "✗ 失败"
            time_info = f" ({result['response_time']:.2f}s)" if result["response_time"] > 0 else ""
            print(f"  {model_name}: {status}{time_info}")
            
def main():
    """Entry point: interactive demo of the AI model interfaces."""
    demo = AIModelDemo()

    print("AI模型接口演示程序")
    print("=" * 40)

    # Report which backends look reachable before running anything.
    print("\n检查模型可用性...")
    for model, status in demo.test_model_availability().items():
        print(f"  {model}: {status}")

    # Prompt for a test message, falling back to a canned default.
    print("\n请输入测试消息 (按Enter使用默认消息):")
    user_message = input("> ").strip() or "请介绍一下人工智能的发展历史"

    # An empty key means "skip the OpenAI leg of the comparison".
    print("\n请输入OpenAI API密钥 (可选，按Enter跳过):")
    api_key = input("> ").strip() or None

    # Run the side-by-side comparison.
    demo.compare_models(user_message, api_key)

    print("\n演示完成！")
    
# Run the interactive demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()