from openai import OpenAI
from dotenv import load_dotenv
import os
import time
import sys

load_dotenv()
# Default model key; must match one of the keys in model_configs below.
select_model = "qwen-vl-max-latest"

# Registry of every model, all exposed through an OpenAI-compatible provider.
# Each entry holds: the endpoint base_url, the provider-side model name, and
# the name of the environment variable that stores the API key.
model_configs = {
    # GOOGLE_API_KEY — Gemini's OpenAI-compatible endpoint lives under
    # /v1beta/openai/; the bare /v1beta path is the native Gemini API and
    # does not accept OpenAI-style chat.completions requests.
    "gemma-3-27b-it": {
        "provider": "openai_compat",
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
        "model_name": "gemma-3-27b-it",
        "api_key_name": "GOOGLE_API_KEY",
    },

    "gemini-2.0-flash-exp": {
        "provider": "openai_compat",
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
        "model_name": "gemini-2.0-flash-exp",
        "api_key_name": "GOOGLE_API_KEY",
    },

    # CUSTOM_COMPAT_API_KEY — locally hosted OpenAI-compatible server.
    "qwen_25vl_72b_awq": {
        "provider": "openai_compat",
        "base_url": "http://localhost:8197/v1",
        "model_name": "Qwen/Qwen2.5-VL-72B-Instruct-AWQ",
        "api_key_name": "CUSTOM_COMPAT_API_KEY",
    },

    # DASHSCOPE_API_KEY — Alibaba Cloud DashScope compatible-mode endpoint.
    "qwen-max-latest": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen-max-latest",
        "api_key_name": "DASHSCOPE_API_KEY",
    },

    "qwen-vl-max-latest": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen-vl-max-latest",
        "api_key_name": "DASHSCOPE_API_KEY",
    },

    "qwen2.5-vl-72b-instruct": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen2.5-vl-72b-instruct",
        "api_key_name": "DASHSCOPE_API_KEY",
    },
}

def test_model_connection(model_key, timeout=10):
    """Probe a single configured model with a minimal chat request.

    Args:
        model_key: Key into ``model_configs``.
        timeout: Per-request timeout in seconds.

    Returns:
        (bool, str): success flag plus a human-readable detail message.
    """
    cfg = model_configs.get(model_key)
    if cfg is None:
        return False, f"模型 {model_key} 不存在于配置中"

    key_env = cfg.get("api_key_name")
    # API keys are never stored in the config; they come from the environment.
    secret = os.getenv(key_env)
    if not secret:
        return False, f"未找到环境变量 {key_env} 的值"

    try:
        probe = OpenAI(
            api_key=secret,
            base_url=cfg.get("base_url"),
            timeout=timeout,
        )

        # One tiny completion is enough to prove end-to-end reachability.
        started = time.time()
        reply = probe.chat.completions.create(
            model=cfg.get("model_name"),
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5,
        )
        elapsed = time.time() - started

        return True, f"连接成功，响应时间：{elapsed:.2f}秒，响应内容：{reply.choices[0].message.content}"

    except Exception as exc:
        return False, f"连接失败：{str(exc)}"

def test_all_models(timeout=10, verbose=True):
    """Probe every configured model and record availability in module globals.

    Args:
        timeout: Per-request timeout forwarded to ``test_model_connection``.
        verbose: When True, print per-model progress and a final summary.

    Returns:
        (list, list, dict): available keys, unavailable keys, and the error
        message recorded for each unavailable key.
    """
    global available_models, unavailable_models, connection_errors

    # Start from a clean slate on every sweep.
    available_models = []
    unavailable_models = []
    connection_errors = {}

    for key in model_configs:
        if verbose:
            print(f"测试模型 {key} 连接状态...")

        ok, detail = test_model_connection(key, timeout)
        if ok:
            available_models.append(key)
            if verbose:
                print(f"✅ {key}: {detail}")
        else:
            unavailable_models.append(key)
            connection_errors[key] = detail
            if verbose:
                print(f"❌ {key}: {detail}")

    if verbose:
        print("\n=== 测试结果汇总 ===")
        print(f"可用模型 ({len(available_models)}): {', '.join(available_models)}")
        print(f"不可用模型 ({len(unavailable_models)}): {', '.join(unavailable_models)}")

    return available_models, unavailable_models, connection_errors


def get_client(model_key=None):
    """Build an OpenAI-compatible client for a configured model.

    Args:
        model_key: Key into ``model_configs``; defaults to ``select_model``.

    Returns:
        (OpenAI, str): the configured client and the provider-side model name.

    Raises:
        ValueError: if the key is unknown or its API-key env var is unset.
    """
    chosen = select_model if model_key is None else model_key

    if chosen not in model_configs:
        raise ValueError(f"模型 {chosen} 不存在于配置中")

    cfg = model_configs[chosen]
    env_name = cfg.get("api_key_name")
    secret = os.getenv(env_name)
    if not secret:
        raise ValueError(f"未找到环境变量 {env_name} 的值")

    client = OpenAI(
        api_key=secret,
        base_url=cfg.get("base_url"),
    )
    return client, cfg.get("model_name")

def get_available_models():
    """Return the list of usable model keys.

    Prefers the models that passed the most recent ``test_all_models`` sweep.
    If no sweep has run yet (the global is missing) or none passed, falls
    back to every configured model so callers always get options.

    Returns:
        list: model keys believed to be usable.
    """
    # globals().get avoids a NameError when the import-time connectivity
    # test was skipped or failed before assigning `available_models`.
    tested = globals().get("available_models")
    if tested:
        return tested
    return list(model_configs.keys())

# Safe defaults so the module-level names always exist, even if the
# connectivity sweep below fails before test_all_models() can assign them.
available_models = []
unavailable_models = []
connection_errors = {}

# Run one silent connectivity sweep at import time so callers immediately
# know which models are reachable. Remove this call if import-time network
# access is undesirable.
try:
    # Short timeout keeps import latency low; verbose=False suppresses the
    # per-model progress output.
    test_all_models(timeout=5, verbose=False)
    print(f"模型测试完成，找到 {len(available_models)} 个可用模型")
except Exception as e:
    # A failed probe must never prevent the application from starting.
    print(f"模型测试过程中发生错误: {str(e)}")

if __name__ == "__main__":
    # CLI usage: `python config.py <model_key>` probes one model;
    # with no argument every configured model is probed.
    args = sys.argv[1:]
    if not args:
        test_all_models(verbose=True)
    else:
        target = args[0]
        print(f"测试模型: {target}")
        print("=" * 50)

        ok, detail = test_model_connection(target)
        if ok:
            print("✅ 连接成功")
            print(f"详情: {detail}")
        else:
            print("❌ 连接失败")
            print(f"错误: {detail}")
