from openai import OpenAI
from dotenv import load_dotenv
import os
import time
import sys
import threading
import argparse

load_dotenv()
# Default model key used when callers do not specify one.
select_model: str = "qwen-vl-max-latest"

# All model configurations; every entry uses "openai_compat" as the provider.
# Each value records the OpenAI-compatible base URL, the provider-side model
# name, and the name of the environment variable that holds the API key.
model_configs = {
    # Google endpoints — authenticated via GOOGLE_API_KEY
    "gemma-3-27b-it": {
        "provider": "openai_compat",
        "base_url": "https://generativelanguage.googleapis.com/v1beta",
        "model_name": "gemma-3-27b-it",
        "api_key_name":"GOOGLE_API_KEY"
    },
    
    "gemini-2.0-flash-exp": {
        "provider": "openai_compat",
        "base_url": "https://generativelanguage.googleapis.com/v1beta",
        "model_name": "gemini-2.0-flash-exp",
        "api_key_name":"GOOGLE_API_KEY"
    },
    
    # Local self-hosted deployment — authenticated via CUSTOM_COMPAT_API_KEY
    "qwen_25vl_72b_awq": {
        "provider": "openai_compat",
        "base_url": "http://localhost:8197/v1",
        "model_name": "Qwen/Qwen2.5-VL-72B-Instruct-AWQ",
        "api_key_name":"CUSTOM_COMPAT_API_KEY"
    },
    # Alibaba DashScope endpoints — authenticated via DASHSCOPE_API_KEY
    "qwen-max-latest": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen-max-latest",
        "api_key_name":"DASHSCOPE_API_KEY"
    },
    
    "qwen-vl-max-latest": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen-vl-max-latest",
        "api_key_name":"DASHSCOPE_API_KEY"
    },
    
    "qwen2.5-vl-72b-instruct": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen2.5-vl-72b-instruct",
        "api_key_name":"DASHSCOPE_API_KEY"
    }
    
}

# Import and merge model configurations from the secret_files directory,
# when present. Secret entries with the same key override the defaults above.
try:
    # Make the parent directory importable so the secret_files package can be
    # found. NOTE: os and sys are already imported at module top; the previous
    # redundant in-block re-imports were removed.
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    # Attempt to load the private configuration module.
    from secret_files.model_configs import model_configs as secret_model_configs

    # Merge the private entries into the public registry.
    model_configs.update(secret_model_configs)
    print(f"已成功导入secret_files/model_configs.py中的{len(secret_model_configs)}个模型配置")
except ImportError:
    # Missing secret module is expected in public checkouts — not an error.
    print("未找到secret_files/model_configs.py文件，跳过导入")
except Exception as e:
    # Any other failure (syntax error, bad structure) is reported but ignored.
    print(f"导入secret_files/model_configs.py时出错: {str(e)}")

# Module-level state for model connectivity testing.
available_models = []  # filled in after an actual connectivity test run
unavailable_models = []  # keys of models whose connectivity test failed
connection_errors = {}  # {model_key: error_message} for failed models
is_testing = False  # True while a test run is in progress
test_complete = False  # True once a test run has finished
auto_test_models = False  # models are NOT auto-tested by default


def test_model_connection(model_key, timeout=5):
    """Probe one configured model endpoint and report whether it responds.

    Args:
        model_key: key into ``model_configs``.
        timeout: per-request timeout in seconds.

    Returns:
        (bool, str): success flag plus a human-readable detail message
        (response time and content on success, error text on failure).
    """
    # Unknown key: fail fast without touching the network.
    if model_key not in model_configs:
        return False, f"模型 {model_key} 不存在于配置中"

    cfg = model_configs[model_key]
    api_key_name = cfg.get("api_key_name")

    # Resolve the credential from the environment.
    api_key = os.getenv(api_key_name)
    if not api_key:
        return False, f"未找到环境变量 {api_key_name} 的值"

    try:
        client = OpenAI(
            api_key=api_key,
            base_url=cfg.get("base_url"),
            timeout=timeout,
        )

        # Fire a minimal chat request purely to verify connectivity.
        started = time.time()
        response = client.chat.completions.create(
            model=cfg.get("model_name"),
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5
        )
        elapsed = time.time() - started
        return True, f"连接成功，响应时间：{elapsed:.2f}秒，响应内容：{response.choices[0].message.content}"
    except Exception as e:
        # Any failure (auth, DNS, timeout, bad model name) is reported as text.
        return False, f"连接失败：{str(e)}"

def test_all_models(timeout=5, verbose=True):
    """Test connectivity for every model in ``model_configs``.

    Mutates the module-level ``available_models``, ``unavailable_models`` and
    ``connection_errors`` globals; ``available_models`` is only replaced when
    at least one model passes, so a fully-failed run keeps the previous list.

    Args:
        timeout: per-request timeout in seconds, forwarded to
            ``test_model_connection``.
        verbose: when True, print per-model progress and a final summary.

    Returns:
        (list, list, dict): available model keys, unavailable model keys,
        and {model_key: error_message} for the failures.
    """
    global available_models, unavailable_models, connection_errors, is_testing, test_complete
    
    # NOTE(review): this check-then-set on is_testing is not atomic; two
    # threads (e.g. via background_test_models) could both pass the guard.
    # Acceptable as a best-effort re-entrancy guard — confirm if stronger
    # synchronization is needed.
    if is_testing:
        return available_models, unavailable_models, connection_errors
    
    is_testing = True
    
    try:
        # Reset state, but keep the original available-model list as a backup
        # (only replaced below when at least one model is reachable).
        _available_models = []
        unavailable_models = []
        connection_errors = {}
        
        for model_key in model_configs:
            if verbose:
                print(f"测试模型 {model_key} 连接状态...")
            try:
                is_available, message = test_model_connection(model_key, timeout)
                
                if is_available:
                    _available_models.append(model_key)
                    if verbose:
                        print(f"✅ {model_key}: {message}")
                else:
                    unavailable_models.append(model_key)
                    connection_errors[model_key] = message
                    if verbose:
                        print(f"❌ {model_key}: {message}")
            except Exception as e:
                # Catch per-model exceptions so one failure cannot abort the
                # testing of the remaining models.
                unavailable_models.append(model_key)
                connection_errors[model_key] = f"测试过程发生异常: {str(e)}"
                if verbose:
                    print(f"❌ {model_key}: 测试过程发生异常: {str(e)}")
        
        # Commit the new list only if at least one model is available.
        if _available_models:
            available_models = _available_models
        
        if verbose:
            print("\n=== 测试结果汇总 ===")
            print(f"可用模型 ({len(available_models)}): {', '.join(available_models)}")
            print(f"不可用模型 ({len(unavailable_models)}): {', '.join(unavailable_models)}")
        
        test_complete = True
        return available_models, unavailable_models, connection_errors
    
    finally:
        # Always clear the guard, even if the loop raised.
        is_testing = False


def get_client(model_key=None):
    """Build an OpenAI client for a configured model.

    Args:
        model_key: key into ``model_configs``; defaults to ``select_model``
            when omitted.

    Returns:
        (OpenAI, str): the client instance and the provider-side model name.

    Raises:
        ValueError: if the key is unknown or its API-key environment
            variable is unset.
    """
    if model_key is None:
        model_key = select_model

    # Validate the key before reading any configuration from it.
    if model_key not in model_configs:
        raise ValueError(f"模型 {model_key} 不存在于配置中")

    cfg = model_configs[model_key]
    api_key_name = cfg.get("api_key_name")

    # The credential lives in the environment, never in the config itself.
    api_key = os.getenv(api_key_name)
    if not api_key:
        raise ValueError(f"未找到环境变量 {api_key_name} 的值")

    client = OpenAI(api_key=api_key, base_url=cfg.get("base_url"))
    return client, cfg.get("model_name")

def get_available_models():
    """Return the model keys to offer to callers.

    When ``auto_test_models`` is enabled and a connectivity-test run has
    already produced results, only the models that passed are returned;
    otherwise every configured model key is returned without any testing.

    Returns:
        list: model key names.
    """
    if auto_test_models and available_models:
        return available_models
    # Fall back to the full registry, connectivity unchecked.
    return [*model_configs]

def background_test_models():
    """Run the full model connectivity test, intended for a worker thread."""
    try:
        print("开始在后台测试模型连接...")
        available, *_ = test_all_models(timeout=5, verbose=False)
        print(f"模型测试完成，找到 {len(available)} 个可用模型: {', '.join(available)}")
    except Exception as e:
        # Never let a background probe crash the host thread; just log it.
        print(f"后台测试模型时发生错误: {str(e)}")

# Connectivity is no longer tested automatically at import time, to avoid
# startup delays and connection problems. Call test_all_models manually if
# a check is needed.
print("未进行模型连通性测试，默认所有模型可用")

if __name__ == "__main__":
    # CLI entry point: let the user opt in to connectivity testing.
    parser = argparse.ArgumentParser(description="模型配置工具")
    parser.add_argument("--test-models", action="store_true", help="测试所有模型连通性")
    args = parser.parse_args()
    
    if args.test_models or auto_test_models:
        print("开始测试模型连通性...")
        test_all_models(verbose=True)
    else:
        print("已跳过模型连通性测试。如需测试，请使用 --test-models 参数或设置 auto_test_models=True")