from openai import OpenAI
from dotenv import load_dotenv
import os
import time
import sys
import threading
import base64
from typing import Optional, List, Dict, Any, Union
from fastapi import APIRouter, HTTPException, Depends, status, File, UploadFile, Form
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
import uuid
from pydantic import BaseModel
import json

# Make the parent directory importable (image-processing agent modules live there)
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

load_dotenv()
# Default model key used when callers do not pass one explicitly.
# NOTE(review): "qwen_31132_244" is not defined in the base model_configs below —
# presumably it is supplied by secret_files/model_configs.py; verify.
select_model = "qwen_31132_244"

# Helper: fetch the first key of a mapping (None for an empty/missing one)
def get_first_key(dictionary):
    """Return the first key of *dictionary*, or None when it is empty or falsy."""
    if not dictionary:
        return None
    return next(iter(dictionary))

# Model registry. Every entry uses the OpenAI-compatible API surface
# ("openai_compat") so one client code path serves all providers. Each value
# holds the endpoint base_url, the provider-side model name, and the NAME of
# the environment variable that stores the API key (never the key itself).
model_configs = {
    # Google Generative Language API (GOOGLE_API_KEY)
    "gemma-3-27b-it": {
        "provider": "openai_compat",
        "base_url": "https://generativelanguage.googleapis.com/v1beta",
        "model_name": "gemma-3-27b-it",
        "api_key_name":"GOOGLE_API_KEY"
    },
    
    "gemini-2.0-flash-exp": {
        "provider": "openai_compat",
        "base_url": "https://generativelanguage.googleapis.com/v1beta",
        "model_name": "gemini-2.0-flash-exp",
        "api_key_name":"GOOGLE_API_KEY"
    },
    
    # Locally hosted deployment (CUSTOM_COMPAT_API_KEY)
    "qwen_25vl_72b_awq": {
        "provider": "openai_compat",
        "base_url": "http://localhost:8197/v1",
        "model_name": "Qwen/Qwen2.5-VL-72B-Instruct-AWQ",
        "api_key_name":"CUSTOM_COMPAT_API_KEY"
    },
    # Alibaba DashScope OpenAI-compatible endpoint (DASHSCOPE_API_KEY)
    "qwen-max-2025-01-25": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen-max-2025-01-25",
        "api_key_name":"DASHSCOPE_API_KEY"
    },
    
    "qwen-vl-max-2025-04-08": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen-vl-max-2025-04-08",
        "api_key_name":"DASHSCOPE_API_KEY"
    },
    
    "qwen2.5-vl-72b-instruct": {
        "provider": "openai_compat",
        "base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
        "model_name": "qwen2.5-vl-72b-instruct",
        "api_key_name":"DASHSCOPE_API_KEY"
    }
    
}

# Merge additional model configs from the secret_files directory, if present.
# Missing file is expected and non-fatal; any other import error is reported
# but never aborts module import.
try:
    # NOTE(review): sys/os are already imported at the top of the file and the
    # parent dir was already appended to sys.path above — these repeats are
    # redundant but harmless.
    import sys
    import os
    # Add the parent directory to sys.path so secret_files is importable
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    
    # Try to import the private (untracked) model configuration
    from secret_files.model_configs import model_configs as secret_model_configs
    
    # Secret entries override base entries with the same key
    model_configs.update(secret_model_configs)
    print(f"已成功导入secret_files/model_configs.py中的{len(secret_model_configs)}个模型配置")
except ImportError:
    print("未找到secret_files/model_configs.py文件，跳过导入")
except Exception as e:
    print(f"导入secret_files/model_configs.py时出错: {str(e)}")

# Module-level state shared by the connectivity-test functions below.
available_models = []  # filled by test_all_models() with keys that responded
unavailable_models = []  # keys that failed the connectivity test
connection_errors = {}  # failure reasons keyed by model: {model_key: error_message}
is_testing = False  # True while test_all_models() is running (simple reentrancy guard)
test_complete = False  # set once a full test pass has finished

def get_model_request_configs(model_key=None):
    """
    Build raw HTTP request settings for a configured model.

    Args:
        model_key: Key into ``model_configs``. ``None`` is not resolved to a
            default here; it simply fails the membership check.

    Returns:
        (str, dict): ``(request_url, headers)`` on success, where headers
            include the bearer-token ``Authorization`` entry.
        (bool, str): ``(False, error_message)`` when the model is unknown or
            its API-key environment variable is unset. Callers must check for
            the failure shape before unpacking.
    """
    if model_key not in model_configs:
        return False, f"模型 {model_key} 不存在于配置中"
    
    model_config = model_configs[model_key]
    api_key_name = model_config.get("api_key_name")
    
    # Credentials are kept out of the config dict; resolve them at call time.
    api_key = os.getenv(api_key_name)
    if not api_key:
        return False, f"未找到环境变量 {api_key_name} 的值"
    
    base_url = model_config.get("base_url")
    request_url = f"{base_url}/chat/completions"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    return request_url, headers

def test_model_connection(model_key, timeout=5):
    """
    Probe one configured model with a tiny chat request to verify connectivity.

    Args:
        model_key: Key into ``model_configs`` identifying the model.
        timeout: Per-request timeout in seconds.

    Returns:
        (bool, str): success flag plus a human-readable status message
        (success includes response time and the returned text).
    """
    if model_key not in model_configs:
        return False, f"模型 {model_key} 不存在于配置中"
    
    cfg = model_configs[model_key]
    key_env = cfg.get("api_key_name")
    
    # API keys live in the environment, never in the config dict itself.
    secret = os.getenv(key_env)
    if not secret:
        return False, f"未找到环境变量 {key_env} 的值"
    
    try:
        client = OpenAI(
            api_key=secret,
            base_url=cfg.get("base_url"),
            timeout=timeout
        )
        
        # A one-sentence, 5-token round trip is enough to prove reachability.
        started = time.time()
        response = client.chat.completions.create(
            model=cfg.get("model_name"),
            messages=[{"role": "user", "content": "Hello"}],
            max_tokens=5
        )
        elapsed = time.time() - started
        
        return True, f"连接成功，响应时间：{elapsed:.2f}秒，响应内容：{response.choices[0].message.content}"
    
    except Exception as e:
        return False, f"连接失败：{str(e)}"

def test_all_models(timeout=5, verbose=True):
    """Test connectivity of every configured model and update module state.

    Iterates over ``model_configs``, calling ``test_model_connection`` for each
    key, and rewrites the module-level ``available_models``,
    ``unavailable_models`` and ``connection_errors``.

    Args:
        timeout: Per-model request timeout in seconds.
        verbose: When True, print per-model progress and a summary.

    Returns:
        (list, list, dict): available keys, unavailable keys, and error
        messages keyed by model.
    """
    global available_models, unavailable_models, connection_errors, is_testing, test_complete
    
    # Reentrancy guard: if a test is already running, return current results.
    # NOTE(review): this flag is checked/set without a lock — presumably safe
    # enough for the intended single-background-thread use; confirm.
    if is_testing:
        return available_models, unavailable_models, connection_errors
    
    is_testing = True
    
    try:
        # Accumulate into a local list so the previous available_models survives
        # as a fallback if this run finds nothing reachable.
        _available_models = []
        unavailable_models = []
        connection_errors = {}
        
        for model_key in model_configs:
            if verbose:
                print(f"测试模型 {model_key} 连接状态...")
            try:
                is_available, message = test_model_connection(model_key, timeout)
                
                if is_available:
                    _available_models.append(model_key)
                    if verbose:
                        print(f"✅ {model_key}: {message}")
                else:
                    unavailable_models.append(model_key)
                    connection_errors[model_key] = message
                    if verbose:
                        print(f"❌ {model_key}: {message}")
            except Exception as e:
                # Isolate per-model failures so one bad model cannot abort the sweep
                unavailable_models.append(model_key)
                connection_errors[model_key] = f"测试过程发生异常: {str(e)}"
                if verbose:
                    print(f"❌ {model_key}: 测试过程发生异常: {str(e)}")
        
        # Only replace the published list when at least one model responded
        if _available_models:
            available_models = _available_models
        
        if verbose:
            print("\n=== 测试结果汇总 ===")
            print(f"可用模型 ({len(available_models)}): {', '.join(available_models)}")
            print(f"不可用模型 ({len(unavailable_models)}): {', '.join(unavailable_models)}")
        
        test_complete = True
        return available_models, unavailable_models, connection_errors
    
    finally:
        # Always clear the guard, even if the sweep raised
        is_testing = False


def get_client(model_key=None):
    """
    Construct an OpenAI-compatible client for one configured model.

    Args:
        model_key: Key into ``model_configs``; ``None`` falls back to the
            module-level ``select_model``.

    Returns:
        (OpenAI, str): the ready client and the provider-side model name.

    Raises:
        ValueError: if the key is unknown or its API-key environment variable
            is unset.
    """
    key = select_model if model_key is None else model_key
    
    if key not in model_configs:
        raise ValueError(f"模型 {key} 不存在于配置中")
    
    cfg = model_configs[key]
    env_name = cfg.get("api_key_name")
    
    # Resolve the credential from the environment at call time
    secret = os.getenv(env_name)
    if not secret:
        raise ValueError(f"未找到环境变量 {env_name} 的值")
    
    client = OpenAI(api_key=secret, base_url=cfg.get("base_url"))
    return client, cfg.get("model_name")

def get_available_models():
    """
    List model keys believed to be usable.

    Returns:
        list: the tested-available models when a connectivity sweep has
        produced results, otherwise every configured model key (untested).
    """
    # test_all_models() populates available_models; before any test runs we
    # optimistically expose the full configuration.
    return available_models if available_models else list(model_configs.keys())

def background_test_models():
    """Run the full connectivity sweep; intended as a worker-thread target."""
    try:
        print("开始在后台测试模型连接...")
        ok_models, _, _ = test_all_models(timeout=5, verbose=False)
        print(f"模型测试完成，找到 {len(ok_models)} 个可用模型: {', '.join(ok_models)}")
    except Exception as e:
        # Background thread: report and swallow so the process keeps running
        print(f"后台测试模型时发生错误: {str(e)}")

# Connectivity tests are no longer run automatically at import time, to avoid
# startup latency and connection issues; call test_all_models() explicitly
# (or background_test_models() from a thread) when needed.

if __name__ == "__main__":
    test_all_models(verbose=True)
