# NOTE(security): a real-looking API key was committed on this line — it has been
# redacted here; rotate that key and load credentials from the environment instead.
import os
from concurrent.futures import ThreadPoolExecutor, as_completed

from openai import OpenAI

# 配置API Key和Base URL
try:
    API_KEY = os.getenv("sk-6bf84288ecdc45298ef47b7c28380766") or "sk-6bf84288ecdc45298ef47b7c28380766"  # 替换为你的API Key
    BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
except Exception as e:
    print(f"环境变量配置错误: {e}")
    exit()


# 初始化OpenAI客户端
def create_client():
    return OpenAI(api_key=API_KEY, base_url=BASE_URL)


# 调用单个模型
def call_model(model_name, messages):
    try:
        print(f"正在调用模型 {model_name}...")
        client = create_client()
        completion = client.chat.completions.create(
            model=model_name,
            messages=messages
        )
        response = completion.choices[0].message.content
        print(f"{model_name} 的响应: {response}")
        return response
    except Exception as e:
        print(f"调用模型 {model_name} 出错: {str(e)}")
        return None


# 并发调用多个模型
def call_multiple_models(model_names, messages):
    results = {}

    # 使用线程池实现并发调用
    with ThreadPoolExecutor(max_workers=len(model_names)) as executor:
        future_to_model = {
            executor.submit(call_model, model_name, messages): model_name
            for model_name in model_names
        }

        for future in future_to_model:
            model_name = future_to_model[future]
            try:
                results[model_name] = future.result()
            except Exception as e:
                print(f"处理模型 {model_name} 的结果时出错: {str(e)}")
                results[model_name] = None

    return results


# 主函数
if __name__ == "__main__":
    # 定义需要调用的模型名称
    model_names = ["qwen-plus", "qwen-max", "qwen-turbo"]  # 移除了无效的 qwen-qwq

    # 定义通用消息
    messages = [
        {'role': 'system', 'content': 'You are a helpful assistant.'},
        {'role': 'user', 'content': '请做一个简短的自我介绍。'}
    ]

    # 调用模型并获取响应
    results = call_multiple_models(model_names, messages)

    # 打印结果
    print("\n--- 各模型的自我介绍 ---")
    for model_name, response in results.items():
        print(f"{model_name}: {response}")