import os
from mistralai import Mistral
from zhipuai import ZhipuAI
from openai import OpenAI
from Shell.utils.config_manager import get_model_config, get_supported_models


def initialize_client(model_name):
    """Build and return an API client for the given model.

    Looks up the model's credentials via ``get_model_config`` and its
    provider type via ``get_supported_models`` (backed by model.json),
    then instantiates the matching SDK client.

    Args:
        model_name: Name of the model as listed in model.json.

    Returns:
        A ``Mistral``, ``ZhipuAI``, or ``OpenAI`` client instance.

    Raises:
        ValueError: If the model is not listed in model.json, or its
            provider type has no client implementation here.
    """
    model_config = get_model_config(model_name)
    api_key = model_config.get("api_key")
    # base_url is only needed for OpenAI-compatible endpoints.
    model_url = model_config.get("model_url")

    # Find which provider type (if any) lists this model in model.json.
    supported_models = get_supported_models()
    model_type = None
    for m_type, models in supported_models.items():
        if model_name in models:
            model_type = m_type
            break

    if model_type is None:
        # Message made consistent with create_completion (was a debug
        # leftover: "不支持的模型llll").
        raise ValueError(f"不支持的模型: {model_name}")

    # Normalize once instead of lowering on every comparison.
    model_type = model_type.lower()
    if model_type == "mistral":
        return Mistral(api_key=api_key)
    if model_type == "glm":
        return ZhipuAI(api_key=api_key)
    if model_type == "openai":
        return OpenAI(api_key=api_key, base_url=model_url)
    # Model is listed in model.json but under an unknown provider type.
    raise ValueError(f"不支持的模型: {model_name}")


def create_completion(client, messages, model_name):
    """Dispatch a chat-completion request through the given client.

    Resolves the model's provider type from model.json and calls the
    provider-specific completion method.

    Args:
        client: A client previously returned by ``initialize_client``.
        messages: Chat messages to send.
        model_name: Name of the model as listed in model.json.

    Returns:
        The provider SDK's completion response object.

    Raises:
        ValueError: If the model's provider type is not recognized.
    """
    # Locate the provider type that lists this model; "" if not found.
    supported_models = get_supported_models()
    model_type = next(
        (
            m_type
            for m_type, models in supported_models.items()
            if model_name in models
        ),
        "",
    )

    provider = model_type.lower()
    if provider == "mistral":
        return client.chat.complete(model=model_name, messages=messages)
    # GLM and OpenAI SDKs share the same completions interface.
    if provider in ("glm", "openai"):
        return client.chat.completions.create(model=model_name, messages=messages)
    raise ValueError(f"不支持的模型: {model_name}")
