import json
from langchain_community.chat_models import ChatSparkLLM  # 用于星火的聊天模型
from langchain_openai import ChatOpenAI  # 用于OpenAI的聊天模型

# Load the model endpoint/credential configuration once at import time.
# Explicit UTF-8 so parsing never depends on the platform's locale default
# encoding (the config may contain non-ASCII text).
with open('./model-url.json', 'r', encoding='utf-8') as file:
    llm_detail = json.load(file)


def find_topmost_key(nested_dict, target_key):
    """Return the key of *nested_dict* whose subtree contains *target_key*.

    Searches recursively: if *target_key* appears at any depth below a
    key of *nested_dict*, that enclosing key is returned; if *target_key*
    is itself a key of *nested_dict*, it is returned directly.

    Args:
        nested_dict: arbitrarily nested dict to search.
        target_key: key to locate.

    Returns:
        The matching key of *nested_dict*, or None when *target_key*
        does not occur anywhere.
    """
    for key, value in nested_dict.items():
        # Check the key itself first. The previous version only compared
        # keys in the non-dict branch, so a key whose VALUE was a dict
        # could never match directly — it was silently skipped.
        if key == target_key:
            return key
        if isinstance(value, dict):
            # Recurse into the sub-dict; a hit anywhere below means this
            # key is the topmost ancestor. Compare against None (not
            # truthiness) so a falsy key such as '' still counts as found.
            if find_topmost_key(value, target_key) is not None:
                return key
    return None


def version_choose(chosen_model='deepseek-chat', temp=0.1):
    """Build a chat-model client for *chosen_model* from the loaded config.

    Looks up which provider section of ``model-url.json`` declares
    *chosen_model* and instantiates the matching client.

    Args:
        chosen_model: model identifier; must appear under some provider's
            ``model_name`` mapping in the config.
        temp: sampling temperature forwarded to the client.

    Returns:
        A ``ChatSparkLLM`` for iFlytek Spark models (provider key
        ``model_xf``), otherwise a ``ChatOpenAI`` pointed at the
        provider's OpenAI-compatible endpoint.

    Raises:
        ValueError: when *chosen_model* is not present in the config.
            (Previously this fell through to ``llm_detail[None]`` and
            crashed with an opaque KeyError/TypeError.)
    """
    model = find_topmost_key(llm_detail, chosen_model)
    if model is None:
        raise ValueError(
            f"Unknown model '{chosen_model}': not found in model-url.json"
        )
    if model == 'model_xf':
        # iFlytek Spark uses its own websocket API and credential triple.
        return ChatSparkLLM(
            spark_api_url=llm_detail[model]['model_name'][chosen_model],
            spark_app_id=llm_detail[model]["appid"],
            spark_api_key=llm_detail[model]["ak"],
            spark_api_secret=llm_detail[model]["sk"],
            spark_llm_domain=chosen_model,
            temperature=temp,
            streaming=False,
            request_timeout=180
        )
    # Every other provider is assumed to expose an OpenAI-compatible
    # endpoint; base_url comes from the provider's model_name mapping.
    return ChatOpenAI(
        model=chosen_model,
        temperature=temp,
        base_url=llm_detail[model]['model_name'][chosen_model],
        openai_api_key=llm_detail[model]["openai_api_key"],
        max_tokens=5000
    )

# print(version_choose("moonshot-v1-128k", 0.1, '你好'))
