import json
import os

import qianfan
from langchain_community.chat_models import ChatSparkLLM
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI

# Load provider endpoints and credentials from the local config file.
# JSON is UTF-8 by specification, so pin the encoding instead of relying on
# the platform/locale default (which breaks on e.g. Windows GBK locales).
with open('./model-url.json', 'r', encoding='utf-8') as file:
    llm_detail = json.load(file)

# The qianfan SDK picks up its credentials from these environment variables.
os.environ["QIANFAN_AK"] = llm_detail['model_qf']['ak']
os.environ["QIANFAN_SK"] = llm_detail['model_qf']['sk']


def find_topmost_key(nested_dict, target_key):
    """Return the top-level key of ``nested_dict`` under which ``target_key``
    occurs (at any nesting depth), or ``None`` if it is absent.

    Note: a key whose value is itself a dict is searched recursively rather
    than compared against ``target_key``; only non-dict entries are matched.

    Args:
        nested_dict: Arbitrarily nested dictionary to search.
        target_key: Key to locate.

    Returns:
        The top-level key containing ``target_key``, or ``None``.
    """
    for key, value in nested_dict.items():
        if isinstance(value, dict):
            result = find_topmost_key(value, target_key)
            # Compare against None explicitly: a truthiness test would
            # silently discard a falsy key (e.g. "") found deeper down.
            if result is not None:
                return key
        elif key == target_key:
            return key
    return None


def version_choose(chosen_model='moonshot-v1-auto', temp=0.1, question='你好'):
    """Route ``question`` to whichever provider hosts ``chosen_model``.

    The provider is determined by looking up ``chosen_model`` in the loaded
    ``llm_detail`` config: ``model_qf`` -> Baidu Qianfan, ``model_xf`` ->
    iFlytek Spark, anything else -> an OpenAI-compatible endpoint.

    Args:
        chosen_model: Model identifier to look up in the config.
        temp: Sampling temperature forwarded to the provider.
        question: User prompt text.

    Returns:
        The model's reply as a string.

    Raises:
        ValueError: If ``chosen_model`` is not found in the config.
    """
    model = find_topmost_key(llm_detail, chosen_model)
    print(model)
    if model is None:
        # Previously an unknown model fell through to llm_detail[None] and
        # crashed with an opaque KeyError; fail fast with a clear message.
        raise ValueError(f'unknown model: {chosen_model}')
    if model == 'model_qf':
        chat_comp = qianfan.ChatCompletion()
        # 指定特定模型 — the Qianfan branch pins this model name.
        # NOTE: temperature is a request-level parameter of do(); placing it
        # inside the message dict (as before) meant the SDK ignored it.
        resp = chat_comp.do(
            model="ERNIE-3.5-8K",
            messages=[{"role": "user", "content": question}],
            temperature=temp,
        )
        return resp["body"]['result']
    elif model == 'model_xf':
        llm = ChatSparkLLM(
            spark_api_url=llm_detail[model]['model_name'][chosen_model],
            spark_app_id=llm_detail[model]["appid"],
            spark_api_key=llm_detail[model]["ak"],
            spark_api_secret=llm_detail[model]["sk"],
            spark_llm_domain=chosen_model,
            temperature=temp,
            streaming=False,
            request_timeout=180
        )
        chain = llm | StrOutputParser()
        return chain.invoke(question)
    else:
        # Do not log the API key: the previous debug print wrote
        # llm_detail[model]["openai_api_key"] to stdout in clear text.
        print(f'model:{chosen_model}')
        llm = ChatOpenAI(
            model=chosen_model,
            temperature=temp,
            base_url=llm_detail[model]['model_name'][chosen_model],
            openai_api_key=llm_detail[model]["openai_api_key"],
            max_tokens=5000
        )
        chain = llm | StrOutputParser()
        return chain.invoke(question)

# print(version_choose("moonshot-v1-128k", 0.1, '你好'))
