import json
from langchain_core.runnables import RunnableConfig
from langchain_community.chat_models import ErnieBotChat
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain_community.callbacks import get_openai_callback
from dotenv import load_dotenv, find_dotenv

_ = load_dotenv(find_dotenv())  # Load the local .env file, which defines OPENAI_API_KEY

# Model selection — swap the commented lines to try other backends.
# llm = ErnieBotChat(model_name='ERNIE-Bot-4')
# llm = ChatOpenAI(temperature=0, model="gpt-4")
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo")  # temperature=0 for deterministic fixes


# Parse text as JSON, asking the LLM to repair it on failure (up to 3 attempts).
def parse_or_fix(text: str, config: "RunnableConfig"):
    """Parse *text* as JSON, using the LLM to repair it when parsing fails.

    Makes up to 3 attempts: each failed ``json.loads`` feeds the text and
    the decode error through a fixing chain, and the (hopefully repaired)
    output is parsed again on the next iteration.

    Args:
        text: Candidate JSON string.
        config: RunnableConfig forwarded to the fixing chain's ``invoke`` so
            callbacks, tags, etc. propagate into the nested run.

    Returns:
        The parsed Python object, or the string ``"Failed to parse"`` after
        three failed attempts.
    """
    print("parse_or_fix...")
    # Built lazily: only construct the prompt|llm|parser pipeline if a
    # parse actually fails — the happy path never touches the model.
    fixing_chain = None
    for _ in range(3):
        try:
            print("loads...")
            return json.loads(text)
        # Catch only JSON decode errors: a genuine LLM/network failure in
        # fixing_chain.invoke below must propagate, not be retried as a
        # parse error (the original `except Exception` swallowed those).
        except json.JSONDecodeError as e:
            print("Error...", e)
            if fixing_chain is None:
                fixing_chain = (
                        ChatPromptTemplate.from_template(
                            "Fix the following text:\n\n```text\n{input}\n```\nError: {error}"
                            " Don't narrate, just respond with the fixed data."
                        )
                        | llm
                        | StrOutputParser()
                )
            text = fixing_chain.invoke({"input": text, "error": e}, config)
    return "Failed to parse"


# Demo: track OpenAI token/cost usage while fixing malformed JSON.
with get_openai_callback() as cb:
    print("Running...")
    # Wrap the plain function as a Runnable so the config (tags, callbacks)
    # flows through invoke() into any nested chain runs.
    fixer = RunnableLambda(parse_or_fix)
    run_config = {"tags": ["my-tag"], "callbacks": [cb]}
    output = fixer.invoke("{foo: bar}", run_config)
    print("Output...")
    print(output)
    # Usage summary accumulated by the OpenAI callback handler.
    print(cb)
