import os

import gradio as gr
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI

# SECURITY: the API key was previously hard-coded here. Prefer the
# ZHIPUAI_API_KEY environment variable; the inline literal is kept only as a
# backward-compatible fallback. TODO(review): rotate this key — it has been
# committed to source control and must be considered compromised.
api_key = os.environ.get('ZHIPUAI_API_KEY',
                         '503687a4a1b3de564e8a65c9da87b3da.VOdqJVr0l2WX69Ie')

# GLM-4 served through ZhipuAI's OpenAI-compatible endpoint.
# streaming=True + the stdout callback echoes tokens to the console as they
# arrive; the Gradio UI still receives the full completed message.
llm = ChatOpenAI(temperature=0.95, model_name='glm-4', openai_api_key=api_key,
                 openai_api_base='https://open.bigmodel.cn/api/paas/v4/', streaming=True,
                 callbacks=[StreamingStdOutCallbackHandler()])


def predict(message: str, history: list) -> str:
    """Gradio ChatInterface callback: answer `message` given prior turns.

    Args:
        message: The user's newest input.
        history: Prior turns as (human, ai) string pairs — the tuple format
            Gradio's ChatInterface passes by default.

    Returns:
        The assistant's reply text for the new message.
    """
    # Rebuild the conversation as LangChain message objects, alternating
    # Human/AI, so the chat model sees the full context.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))

    history_langchain_format.append(HumanMessage(content=message))
    # Use .invoke(): calling the model object directly (llm(...)) is the
    # deprecated LangChain __call__ style and is removed in newer releases.
    glm_response = llm.invoke(history_langchain_format)
    return glm_response.content


gr.ChatInterface(predict).queue().launch()
