import os, openai, gradio as gr
from langchain_openai import OpenAI
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationSummaryBufferMemory

# Route traffic through a local proxy to work around request timeouts.
# NOTE(review): assumes a proxy is listening on localhost:7890 — confirm.
os.environ["http_proxy"] = "http://localhost:7890"
os.environ["https_proxy"] = "http://localhost:7890"

# SECURITY: never hard-code API keys in source control. Require the key to
# be supplied via the environment and fail fast with a clear message if it
# is missing, instead of embedding the secret in the file.
if not os.environ.get("OPENAI_API_KEY"):
    raise RuntimeError(
        "OPENAI_API_KEY environment variable is not set; "
        "export it before running this script."
    )
openai.api_key = os.environ["OPENAI_API_KEY"]

# Conversation memory: keeps recent turns verbatim and, once the buffer
# exceeds max_token_limit tokens, summarizes the older turns with the LLM.
memory = ConversationSummaryBufferMemory(
    # Summarization model. The API key is read from the OPENAI_API_KEY
    # environment variable; the model defaults to gpt-3.5-turbo.
    llm=ChatOpenAI(),
    max_token_limit=2048,
)

# Chain that feeds the summarizing memory into each completion request.
conversation = ConversationChain(
    llm=OpenAI(
        # API key is taken from the OPENAI_API_KEY environment variable.
        max_tokens=2048,
        temperature=0.5,
    ),
    memory=memory,
)

"""基于记忆体实现对话的历史上下文管理"""
def chat(input, history=[]):
    history.append(input)
    response = conversation.predict(input=input)
    history.append(response)
    # history[::2] 切片语法，每隔两个元素提取一个元素，即提取出所有的输入，
    # history[1::2]表示从历史记录中每隔2个元素提取一个元素，即提取出所有的输出
    # zip函数把两个列表元素打包为元组的列表的方式
    responses = [(u, b) for u, b in zip(history[::2], history[1::2])]
    print("用户输入：", history[::2])
    print("AI回答：", history[1::2])
    print("上下文：", responses)
    return responses, history


"""可视化界面中实现AI对话"""
with gr.Blocks(css="#chatbot{height:800px} .overflow-y-auto{height:800px}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")
    state = gr.State([])

    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="请输入你的问题.")

    txt.submit(chat, [txt, state], [chatbot, state])

# 启动项目
demo.launch(share=True)
