import gradio as gr
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.llms import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders import TextLoader
import warnings
warnings.filterwarnings("ignore")

def upload_file(file):
    """Load the uploaded text file and return its documents plus a status message.

    Args:
        file: filesystem path of the uploaded file (from gr.File with
            type="filepath"), or None when nothing was uploaded.

    Returns:
        Tuple of (docs, status_message) where docs is the list of loaded
        Document objects, or None when no file was given or loading failed.
    """
    if file is None:
        return None, "请上传文件！"
    # Guard the load: a non-UTF-8 or unreadable file should surface as a
    # status message instead of crashing the Gradio callback.
    try:
        loader = TextLoader(file, encoding="utf-8")
        docs = loader.load()
    except Exception as exc:
        return None, f"文件读取失败：{exc}"
    return docs, "文件上传成功！您可以开始提问。"


def answer_question(docs, question, chat_history):
    """Answer a question from the uploaded documents and extend the chat log.

    Args:
        docs: list of Document objects produced by upload_file, or None if
            no file has been uploaded yet.
        question: the user's question string.
        chat_history: list of [question, answer] pairs backing the Chatbot.

    Returns:
        Tuple of (updated chat_history, new value for the question textbox) —
        two values, matching the two outputs wired up in the click handler.
    """
    # BUG FIX: the click handler declares two outputs, so this early return
    # must also yield two values; the original returned a single list, which
    # Gradio would misassign across the outputs. Keep the question text so
    # the user does not lose it.
    if docs is None:
        return chat_history + [[question, "请先上传文件！"]], question

    llm = Ollama(model="qwen2.5")
    prompt = ChatPromptTemplate.from_template("""仅根据提供的上下文回答以下问题:

        <context>
        {context}
        </context>

        Question: {input}""")

    # Use the stuff-documents chain (imported at the top of the file) so the
    # Document objects are formatted into plain text for {context} instead of
    # their Python repr leaking into the prompt, as `prompt | llm` would do.
    document_chain = create_stuff_documents_chain(llm, prompt)

    rag_answer = document_chain.invoke({
        "input": question,
        "context": docs
    })
    # Baseline answer without the document context, for comparison.
    plain_answer = llm.invoke(question)
    result = "RAG答案：" + rag_answer + "\n\n" + "-" * 80 + "\n" + "原本答案：" + plain_answer

    chat_history.append([question, result])
    # Clear the question textbox after a successful answer.
    return chat_history, ""


# Assemble the Gradio interface. Component creation order determines the
# on-screen layout, so it is kept exactly as before.
with gr.Blocks() as demo:
    # Session state holding the loaded documents (None until a file is uploaded).
    file_content_state = gr.State(value=None)

    # Conversation display.
    chat_history = gr.Chatbot(label="AI 对话")

    # File picker (passes a filesystem path to the handler).
    file_input = gr.File(label="上传文件", type="filepath")

    # Read-only status line reporting upload success/failure.
    upload_info = gr.Textbox(label="文件状态", interactive=False)

    # Free-text question entry.
    question_input = gr.Textbox(label="输入问题", placeholder="请在此输入您的问题...")

    # Action buttons.
    upload_button = gr.Button("上传文件")
    submit_button = gr.Button("提交问题")

    # Wire events: uploading stores the documents in state and updates the
    # status line; submitting answers from state and refreshes the chat.
    upload_button.click(
        fn=upload_file,
        inputs=file_input,
        outputs=[file_content_state, upload_info],
    )
    submit_button.click(
        fn=answer_question,
        inputs=[file_content_state, question_input, chat_history],
        outputs=[chat_history, question_input],
    )

# Run the app.
demo.launch()
