"""
使用Ollama + RAG模型进行问答。
"""

import gradio as gr
from langchain.text_splitter import CharacterTextSplitter
from langchain_community import embeddings
from langchain_community.chat_models import ChatOllama
from langchain_community.document_loaders import (WebBaseLoader, PyPDFLoader, TextLoader, Docx2txtLoader,
                                                  UnstructuredMarkdownLoader)
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough


def process_input(urls, question):
    """
    Answer a question about documents loaded from the given locations using a
    ChatOllama RAG chain.

    Args:
        urls: Newline-separated string of document locations — local pdf, txt,
            doc, docx or md file paths, or web page URLs (anything without a
            recognized extension is fetched as a web page).
        question: The user's question as a string.

    Returns:
        The model's answer as a string.
    """
    # Local chat model served by Ollama.
    model_local = ChatOllama(model="qwen:0.5b")

    # Dispatch table: lowercased file extension -> loader class.
    # Anything not listed is treated as a web page.
    loader_by_ext = {
        "pdf": PyPDFLoader,
        "txt": TextLoader,
        "doc": Docx2txtLoader,
        "docx": Docx2txtLoader,
        "md": UnstructuredMarkdownLoader,
    }

    docs_list = []
    for url in (line.strip() for line in urls.split("\n")):
        # Skip blank lines so trailing newlines in the textbox don't trigger
        # a bogus WebBaseLoader("") request.
        if not url:
            continue
        file_extension = url.rsplit(".", 1)[-1]
        print(f"文件地址:{url}, 扩展名:{file_extension}")
        # Match case-insensitively so ".PDF" etc. still use the right loader.
        loader_cls = loader_by_ext.get(file_extension.lower(), WebBaseLoader)
        # load() returns a list of Documents; extend keeps docs_list flat.
        docs_list.extend(loader_cls(url).load())

    # Split documents into chunks sized by tiktoken token count.
    text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=7500, chunk_overlap=100)
    doc_splits = text_splitter.split_documents(docs_list)

    # Embed the chunks with a local Ollama embedding model and index them in Chroma.
    vectorstore = Chroma.from_documents(
        documents=doc_splits,
        collection_name="rag-chroma",
        embedding=embeddings.ollama.OllamaEmbeddings(model='nomic-embed-text'),
    )
    retriever = vectorstore.as_retriever()

    # Prompt template: answer the question grounded in the retrieved context.
    after_rag_template = """根据以下上下文回答问题:
    {context}
    问题: {question}
    """
    after_rag_prompt = ChatPromptTemplate.from_template(after_rag_template)
    # LCEL chain: retrieve context -> fill prompt -> run model -> extract text.
    after_rag_chain = (
            {"context": retriever, "question": RunnablePassthrough()}
            | after_rag_prompt
            | model_local
            | StrOutputParser()
    )
    return after_rag_chain.invoke(question)


if __name__ == "__main__":
    # https://www.gradio.app/docs/interface
    # 定义Gradio界面
    iface = gr.Interface(fn=process_input,
                         inputs=[gr.Textbox(label="输入以新行分隔的url(本地pdf, txt, doc, docx, md和在线网页)", lines=5),
                                 gr.Textbox(label="问题:", lines=3)],
                         outputs=[gr.Textbox(label="答案:")],
                         title="使用Ollama查询文档",
                         description="输入url和查询文档的问题。",
                         clear_btn=gr.Button("清除"),
                         submit_btn=gr.Button("查询"))
    iface.launch(share=True, auth=("admin", "admin"))
