"""
通过读入pdf或者word的文章，实现RAG，使用gradio来实现UI。
"""

from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores.faiss import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate, format_document
from langchain_core.runnables import RunnablePassthrough
from langchain_ollama import ChatOllama, OllamaEmbeddings
import gradio as gr

base_ip = "10.12.8.21:11434"
model_name = "qwen2.5:14b"
num_ctx = 10000
llm = ChatOllama(base_url=base_ip, model=model_name, num_ctx=num_ctx)
embedding_model = OllamaEmbeddings(base_url=base_ip, model=model_name)

# 准备Model I/O 三元组
template = """仅根据以下上下文回答问题:{context},
            问题:{question}
                """
# template = """Answer the question based only on the following context:{context}
#         Question:{question}
#         """
prompt = ChatPromptTemplate.from_template(template)
# 构建Document转文本段落的工具函数
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")


def get_loader(file_obj):
    """Pick a document loader based on the uploaded file's extension.

    Args:
        file_obj: object with a ``.name`` attribute holding the file path
            (e.g. a gradio upload).

    Returns:
        A langchain loader for ``.pdf`` / ``.txt`` / ``.docx`` files.

    Raises:
        Exception: for any other extension.
    """
    # Bug fix: the loaders expect a filesystem path string, not the upload
    # object itself — extract the path once and hand it to the loader.
    path = file_obj.name
    if path.endswith(".pdf"):
        return PyPDFLoader(path)
    if path.endswith(".txt"):
        return TextLoader(path)
    if path.endswith(".docx"):
        return Docx2txtLoader(path)
    raise Exception("目前只支持pdf文件与txt、docx文件")


def get_words_count(pages):
    """Return the total number of whitespace-separated words across all pages."""
    return sum(len(page.page_content.split()) for page in pages)


def create_vs(embedding_model, pages, chunk_size=100, chunk_overlap=10):
    """Chunk *pages*, index the chunks in a FAISS store, and return a retriever.

    Args:
        embedding_model: embeddings used to vectorize each chunk.
        pages: list of langchain Documents to index.
        chunk_size: maximum characters per chunk.
        chunk_overlap: characters shared between adjacent chunks.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    pieces = splitter.split_documents(pages)
    store = FAISS.from_documents(pieces, embedding_model)
    return store.as_retriever()


def get_text_page(pages):
    """Extract the raw text of every page as a list of strings."""
    texts = []
    for page in pages:
        texts.append(page.page_content)
    return texts


def _combine_documents(docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"):
    """Render each Document through *document_prompt* and join the results
    with *document_separator* into one context string."""
    rendered = (format_document(doc, document_prompt) for doc in docs)
    return document_separator.join(rendered)


def get_chain(file_obj, embedding_model, llm):
    """Build a RAG chain for the uploaded document.

    Documents longer than 8000 words are chunked and indexed in FAISS so the
    retriever fetches only question-relevant chunks; shorter documents are
    stuffed whole into the prompt context.

    Args:
        file_obj: uploaded file object with a ``.name`` path attribute.
        embedding_model: embeddings used to build the vector store.
        llm: chat model that answers the question.

    Returns:
        A runnable chain mapping a question string to the model's reply.
    """
    pages = get_loader(file_obj).load()
    if get_words_count(pages) > 8000:
        # Retrieval path: fetch only the chunks relevant to each question.
        retriever = create_vs(embedding_model, pages)
        context = retriever | _combine_documents
    else:
        # Bug fix #1: the original passed get_text_page(pages) — plain
        # strings — into _combine_documents, but format_document requires
        # Document objects; pass the pages themselves.
        # Bug fix #2: a dict value in an LCEL parallel map must be a
        # runnable/callable, not a pre-computed string — wrap it in a lambda
        # that ignores the incoming question.
        stuffed = _combine_documents(pages)
        context = lambda _: stuffed
    chain = (
        {"context": context, "question": RunnablePassthrough()}
        | prompt
        | llm
    )
    return chain


def predict(message, history):
    """Stream a chat reply for *message* given gradio's (user, bot) history.

    Converts the tuple-style history into OpenAI-format role messages, then
    yields progressively longer prefixes of the assistant's reply so the
    gradio UI renders the stream incrementally.

    NOTE(review): the RAG chain from `get_chain` is never used here — the
    model answers without the uploaded document; confirm intended wiring.
    """
    messages = []
    for human, assistant in history:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    partial_message = ""
    for chunk in llm.stream(messages):
        # Skip empty/metadata chunks instead of re-yielding the same prefix.
        if chunk.content:
            partial_message += chunk.content
            yield partial_message


gr.ChatInterface(predict).launch()

