"""
专门处理pdf文件
"""
#参考官网：https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain_openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import os,streamlit as st
from langchain.schema import Document
from template import user_template, bot_template
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.chains import LLMChain,StuffDocumentsChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOllama
from langchain.embeddings import OllamaEmbeddings

# file_path=os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),'file_info','test.pdf')

# OneAPI gateway configuration (currently unused — the local Ollama models below are active).
# SECURITY NOTE(review): this API key is hard-coded in source control; move it to an
# environment variable or a secrets store before sharing or deploying this file.
ONEAPI_API_BASE = "http://localhost:3000/v1"
ONEAPI_CHAT_API_KEY = "sk-4Rqi5cIas3Rg5cwh59231eB0B82c42E7AdAe9a17CcE29591"
ONEAPI_CHAT_MODEL = "qwen-plus"
ONEAPI_EMBEDDING_API_KEY = "sk-4Rqi5cIas3Rg5cwh59231eB0B82c42E7AdAe9a17CcE29591"
ONEAPI_EMBEDDING_MODEL = "text-embedding-v1"

# Alternative: OpenAI-compatible embeddings served through the OneAPI gateway.
# embedding_model = OpenAIEmbeddings(model=ONEAPI_EMBEDDING_MODEL,
#                                    base_url=ONEAPI_API_BASE,
#                                    api_key=ONEAPI_EMBEDDING_API_KEY,
#                                    deployment=ONEAPI_EMBEDDING_MODEL)
# Local embedding model served by Ollama.
embedding_model = OllamaEmbeddings(model="nomic-embed-text")
# Alternative: chat model served through the OneAPI gateway.
# llm = ChatOpenAI(
#     base_url=ONEAPI_API_BASE,
#     api_key=ONEAPI_CHAT_API_KEY,
#     model=ONEAPI_CHAT_MODEL,  # model used for this run
#     temperature=0,  # sampling randomness; usually 0 for deterministic output
#     timeout=None,  # request timeout
#     max_retries=2,  # max retries on failure
# )
# Local chat model served by Ollama; low temperature keeps answers focused.
llm = ChatOllama(model="qwen2", temperature=0.1, stream=True)


def extract_text_from_pdf(pad_files):
    """Extract and concatenate the text of every page of several PDF files.

    Args:
        pad_files: iterable of PDF file paths or file-like objects accepted
            by ``PyPDF2.PdfReader``.

    Returns:
        str: the text of all pages of all files joined together; the empty
        string when ``pad_files`` is empty.
    """
    pages = []
    for pdf in pad_files:
        pdf_reader = PdfReader(pdf)
        for page in pdf_reader.pages:
            # Bug fix: extract_text() can return None (e.g. image-only pages),
            # which previously crashed the `+=` concatenation with a TypeError.
            pages.append(page.extract_text() or "")
    # join() avoids the quadratic cost of repeated string concatenation.
    return "".join(pages)

def load_file_name(file_type, file_name):
    """Load a document from the project's ``file_info`` directory and split it.

    Args:
        file_type: file format, e.g. ``"pdf"`` or ``"docx"`` (case-insensitive).
        file_name: name of the file inside the ``file_info`` directory.

    Returns:
        list: documents produced by the loader's ``load_and_split()``.

    Raises:
        ValueError: if ``file_type`` is not a supported format.
    """
    # The file lives in <project-root>/file_info/<file_name>, two levels above this module.
    file_info = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
                             'file_info', file_name)
    print("file_info===>", file_info)
    kind = file_type.lower()
    if kind == "pdf":
        loader = PyPDFLoader(file_info)
    elif kind == "docx":
        # NOTE(review): TextLoader expects plain text; confirm this is the
        # intended loader for .docx files.
        loader = TextLoader(file_info)
    else:
        # Bug fix: an unsupported type previously fell through and raised a
        # confusing NameError on the unbound ``loader``; fail fast instead.
        raise ValueError(f"Unsupported file type: {file_type}")
    return loader.load_and_split()

def split_content_into_chuncks(file_type='pdf', file_name='test.pdf'):
    """Load a document and split it into overlapping text chunks.

    Generalized from the previously hard-coded ``'pdf'``/``'test.pdf'``;
    calling with no arguments behaves exactly as before.

    Args:
        file_type: format passed to ``load_file_name`` (default ``'pdf'``).
        file_name: file name passed to ``load_file_name`` (default ``'test.pdf'``).

    Returns:
        list: document chunks of ~500 characters with an 80-character overlap,
        split on newline boundaries.
    """
    documents = load_file_name(file_type, file_name)
    text_spliter = CharacterTextSplitter(chunk_size=500,
                                         chunk_overlap=80,
                                         separator='\n')
    return text_spliter.split_documents(documents)


def save_chunck_into_vectorstore(input_info):
    """Embed the document chunks into a Chroma vector store and return the
    chunks most relevant to the query ``input_info``.

    Args:
        input_info: the user's query string.

    Returns:
        list: the 5 chunks most similar to the query.
    """
    chunks = split_content_into_chuncks()
    print("===>", chunks)
    # Build the vector store from the chunks (one embedding is computed per chunk).
    store = Chroma.from_documents(chunks, embedding_model)
    # Wrap the store as a retriever that returns the top-5 similar chunks.
    retriever = store.as_retriever(search_type="similarity", search_kwargs={"k": 5})
    relevant_docs = retriever.get_relevant_documents(input_info)
    print("query_info_res===>", relevant_docs)
    return relevant_docs


def chain_func(vectorstore_result, input_info):
    """Answer the user's question using the retrieved documents as context.

    Args:
        vectorstore_result: list of Document objects retrieved from the vector store.
        input_info: the user's question.

    Returns:
        str: the model's answer text.
    """
    print("vectorstore_result--->", vectorstore_result)
    template="""
    你是一位拥有丰富知识的ai助手，可以协助用户处理相关文档问题
    --------------------
    {page_content}
    --------------------
    请依据上面的内容回答问题：
    user：{input_info}
    """
    prompt = PromptTemplate(input_variables=['page_content', 'input_info'], template=template)
    chain = LLMChain(prompt=prompt, llm=llm)
    # StuffDocumentsChain "stuffs" every retrieved document into the prompt's
    # ``page_content`` slot, so the LLM sees the full context in one call.
    workchain = StuffDocumentsChain(llm_chain=chain, document_variable_name="page_content")
    response = workchain.invoke({"input_documents": vectorstore_result,
                                 "input_info": input_info})
    print(response["output_text"])
    # Bug fix: the answer was previously only printed and the function returned
    # None; return it so callers can actually use the result.
    return response["output_text"]


# def process_user_input(user_input):
#     #调用函数st。session——state。conversation并把用户输入的内容作为一个问题传入，返回响应
#     response = st.session_state.conversation({"question": user_input})
#     #session状态是streamlit中的一个特性，允许在用户的多个请求之间保存数据
#     st.session_state.chat_history = response['chat_history']
#     #显示聊天
#     for i, message in enumerate(st.session_state.chat_history):
#         #用户输入
#         if i % 2 == 0:
#             st.write(user_template.replace("{{MSG}}", message.content),
#                      unsafe_allow_html=True)
#         else:
#             st.write(bot_template.replace("{{MSG}}", message.content),
#                      unsafe_allow_html=True)


if __name__ == '__main__':
    # Simple console REPL: type a question, get an answer grounded in the
    # document chunks; type "exit" to quit.
    while True:
        question = input("请输入你的问题：")
        if question.lower() == "exit":
            break
        relevant_docs = save_chunck_into_vectorstore(question)
        # Bug fix: the same question now drives both retrieval and answering.
        # Previously the user was prompted twice and the answer was generated
        # from chunks retrieved for a *different* question.
        chain_func(relevant_docs, question)
