import streamlit as st
from langchain_community.document_loaders import PyPDFLoader, TextLoader, UnstructuredWordDocumentLoader
import tempfile
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_ollama import OllamaEmbeddings
from langchain_chroma import Chroma
from langchain_core.prompts import PromptTemplate
from langchain_deepseek import ChatDeepSeek
from langchain_core.output_parsers import StrOutputParser
import os

# Lazily create the DeepSeek chat model once per browser session; Streamlit
# reruns the whole script on every interaction, so cache it in session_state.
if "llm" not in st.session_state:
    st.session_state["llm"] = ChatDeepSeek(model="deepseek-chat", api_key=os.getenv("DEEPSEEK_API_KEY"))

# Seed the chat history with a single greeting from the assistant.
if "messages" not in st.session_state:
    st.session_state["messages"] = [("ai", "我是Leoh RAG助手，上传文件后可与我对话~")]

with st.sidebar:
    files = st.file_uploader(
        label='Upload file',
        type=['pdf', 'txt', 'doc', 'docx'],
        accept_multiple_files=True,
    )
    if st.button('Upload', use_container_width=True):
        if not files:
            st.info('No file selected.')
            # BUG FIX: without st.stop() the script fell through and crashed
            # iterating over `files` (None) inside the spinner below.
            st.stop()
        with st.spinner("Uploading..."):
            all_docs = []
            # 1. Load every uploaded file into LangChain Document objects.
            for file in files:
                # Persist the upload to a *closed* temp file so the loaders can
                # reopen it by path (an open NamedTemporaryFile cannot be
                # reopened on Windows). Keep only the extension as the suffix.
                suffix = os.path.splitext(file.name)[1]
                with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as temp_file:
                    temp_file.write(file.getvalue())
                    temp_path = temp_file.name
                try:
                    # Pick a loader from the browser-reported MIME type.
                    if file.type == 'application/pdf':
                        loader = PyPDFLoader(temp_path)
                    elif file.type == 'text/plain':
                        loader = TextLoader(temp_path)
                    else:
                        # .doc / .docx fall back to the Word loader.
                        loader = UnstructuredWordDocumentLoader(temp_path)
                    all_docs.extend(loader.load())
                finally:
                    # BUG FIX: delete=False temp files were never removed,
                    # leaking one file per upload.
                    os.remove(temp_path)

            # 2. Split documents into overlapping chunks. The lookbehind
            #    patterns prefer breaking after Chinese sentence/clause
            #    punctuation; is_separator_regex=True is required for them to
            #    be treated as regexes (the default matches them literally).
            splitter = RecursiveCharacterTextSplitter(
                chunk_size=900,
                chunk_overlap=90,
                separators=['\n\n', '\n', "(?<=[。！？])", "(?<=[，；、])", " ", ""],
                is_separator_regex=True,
            )
            chunks = splitter.split_documents(all_docs)

            # 3. Embed the chunks and build an in-memory Chroma vector store.
            embedding = OllamaEmbeddings(model="shaw/dmeta-embedding-zh:latest")
            db = Chroma.from_documents(documents=chunks, embedding=embedding)

            # 4. Keep the store in session state for the chat handler below.
            st.session_state['db'] = db


st.title('Leoh RAG应用')

# Replay the saved conversation so it survives Streamlit's rerun model.
for role, content in st.session_state['messages']:
    st.chat_message(role).write(content)

question = st.chat_input("请输入内容...")
if question:
    if 'db' not in st.session_state:
        # No vector store yet — ask the user to upload documents first.
        st.info('请先上传提示文件！')
        st.stop()
    # Show the user's turn on the page and record it in the history.
    st.chat_message("user").write(question)
    st.session_state['messages'].append(("user", question))
    # Retrieve up to the 9 most relevant chunks from the vector store and
    # join them into a single context string.
    retriever = st.session_state['db'].as_retriever(search_kwargs={"k": 9})
    context = "\n\n".join(doc.page_content for doc in retriever.invoke(question))
    # Prompt grounding the model in the retrieved context only.
    prompt_template = PromptTemplate.from_template(
        """使用以下上下文信息回答问题。如果你不知道答案，就说你不知道，不要编造答案。
        
        上下文：
        {context}
        
        问题：
        {question}
            
        请用中文给出有帮助的回答："""
    )
    # Chain: prompt -> LLM -> plain string, streamed to the page.
    rag_chain = prompt_template | st.session_state['llm'] | StrOutputParser()
    answer_stream = rag_chain.stream({"context": context, "question": question})
    ai_message = st.chat_message("ai").write_stream(answer_stream)
    # Persist the assistant's full answer once streaming completes.
    st.session_state['messages'].append(("ai", ai_message))