import os

# Credentials are loaded from a .env file by load_dotenv() below.
# Expected variables (examples — do not hard-code real keys here):
# os.environ['OPENAI_API_KEY'] = "sk-..."
# os.environ['OPENAI_API_BASE'] = "https://..."
# os.environ['HUGGINGFACEHUB_API_TOKEN'] = '...'
from dotenv import load_dotenv
load_dotenv()

import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI


# Set the Streamlit page title (UI text is Chinese: "PDF Q&A App")
st.title("PDF问答应用")

# File-upload widget restricted to PDFs; yields an UploadedFile object,
# or None until the user has uploaded something
uploaded_file = st.file_uploader("选择一个PDF文件", type="pdf")

def _extract_pdf_text(file) -> str:
    """Return the concatenated text of every page of the uploaded PDF.

    Pages for which PyPDF2 cannot extract text return None; those are
    replaced by "" so the result is always a str.
    """
    reader = PdfReader(file)
    # "".join avoids the quadratic cost of `+=` accumulation over many pages
    return "".join(page.extract_text() or "" for page in reader.pages)


@st.cache_resource(show_spinner=False)
def _build_qa_chain(chunks: tuple) -> RetrievalQA:
    """Embed *chunks*, index them in Chroma, and return a RetrievalQA chain.

    Cached with st.cache_resource so Streamlit reruns (which happen on every
    user interaction, e.g. typing a question) do not re-embed the document
    and re-pay for OpenAI embedding calls. *chunks* is a tuple so the cache
    key is hashable.
    """
    embeddings = OpenAIEmbeddings()
    # NOTE(review): a fixed persist_directory means re-indexing the same file
    # across sessions appends duplicate chunks to the store — confirm intended.
    vectorstore = Chroma.from_texts(list(chunks), embeddings, persist_directory="./chroma_db")
    # "stuff" chain: all retrieved chunks are stuffed into a single prompt
    return RetrievalQA.from_chain_type(
        llm=OpenAI(),
        chain_type="stuff",
        retriever=vectorstore.as_retriever(),
    )


if uploaded_file is not None:
    # Extract all text from the uploaded PDF
    pdf_text = _extract_pdf_text(uploaded_file)

    if not pdf_text:
        st.error("PDF 文件中未找到任何文本。")
    else:
        st.success("成功提取文本。")

        # Preview the first 500 characters of the document
        st.write(pdf_text[:500] + "...")

        # Split into overlapping chunks sized for the "stuff" QA prompt
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
        texts = text_splitter.split_text(pdf_text)

        if not texts:
            st.error("分割后的文本为空，请检查 PDF 文件的内容。")
        else:
            st.success(f"成功分割文本，分割出的文本块数量: {len(texts)}")

            # Build (or fetch from cache) the embeddings + vector store + chain
            qa_chain = _build_qa_chain(tuple(texts))

            # Ask the user for a question and answer it via the chain
            user_question = st.text_input("请输入您的问题：")
            if user_question:
                answer = qa_chain.run(user_question)
                st.write("回答：", answer)