import os

import streamlit as st
from langchain.text_splitter import RecursiveCharacterTextSplitter
from pypdf import PdfReader
from langchain.embeddings import HuggingFaceEmbeddings
from  langchain.vectorstores import FAISS

# Sidebar: app title plus a short description of what the app does
# (the markdown text says: "This project lets you upload a PDF file;
# you can type a question and search within the PDF" — for study/reference).
# NOTE: runs at import time, before main(), as is conventional for Streamlit.
with st.sidebar:
    st.title("Ask for pdf")
    st.markdown("""
    该项目提供pdf文件上传功能
    用户可以输入问题，在pdf文件中搜索""")
    st.write("学习和参考")


def main():
    """Render the main page.

    Accepts a PDF upload, extracts its text, splits it into overlapping
    chunks, embeds the chunks with a HuggingFace sentence-transformer,
    and builds an in-memory FAISS index over them.  (The question/search
    UI promised in the sidebar is not wired up yet.)
    """
    st.header("main page")
    pdf = st.file_uploader("请上传pdf文件")
    if pdf is None:
        # Nothing uploaded yet — render nothing further.
        return

    st.write(pdf.name)

    # Extract text from every page.  extract_text() may return None for
    # image-only pages, so substitute "" before joining; "".join avoids
    # the quadratic cost of repeated string concatenation.
    pdf_reader = PdfReader(pdf)
    txt = "".join(page.extract_text() or "" for page in pdf_reader.pages)
    # Show the full text once, after all pages are read (the original
    # re-rendered the growing accumulator inside the loop, once per page).
    st.write(txt)

    txt_splitter = RecursiveCharacterTextSplitter(
        chunk_size=100,
        chunk_overlap=20,
        length_function=len,
    )
    chunks = txt_splitter.split_text(text=txt)
    st.write(chunks)

    # Point huggingface_hub at a mirror so the model download works where
    # huggingface.co is unreachable.
    os.environ['HF_ENDPOINT'] = "https://hf-mirror.com"

    # Requires: pip install sentence-transformers
    # (HuggingFaceEmbeddings is already imported at module level.)
    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-mpnet-base-v2",
        model_kwargs={'device': 'cpu'},
        encode_kwargs={'normalize_embeddings': False},
    )

    # Build the vector index over the text chunks; kept as a local for now
    # until a retrieval/Q&A step is added.
    vector_store = FAISS.from_texts(chunks, embedding=embeddings)



# Script entry point: launch the Streamlit page when run directly.
if __name__ == '__main__':
    main()
