# import nltk
# nltk.download('punkt')

from langchain.document_loaders.pdf import PyMuPDFLoader
from langchain.document_loaders.text import TextLoader
from langchain.document_loaders.markdown import UnstructuredMarkdownLoader
# from langchain.document_loaders.markdown import UnstructuredMarkdownLoader
import os
# Load documents (.pdf / .md / .txt) from a folder tree.
def get_pdf_text(folder_path):
    """Recursively load every supported document under *folder_path*.

    Despite the legacy name, this handles three formats:
    .pdf (PyMuPDFLoader), .md (UnstructuredMarkdownLoader) and
    .txt (TextLoader, utf-8). Any other file is silently skipped.

    Args:
        folder_path: root directory to walk recursively.

    Returns:
        A flat list of langchain Document objects from all loaded files.
    """
    # Collect every file path under folder_path.
    file_paths = []
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_paths.append(os.path.join(root, file))

    # Instantiate one loader per supported file.
    loaders = []
    for file_path in file_paths:
        # os.path.splitext instead of split('.')[-1]: the latter
        # misclassifies extensionless files and paths whose directory
        # names contain dots (e.g. "data.v1/readme"). .lower() also
        # accepts upper-case extensions such as ".PDF".
        ext = os.path.splitext(file_path)[1].lower()
        if ext == '.pdf':
            loaders.append(PyMuPDFLoader(file_path))
        elif ext == '.md':
            loaders.append(UnstructuredMarkdownLoader(file_path))
        elif ext == '.txt':
            loaders.append(TextLoader(file_path, encoding='utf-8'))

    # Load each file and flatten the per-file document lists.
    texts = []
    for loader in loaders:
        texts.extend(loader.load())
    return texts


from langchain.text_splitter import RecursiveCharacterTextSplitter
 

# Split documents into overlapping chunks.
def get_text_chunks(text, chunk_size=80, chunk_overlap=30):
    """Split langchain Documents into overlapping character chunks.

    Args:
        text: list of langchain Document objects to split.
        chunk_size: maximum characters per chunk. Default 80 preserves
            the previously hard-coded value.
        chunk_overlap: characters shared between consecutive chunks.
            Default 30 preserves the previously hard-coded value.

    Returns:
        A list of chunked Document objects.
    """
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
    )
    return text_splitter.split_documents(text)


from zhipuai_embedding import ZhipuAIEmbeddings
# Module-level embedding client shared by the vector-store helpers below.
# NOTE(review): constructed at import time — importing this module
# presumably requires ZhipuAI credentials/config to be available; confirm.
embedding = ZhipuAIEmbeddings()


from langchain.vectorstores import Chroma

def save_vector_store_bd(textChunks, persist_directory="./Chromadb", max_docs=100):
    """Embed document chunks and persist them into a Chroma database.

    Args:
        textChunks: list of langchain Document chunks to embed.
        persist_directory: on-disk location of the Chroma store.
        max_docs: cap on how many chunks are embedded. The default (100)
            preserves the original hard-coded truncation; pass None to
            embed every chunk.
    """
    # NOTE(review): the original silently embedded only the first 100
    # chunks; the cap is now an explicit, overridable parameter.
    docs = textChunks if max_docs is None else textChunks[:max_docs]
    db = Chroma.from_documents(
        documents=docs,
        embedding=embedding,
        persist_directory=persist_directory,
    )
    # Flush the collection to disk.
    db.persist()

def load_vector_store_bd(persist_directory="./Chromadb"):
    """Open the persisted Chroma store, wired to the module-level embedding."""
    store = Chroma(
        persist_directory=persist_directory,
        embedding_function=embedding,
    )
    return store

def get_info_from_vector_store_bd(vectorstore, query, top_k=1):
    """Run a max-marginal-relevance search and return the matched texts.

    Args:
        vectorstore: store exposing ``max_marginal_relevance_search``.
        query: search string.
        top_k: number of documents to retrieve.

    Returns:
        The ``page_content`` strings of the retrieved documents, in order.
    """
    matches = vectorstore.max_marginal_relevance_search(query, k=top_k)
    contents = []
    for doc in matches:
        contents.append(doc.page_content)
    return contents



# if __name__ == '__main__':
#     # print(get_info_from_vector_store_bd(load_vector_store_bd(), "10%的利率，50年单利和复利的利息差异是多少", top_k=3))
#     import sys
#     # 命令行第二个参数为文件夹路径
#     folder_path = sys.argv[1]
#     if sys.argv[0] == 'python':
#         folder_path = sys.argv[2]
#     # 获取pdf文件内容
#     texts = get_pdf_text(folder_path)
#     print("text: ", texts)
#     # 拆分文本
#     chunks = get_text_chunks(texts)
#     print("chunks: ", chunks)
#     # 保存向量库
#     save_vector_store_bd(chunks, persist_directory="./Chromadb1")


 