import glob
import os
import shutil
import time
from typing import List

from langchain.document_loaders import PyPDFium2Loader
from langchain_community.document_loaders import TextLoader, Docx2txtLoader
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_text_splitters import CharacterTextSplitter

from library.chains.recall_chain import RetrievalQAChain
from library.common.constants import *
from library.modelio.zhipu_glm import chat_model, embedding_model

DATA_PATH = './library/data/*'
VECTOR_STORE_PATH = './library/vector/'
# 创建 Text Splitter
text_splitter = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=512, chunk_overlap=20, separator='\n\n')

def load_doc(f) -> List[Document]:
    docs = []
    print("loading doc {}".format(f))
    if str(f).lower().endswith('.pdf'):
        pdf_docs = PyPDFium2Loader(f).load()
        docs += pdf_docs
    elif str(f).lower().endswith('.txt'):
        docs += TextLoader(f, encoding='UTF-8').load()
    elif str(f).lower().endswith('.docx'):
        docs += Docx2txtLoader(f).load()
    else:
        print("not load {}".format(f))
        pass
    return docs


def reload_db():
    # 清空目录
    if os.path.exists(VECTOR_STORE_PATH):
        os.rmdir(VECTOR_STORE_PATH)

    # 创建 Data Loader
    docs = [load_doc(f) for f in glob.glob(DATA_PATH)]
    docs = [i for f in docs for i in f]
    # 切分 Document
    documents = text_splitter.split_documents(docs)
    db = FAISS.from_documents(documents, embedding_model)
    if documents:
        db.save_local(VECTOR_STORE_PATH)
    return db


def get_db() -> FAISS:
    print(f"get_db: {time.time()}")
    # 判断是否已经有本地生成的 Embedding Vector
    if os.path.exists(VECTOR_STORE_PATH):
        db = FAISS.load_local(VECTOR_STORE_PATH, embedding_model, allow_dangerous_deserialization=True)
    else:
        db = reload_db()
    return db


def get_qa_chain():
    return RetrievalQAChain.from_llm(
        # return RetrievalQA.from_llm(
        llm=chat_model,
        retriever=get_db().as_retriever(
            search_type='similarity',
            search_kwargs={"k": 5}
        ),
        return_source_documents=True
    )


if __name__ == '__main__':
    reload_db()
