from langchain.document_loaders import PyPDFLoader
from langchain.indexes.vectorstore import VectorstoreIndexCreator
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.embeddings import HuggingFaceBgeEmbeddings
from ChatglmCppAI import ChatglmCppAI

from dotenv import load_dotenv
import os

# Load environment variables (e.g. API keys) from a local .env file.
load_dotenv()

# Local LLM backend (chatglm.cpp wrapper) used by the map_reduce QA chain below.
llm = ChatglmCppAI()

# Default PDF to ingest; callers may pass their own path to load_pdf_and_save_to_index.
file_path = "./1.pdf"
# Root directory under which each named Chroma index is persisted.
local_persist_path = "./vector_store"
# BGE retrieval instruction prefix (Chinese: "Generate a representation for
# this sentence for retrieving related articles:") — required by bge-* models
# for query-side embedding. Runtime string; must stay in Chinese.
query_instruction = "为这个句子生成表示以用于检索相关文章："

def get_embeddings():
    """Build the BGE embedding model shared by indexing and querying.

    Loads the local bge-large-zh model onto the GPU and attaches the
    BGE query-instruction prefix used for retrieval queries.
    """
    bge_config = {
        "model_name": "../../models/bge-large-zh/",
        "model_kwargs": {"device": "cuda"},
        "query_instruction": query_instruction,
    }
    return HuggingFaceBgeEmbeddings(**bge_config)

def get_index_path(index_name):
    """Return the on-disk persistence directory for the named index."""
    index_dir = os.path.join(local_persist_path, index_name)
    return index_dir

def load_pdf_and_save_to_index(file_path, index_name):
    """Ingest a PDF: embed its pages and persist them as a Chroma index.

    Parameters:
        file_path: path to the PDF file to load.
        index_name: name of the index; stored under local_persist_path.
    """
    pdf_loader = PyPDFLoader(file_path)

    # Build the vector-store index from the PDF pages with BGE embeddings,
    # targeting the per-index persistence directory.
    index_creator = VectorstoreIndexCreator(
        vectorstore_kwargs={"persist_directory": get_index_path(index_name)},
        embedding=get_embeddings(),
    )
    index = index_creator.from_loaders(loaders=[pdf_loader])

    # Flush vectors to disk so load_index() can reopen this index later.
    index.vectorstore.persist()

def load_index(index_name):
    """Reopen a previously persisted Chroma index and wrap it for querying."""
    store = Chroma(
        persist_directory=get_index_path(index_name),
        embedding_function=get_embeddings(),
    )
    wrapper = VectorStoreIndexWrapper(vectorstore=store)
    return wrapper

def query_index(index, query):
    """Answer *query* against *index* and return the answer text.

    Retrieved chunks are pushed to the LLM one by one and the partial
    results are combined (map_reduce) into a final summarized answer.
    """
    result = index.query_with_sources(query, chain_type="map_reduce", llm=llm)
    return result['answer']

if __name__ == "__main__":
    # One-time setup: build the index before the first query, e.g.
    # load_pdf_and_save_to_index('./langchain.pdf', 'test1')

    # Fix: the original called query_index at import time and discarded the
    # returned answer; guard the demo behind __main__ and print the result.
    answer = query_index(load_index('test1'), 'langchain')
    print(answer)