from langchain_community.document_loaders import PyPDFLoader
from langchain_community.llms import Tongyi
from langchain_community.embeddings import DashScopeEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.chains import RetrievalQA
import os
# SECURITY: an API key was previously hard-coded on this line and committed to
# source control — treat that key as leaked and rotate it. Prefer supplying
# DASHSCOPE_API_KEY via the environment; setdefault keeps a value that is
# already set instead of clobbering it.
os.environ.setdefault("DASHSCOPE_API_KEY", "sk-9d8f1914800e497f8717144e860f99bc")

# Tongyi (Qwen) LLM used to answer queries over the indexed documents.
llm = Tongyi()
# Source PDF to be indexed.
file_path = "./lyk.pdf"
# Root directory under which per-index Chroma stores are persisted.
local_persist_path = "./vector_store"

# 构建持久化索引名称
def get_index_path(index_name):
    """Return the on-disk directory where the named vector index is persisted."""
    index_dir = os.path.join(local_persist_path, index_name)
    return index_dir

# 文本加载-向量化-持久化存储-公开索引
def load_pdf_and_save_to_index(file_path, index_name):
    """Load a PDF, split it into chunks, embed them, and persist a Chroma index.

    Args:
        file_path: Path to the source PDF file.
        index_name: Name of the index; the store is persisted under
            ``get_index_path(index_name)``.

    Returns:
        The populated ``Chroma`` vector store (previously this returned
        ``None``; returning the store lets callers query it immediately).
    """
    # 1. Load the document (one Document per page).
    loader = PyPDFLoader(file_path)
    documents = loader.load()
    # 2. Split into deliberately small chunks so retrieval stays fine-grained.
    text_splitter = CharacterTextSplitter(
        chunk_size=100,
        chunk_overlap=20,
    )
    texts = text_splitter.split_documents(documents)
    # 3. Embed the chunks with DashScope embeddings.
    embeddings = DashScopeEmbeddings(
        model="text-embedding-v1"
    )
    # 4. Build the vector store and persist it to disk.
    # ``from_documents`` is a classmethod — call it on the class rather than
    # instantiating a throwaway ``Chroma()`` first, which the original did.
    db = Chroma.from_documents(
        texts,
        embeddings,
        persist_directory=get_index_path(index_name),
    )
    db.persist()
    return db

# 从磁盘加载向量索引至内存
def load_index(index_name):
    """Load a persisted Chroma index from disk and wrap it for querying.

    Args:
        index_name: Name of a previously persisted index (see
            ``load_pdf_and_save_to_index``).

    Returns:
        A ``VectorStoreIndexWrapper`` around the loaded store.
    """
    # The same embedding model used at indexing time must be supplied so that
    # query vectors live in the same space as the stored ones.
    embeddings = DashScopeEmbeddings(model="text-embedding-v1")
    vectordb = Chroma(
        persist_directory=get_index_path(index_name),
        embedding_function=embeddings,
    )
    return VectorStoreIndexWrapper(vectorstore=vectordb)


def query_index_lc(index, query):
    """Run a sourced QA query against a wrapped index and return the answer text.

    Args:
        index: A ``VectorStoreIndexWrapper`` (e.g. from ``load_index``).
        query: Natural-language question to answer.

    Returns:
        The ``'answer'`` field of the query result.
    """
    # Uses the module-level ``llm`` with the "stuff" chain type.
    result = index.query_with_sources(question=query, llm=llm, chain_type="stuff")
    return result['answer']

# Example usage — note query_index_lc expects an index *object*, not a name:
# ans = query_index_lc(load_index("simon_index001"), "郎亚坤的正式薪资信息")
# print(ans)




