import os

from langchain_community.document_loaders import CSVLoader
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.vectorstores import Chroma, FAISS
from langchain_text_splitters import RecursiveCharacterTextSplitter


def create_index(key, files, index_dir="faiss_index"):
    """Build, save, and reload a FAISS index from CSV files, printing a
    sample similarity-search result before and after the round-trip.

    Args:
        key: DashScope API key; exported into the environment so the
            embedding client can authenticate.
        files: iterable of CSV file paths to load and index.
        index_dir: directory used for BOTH saving and reloading the FAISS
            index. (Bug fix: the original saved to "/content/faiss_index"
            but reloaded from "faiss_index", so the load never saw the
            freshly saved index.)
    """
    os.environ["DASHSCOPE_API_KEY"] = key

    # Hoisted out of the loop: the splitter and embedding client are
    # loop-invariant, so construct them once instead of per file.
    text_split = RecursiveCharacterTextSplitter(
        chunk_size=600,
        chunk_overlap=100,
        # Fall back through paragraph, line, Chinese sentence/phrase
        # punctuation, space, and finally character-level splits.
        separators=["\n\n", "\n", "。", "，", " ", ""]
    )
    embedding_model = DashScopeEmbeddings(
        model="text-embedding-v1"
    )

    for file in files:
        # Load the CSV; utf-8-sig strips a leading BOM if one is present.
        loader = CSVLoader(file, encoding='utf-8-sig')
        document = loader.load()

        # Split into overlapping chunks sized for the embedding model.
        documents = text_split.split_documents(document)

        # Embed and build the in-memory FAISS index.
        db = FAISS.from_documents(documents, embedding_model)
        query = "评分高的喜剧片"
        docs = db.similarity_search(query)
        print(docs[0].page_content)

        print("####")

        # Persist and reload from the SAME directory (see docstring).
        db.save_local(index_dir)
        # allow_dangerous_deserialization is required by recent
        # langchain-community releases because the FAISS docstore is
        # pickled; safe here since we load only what we just wrote.
        new_db = FAISS.load_local(
            index_dir, embedding_model, allow_dangerous_deserialization=True
        )
        new_docs = new_db.similarity_search(query)
        print(new_docs[0].page_content)


# Proper entry-point guard. The original attempt, `if '__name__' ==
# '__main__':`, compared two string literals and was always False, which
# is presumably why it had been commented out.
if __name__ == '__main__':
    files = ["D:/BaiduNetdiskDownload/staff.csv"]
    print("开始")
    # SECURITY: a hard-coded API key is checked into source — rotate it.
    # Prefer the environment variable; the literal remains only as a
    # backward-compatible fallback.
    api_key = os.environ.get(
        "DASHSCOPE_API_KEY", "sk-c44402d7a12c41299bb716af8d7e8bac"
    )
    create_index(api_key, files)
    print("结束")