# -*- coding: utf-8 -*-

import sys
sys.path.append('../../')


from langchain.document_loaders import Docx2txtLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter,CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
import sentence_transformers

base_chroma = "../data/chroma/"


# Text splitter: recursive splitting keeps semantically related text together;
# chunk_overlap of 200 preserves context across chunk boundaries.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=200)
# text_splitter = CharacterTextSplitter(chunk_size=300, chunk_overlap=100)
# Explicit encoding: the corpus is Chinese text (presumably UTF-8 — confirm),
# and relying on the platform default encoding breaks on non-UTF-8 locales
# such as Windows cp936/gbk.
with open('../data/luxun.txt', encoding='utf-8') as f:
    state_of_the_union = f.read()
split_docs = text_splitter.create_documents([state_of_the_union])
print("split_docs size:", len(split_docs))

# Candidate Chinese-capable embedding models: a short local alias mapped to
# the corresponding Hugging Face model repository id.
embedding_model_dict = {
    "ernie-tiny": "nghuyong/ernie-3.0-nano-zh",        # smallest / fastest
    "ernie-base": "nghuyong/ernie-3.0-base-zh",
    "text2vec": "GanymedeNil/text2vec-large-chinese",  # largest
    "text2vec2": "uer/sbert-base-chinese-nli",
    "text2vec3": "shibing624/text2vec-base-chinese",
}

EMBEDDING_MODEL = "ernie-tiny"

# Resolve the alias to its Hugging Face repo id, then build the embeddings
# wrapper that the vector store uses.
selected_model = embedding_model_dict[EMBEDDING_MODEL]
embeddings = HuggingFaceEmbeddings(model_name=selected_model)
# Swap in an explicit SentenceTransformer client pinned to CPU.
embeddings.client = sentence_transformers.SentenceTransformer(
    selected_model, device='cpu')

# Build the Chroma vector store from the split documents.
# BUG FIX: the original passed the literal string "base_chroma" as
# persist_directory, which creates a directory named "base_chroma" in the
# current working directory instead of using the configured path held in
# the `base_chroma` variable ("../data/chroma/").
db = Chroma.from_documents(split_docs, embeddings, persist_directory=base_chroma)
# Flush the collection to disk so it can be reloaded in later runs.
db.persist()