import chromadb
from llama_index.core import SimpleDirectoryReader, Settings, VectorStoreIndex
from llama_index.core.extractors import TitleExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.vector_stores.chroma import ChromaVectorStore

from config.embeddings import embed_model_local_bge_small
from config.llm import deepseek_llm
from tool.file_tool import getRootPath

root_path = getRootPath()

# Load all documents found under <root>/data.
documents = SimpleDirectoryReader(root_path + '/data').load_data()

# Custom vector store: a persistent Chroma client that writes its data
# under <root>/chroma_db so the index survives restarts.
chroma_db = chromadb.PersistentClient(path=root_path + '/chroma_db')

# Create (or reuse) the target collection.
chroma_collection = chroma_db.get_or_create_collection("chroma-examples")

# Wrap the Chroma collection so LlamaIndex can use it as a vector_store.
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# chunk_size: size of each document chunk; chunk_overlap: overlap between chunks.
text_parser = SentenceSplitter(chunk_size=500, chunk_overlap=100)

# Since v0.10.0, Settings configures the global LLM and embedding model.
Settings.llm = deepseek_llm()
Settings.embed_model = embed_model_local_bge_small()

# Build the ingestion pipeline.  The embedding model is placed last in the
# transformation chain so every node reaches the vector store with its
# embedding already computed.
pipeline = IngestionPipeline(
    transformations=[
        text_parser,
        TitleExtractor(),
        embed_model_local_bge_small(),
    ],
    vector_store=vector_store,
)

# Run the documents through the pipeline; the resulting nodes are written
# into the Chroma-backed vector store.
pipeline.run(documents=documents)

# Build the index on top of the vector store the pipeline just populated.
# BUG FIX: the original used VectorStoreIndex.from_documents(documents),
# which ignored the Chroma store entirely and re-chunked/re-embedded all
# documents into a fresh in-memory store — discarding the pipeline's work
# and doing the embedding twice.  from_vector_store reuses the stored nodes.
vector_index = VectorStoreIndex.from_vector_store(vector_store)