from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
import os
from Embed_model import BGE_Embed
# Directory holding the source .txt knowledge files.
docs_path = './docs/'

# Collect every .txt file, sorted so the processing (and therefore the
# persisted vector-store) order is deterministic — os.listdir returns
# entries in arbitrary, platform-dependent order.
txt_files = sorted(f for f in os.listdir(docs_path) if f.endswith('.txt'))

# Text splitter applied to each title+body entry. Separators are tried in
# order; when a piece is still over chunk_size, the splitter recurses with
# the next separator. Without the final "" fallback the splitter keeps
# oversized pieces intact, so a 500+-char span with no newline would exceed
# chunk_size — the " " and "" entries guarantee the bound is honored.
text_splitter = RecursiveCharacterTextSplitter(
    separators=[
        "\"\"\"",  # entry delimiter, in case any survives the pre-split
        "\n\n",    # paragraph break
        "\n",      # single line break
        " ",       # word-boundary fallback
        "",        # character-level fallback: chunk_size always respected
    ],
    chunk_size=500,     # max characters per chunk
    chunk_overlap=100,  # characters shared between adjacent chunks
)

# Split each file into entries, chunk them, and wrap the chunks as
# Document objects tagged with their source filename.
documents = []
for txt_file in txt_files:
    with open(os.path.join(docs_path, txt_file), 'r', encoding='utf-8') as fh:
        raw = fh.read()

    # Entries are delimited by triple quotes; each entry is a title line
    # followed by a body.
    for entry in raw.split('"""'):
        head, newline, tail = entry.strip().partition('\n')
        if not newline:
            # No body after the title line (or empty entry) — skip it.
            continue
        full_text = f"{head.strip()}\n{tail.strip()}"
        for chunk in text_splitter.split_text(full_text):
            documents.append(
                Document(page_content=chunk, metadata={"source": txt_file})
            )

# Initialize the embedding model used to vectorize the chunks.
embedding = BGE_Embed()

# Build the Chroma vector store from the chunked documents and persist it
# to disk so later runs can reload it without re-embedding.
vectorstore = Chroma.from_documents(
    documents=documents,
    persist_directory='./chroma_rag',
    embedding=embedding,
)
