import os
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma

from config.embedding_config import get_openai_embeddings_xin

# 1. Environment configuration
PERSIST_DIR = "./vectorstore"  # directory where the Chroma index is persisted

# Source document lives next to this script.
text_path = os.path.join(os.path.dirname(__file__), "bank.txt")

# 2. Load and split the document into chunks.
loader = TextLoader(text_path, encoding='utf-8')  # replace with your own file path
documents = loader.load()

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,      # max characters per chunk
    chunk_overlap=20,    # characters shared between adjacent chunks
    # Split preferentially on paragraph breaks, then newlines,
    # then the CJK full stop, then spaces.
    separators=["\n\n", "\n", "。", " "]
)
texts = text_splitter.split_documents(documents)
print(f"✅ 已加载和分割 {len(texts)} 个文本块")
# Guard: an empty or whitespace-only source file yields no chunks,
# and texts[0] would raise IndexError.
if texts:
    print(texts[0])

# 3. Initialize the embedding model (project-provided OpenAI wrapper).
embeddings = get_openai_embeddings_xin()

# 4. Create the vector store and persist it to disk.
vector_db = Chroma.from_documents(
    documents=texts,
    embedding=embeddings,
    persist_directory=PERSIST_DIR  # enable on-disk persistence
)
# Explicitly flush to disk.
# NOTE(review): persist() is deprecated/automatic in newer langchain-chroma
# releases — confirm the installed langchain version still requires it.
vector_db.persist()

print(f"✅ 已保存 {len(texts)} 个文本块到 {PERSIST_DIR}")
