from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from model.nomic_embed_text import NomicEmbedText
import os

# Build a local FAISS vector index from a plain-text knowledge file.
# Pipeline: load text -> split into overlapping chunks -> embed -> index -> persist.

# Hoisted from mid-file: imports belong at the top of the scope (PEP 8).
from langchain_community.vectorstores import FAISS

# Chunking parameters for the splitter.
CHUNK_SIZE = 1000   # max characters per chunk
CHUNK_OVERLAP = 50  # characters shared between adjacent chunks, preserves context at boundaries

SOURCE_FILE = "./table_msg.txt"
INDEX_DIR = "rag_data/faiss_index"

# Load the source document (utf8 to handle non-ASCII content).
loader = TextLoader(SOURCE_FILE, encoding='utf8')
documents = loader.load()

# Split the document into overlapping chunks suitable for embedding.
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=CHUNK_SIZE,
    chunk_overlap=CHUNK_OVERLAP,
)
split_docs = text_splitter.split_documents(documents)

# Renamed from `bgeEmbeddings`: the model is Nomic, not BGE, and the old
# camelCase name violated snake_case convention.
embeddings = NomicEmbedText().get_llm_model()

# Embed every chunk and build the FAISS index in memory.
vector = FAISS.from_documents(split_docs, embeddings)

# Persist the index to the local filesystem for later retrieval.
vector.save_local(INDEX_DIR)