from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_ollama import OllamaEmbeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter

def get_vector(
    file_paths=None,
    save_dir='./camp',
    embedding_model="nomic-embed-text",
    base_url="http://192.168.124.23:11434",
    chunk_size=800,
    chunk_overlap=150,
):
    """Build and persist a FAISS vector store from Markdown policy documents.

    Loads each file in ``file_paths`` as UTF-8 text, splits the documents into
    overlapping chunks, embeds them via an Ollama server, and saves the
    resulting FAISS index to ``save_dir``.

    Args:
        file_paths: Paths of the documents to index. Defaults to the three
            Hubei trade-in / scrappage policy Markdown files under ``../data``.
        save_dir: Directory where the FAISS index is written.
        embedding_model: Name of the Ollama embedding model to use.
        base_url: Base URL of the Ollama server.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Characters of overlap between consecutive chunks.

    Returns:
        The list of split ``Document`` chunks that were embedded.
    """
    # None sentinel avoids a shared mutable default argument.
    if file_paths is None:
        file_paths = [
            '../data/省商务厅等7部门关于印发《湖北省家电以旧换新工作补充细则》的通知-湖北省商务厅.md',
            '../data/省商务厅等7部门关于印发《湖北省 报废机动车回收管理实施办法》的通知-湖北省商务厅.md',
            '../data/省商务厅等8部门关于印发2025年湖北省汽车报废更新和置换更新补贴实施细则的通知-湖北省商务厅.md'
        ]

    # Load every document; each file may yield one or more Document objects.
    docs = []
    for path in file_paths:
        loader = TextLoader(path, encoding='utf-8')
        docs.extend(loader.load())

    # Split into overlapping chunks so retrieval granularity stays manageable.
    text_split = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap
    )
    split_data = text_split.split_documents(docs)

    # Embed the chunks through the Ollama server and build the FAISS index.
    embeddings = OllamaEmbeddings(
        model=embedding_model,
        base_url=base_url
    )
    db = FAISS.from_documents(split_data, embeddings)
    db.save_local(save_dir)
    # Report the actual save location (was hard-coded to ./camp).
    print(f"向量数据库已保存至 {save_dir}")

    return split_data

# Script entry point: build and save the vector store with default settings.
if __name__ == '__main__':
    get_vector()
