from langchain_ollama import OllamaEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
from FlagEmbedding import FlagReranker
import os
import json

# Initialise module-level models once at import time so all callers share them.
embedding_model = OllamaEmbeddings(model="bge-m3")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=100, chunk_overlap=20)

# Initialise the reranker (loaded from a local model directory; use_fp16 enables
# half-precision inference).
reranker_path = "/root/autodl-tmp/Models/bge-reranker-v2-m3"
reranker = FlagReranker(reranker_path, use_fp16=True)

def build_vectorstore_from_text(text: str) -> FAISS:
    """Split raw text into chunks and build a FAISS vector store from them.

    Args:
        text: The raw source text to index.

    Returns:
        A FAISS vector store containing one embedded entry per chunk.

    Raises:
        ValueError: If ``text`` is empty or whitespace-only — splitting it
            would produce no chunks and FAISS cannot build an empty index.
    """
    # Fail fast with a clear message instead of a cryptic downstream error
    # from FAISS.from_texts on an empty chunk list.
    if not text or not text.strip():
        raise ValueError("输入文本为空，无法构建向量数据库")

    chunks = text_splitter.split_text(text)
    # Attach metadata to every chunk so retrieved documents carry a source tag.
    metadatas = [{"source": "local"} for _ in chunks]

    vectorstore = FAISS.from_texts(chunks, embedding_model, metadatas=metadatas)
    return vectorstore

def save_vectorstore_to_disk(vectorstore: FAISS, folder_path: str):
    """Persist a FAISS vector store to the given directory.

    Args:
        vectorstore: The store to serialise.
        folder_path: Target directory; created (with parents) if missing.
    """
    # Make sure the destination exists before handing it to save_local.
    os.makedirs(folder_path, exist_ok=True)
    vectorstore.save_local(folder_path)

def load_vectorstore_from_disk(folder_path: str) -> FAISS:
    """Load a previously saved FAISS vector store from disk.

    Args:
        folder_path: Directory that was populated by ``save_vectorstore_to_disk``.

    Returns:
        The reconstructed FAISS vector store.

    Raises:
        ValueError: If the directory does not exist or either of the files
            written by ``FAISS.save_local`` is missing.
    """
    if not os.path.isdir(folder_path):
        raise ValueError(f"目录 {folder_path} 不存在")

    # Both files are written by FAISS.save_local; refuse to load a partial store.
    for fname in ("index.faiss", "index.pkl"):
        if not os.path.exists(os.path.join(folder_path, fname)):
            raise ValueError(f"缺少必要的vectorstore文件: {fname}")

    # Deserialisation unpickles metadata, so LangChain requires the explicit
    # allow_dangerous_deserialization opt-in; only load stores we wrote ourselves.
    return FAISS.load_local(
        folder_path,
        embeddings=embedding_model,
        allow_dangerous_deserialization=True,
    )

# Explicit public API of this module (used by `from module import *`).
__all__ = [
    "embedding_model",
    "reranker",
    "build_vectorstore_from_text",
    "save_vectorstore_to_disk",
    "load_vectorstore_from_disk"
] 