from typing import List
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.vectorstores import FAISS
import os
import torch

# Process-wide shared embeddings instance; lazily created by get_embeddings().
# None until the first successful model load.
_embeddings = None

def get_embeddings():
    """Return the process-wide shared HuggingFaceEmbeddings instance.

    The model is loaded lazily on first call. CUDA is used when available,
    otherwise CPU; if loading on CUDA fails, one CPU retry is attempted.

    Returns:
        HuggingFaceEmbeddings: the shared, initialized embeddings object.

    Raises:
        Exception: whatever the underlying loader raises when the model
            cannot be loaded on any device.
    """
    global _embeddings
    if _embeddings is None:
        # Actually check CUDA availability instead of hard-coding 'cuda'
        # (the original always tried the GPU first, guaranteeing a failed
        # first attempt on CPU-only machines).
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print(f"使用设备: {device}")

        model_kwargs = {'device': device}
        encode_kwargs = {'normalize_embeddings': True}

        try:
            _embeddings = HuggingFaceEmbeddings(
                model_name="sentence-transformers/all-mpnet-base-v2",
                model_kwargs=model_kwargs,
                encode_kwargs=encode_kwargs,
                cache_folder="./models",  # local model cache directory
                show_progress=True  # show download progress
            )
            print("Embeddings 模型加载成功！")
        except Exception as e:
            print(f"加载 Embeddings 模型时出错: {str(e)}")
            if device == 'cuda':
                # GPU load failed — retry once on CPU. If this also fails,
                # let the exception propagate to the caller.
                print("GPU 加载失败，尝试使用 CPU...")
                model_kwargs['device'] = 'cpu'
                _embeddings = HuggingFaceEmbeddings(
                    model_name="sentence-transformers/all-mpnet-base-v2",
                    model_kwargs=model_kwargs,
                    encode_kwargs=encode_kwargs,
                    cache_folder="./models",
                    show_progress=True
                )
                print("使用 CPU 加载成功！")
            else:
                # The original swallowed this failure and silently returned
                # None, leaving callers with a broken embeddings object.
                # Re-raise so the failure is explicit.
                raise

    return _embeddings

class VectorStore:
    """FAISS-backed vector store with create / incremental-update / load support.

    All instances share one embeddings model via get_embeddings(); a store is
    persisted to a directory containing an ``index.faiss`` file.
    """

    def __init__(self):
        """Bind the shared embeddings instance; re-raises on initialization failure."""
        try:
            self.embeddings = get_embeddings()
        except Exception as e:
            print(f"初始化 embeddings 失败: {e}")
            raise

    @staticmethod
    def _build_metadatas(texts: List[str]) -> List[dict]:
        """Build per-text metadata: the first blank-line-separated paragraph
        of each text is recorded as its ``source`` label.
        (Extracted — the original duplicated this expression in two places.)
        """
        return [{"source": text.split('\n\n')[0]} for text in texts]

    def create_vector_store(self, texts: List[str], persist_path: str):
        """Create a vector store at *persist_path*, or incrementally update an existing one.

        If ``index.faiss`` already exists under *persist_path*, the store is
        loaded and *texts* (if any) are appended and persisted. Otherwise a
        new store is built from *texts* and saved.

        Args:
            texts: documents to index; the first paragraph of each becomes
                its ``source`` metadata.
            persist_path: directory where the FAISS index is saved.

        Returns:
            The loaded or newly created FAISS store.
        """
        try:
            # An existing store is identified by its index file, not merely
            # by the directory existing.
            if os.path.exists(os.path.join(persist_path, "index.faiss")):
                print(f"发现已有向量存储，进行增量更新...")
                # NOTE(security): load_local deserializes pickled data — only
                # load stores this application wrote itself.
                db = FAISS.load_local(persist_path, self.embeddings)
                if texts:  # only write back when there is new text to add
                    print(f"添加 {len(texts)} 条新文本...")
                    db.add_texts(texts, metadatas=self._build_metadatas(texts))
                    db.save_local(persist_path)
                return db
        except Exception as e:
            # Best-effort: a corrupt/unreadable store falls through to a rebuild.
            print(f"加载现有向量库失败，将创建新的向量库: {str(e)}")

        # Fresh store. NOTE: FAISS.from_texts raises on an empty list — callers
        # are expected to pass at least one text when no store exists yet.
        print("创建新的向量存储...")
        db = FAISS.from_texts(texts, self.embeddings, metadatas=self._build_metadatas(texts))
        db.save_local(persist_path)
        return db

    @classmethod
    def load_vector_store(cls, persist_path: str):
        """Load an existing vector store, or return None if none exists.

        Checks for the ``index.faiss`` file itself — consistent with
        create_vector_store — so an empty directory is not mistaken for a
        store (the original checked only the directory, which made
        FAISS.load_local raise on an empty one instead of returning None).
        """
        if not os.path.exists(os.path.join(persist_path, "index.faiss")):
            return None
        # NOTE(security): deserializes pickled metadata; trust the path.
        return FAISS.load_local(persist_path, get_embeddings())