from sentence_transformers import SentenceTransformer
import torch
import gc

class SentenceTransformerWrapper:
    """Process-wide singleton wrapper around a CPU-only SentenceTransformer.

    The underlying model is loaded lazily on first use and shared by every
    caller, so the (slow, memory-heavy) model load happens at most once.
    """

    _instance = None  # the single shared instance

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on EVERY SentenceTransformerWrapper() call even though
        # __new__ hands back the same object; guard so a later construction
        # never wipes out an already-loaded model.
        if not hasattr(self, 'model'):
            self.model = None

    def load_model(self):
        """Lazily load the embedding model (idempotent; safe to call repeatedly).

        Raises:
            Exception: re-raised after logging if the model fails to load.
        """
        if self.model is None:
            try:
                # Force CPU and cap the sequence length to keep memory usage low.
                self.model = SentenceTransformer(
                    'all-MiniLM-L6-v2',
                    device='cpu',
                    cache_folder='./model_cache'
                )
                self.model.max_seq_length = 256  # shorter sequences -> less memory
            except Exception as e:
                print(f"加载Sentence Transformer模型失败: {e}")
                raise

    def encode(self, texts, batch_size=32):
        """Return one embedding vector (numpy array) per input text, as a list.

        Args:
            texts: Sequence of strings to embed.
            batch_size: Number of texts encoded per forward pass.

        Raises:
            Exception: re-raised after logging on load or encoding failure.
        """
        try:
            self.load_model()
            # SentenceTransformer.encode batches internally, so delegate
            # batching via its batch_size parameter instead of slicing by hand.
            # The old per-batch gc.collect() was pure overhead: each batch's
            # embeddings stayed referenced by the accumulator list, so the
            # collector could never reclaim them anyway.
            embeddings = self.model.encode(
                texts,
                batch_size=batch_size,
                show_progress_bar=False,
                convert_to_numpy=True
            )
            # Preserve the historical return type: a list of 1-D arrays.
            return list(embeddings)
        except Exception as e:
            print(f"生成文本嵌入失败: {e}")
            raise

    def __del__(self):
        # Best-effort cleanup. The model is pinned to CPU above, so the CUDA
        # cache flush is a no-op today; kept for safety if the device changes.
        if hasattr(self, 'model') and self.model is not None:
            del self.model
            gc.collect()
            if torch.cuda.is_available():
                torch.cuda.empty_cache()

def generate_embeddings(texts):
    """
    Generate embeddings for a list of texts using SentenceTransformer.

    Uses the process-wide SentenceTransformerWrapper singleton, so the model
    is loaded only once no matter how often this function is called.

    Args:
        texts (List[str]): List of text strings to generate embeddings for

    Returns:
        List[numpy.ndarray]: One embedding vector per input text. Note this
        is a Python list of per-text arrays, not a single stacked ndarray.
    """
    transformer = SentenceTransformerWrapper()
    return transformer.encode(texts)