from typing import List, Union
import os
import sys
import numpy as np

# Path to the local BGE embedding model weights (BAAI/bge-large-zh-v1.5)
LOCAL_MODEL_PATH = r"D:\module\BAAI-bge-large-zh-v1.5"


class EmbeddingModel:
    """Text embedding model using local BGE model or fallback to random embeddings.

    Attempts to load a local BGE model through ``transformers``. If the
    library or the model weights are unavailable, :meth:`embed` degrades to
    random unit-norm vectors so callers can still run end-to-end demos.
    """

    def __init__(self, model_path: str = LOCAL_MODEL_PATH):
        """
        Initialize the embedding model.

        Args:
            model_path: Path to the local embedding model directory.
        """
        self.model_path = model_path
        # Embedding dimensionality of BGE-large models; also used to size
        # the random fallback vectors.
        self.dim = 1024
        self._model_loaded = False
        self.model = None
        self.tokenizer = None

        try:
            # Import lazily so the class stays usable (in fallback mode)
            # even when transformers/torch are not installed.
            from transformers import AutoModel, AutoTokenizer
            import torch

            print(f"尝试从本地路径加载模型: {model_path}")
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            self.model = AutoModel.from_pretrained(model_path)

            # Move to GPU when available and switch to inference mode.
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
            self.model.to(self.device)
            self.model.eval()

            self._model_loaded = True
            print(f"成功加载模型: {model_path}")

        except Exception as e:
            # Best-effort by design: any failure (missing library, bad path,
            # corrupt weights) downgrades to the random-vector fallback.
            print(f"警告: 无法加载嵌入模型: {str(e)}")
            print("将使用随机向量作为演示用途。")
            self._model_loaded = False

    def _random_embeddings(self, count: int) -> List[List[float]]:
        """Return ``count`` random unit-norm vectors of length ``self.dim``.

        Fallback used when the real model is unavailable or fails at
        inference time. Vectors are L2-normalized for consistency with
        the model path (see ``_get_embeddings``), so cosine-similarity
        demos behave the same in both modes.
        """
        import random

        vectors: List[List[float]] = []
        for _ in range(count):
            raw = [random.uniform(-1, 1) for _ in range(self.dim)]
            norm = sum(x * x for x in raw) ** 0.5 or 1.0  # guard against /0
            vectors.append([x / norm for x in raw])
        return vectors

    def _get_embeddings(self, text_batch: List[str]):
        """Embed a batch of texts with the BGE model.

        Returns:
            ``numpy.ndarray`` of shape (len(text_batch), self.dim) with
            L2-normalized rows.
        """
        import torch

        # Tokenize; pad/truncate to the model's 512-token limit.
        encoded_input = self.tokenizer(
            text_batch,
            padding=True,
            truncation=True,
            max_length=512,
            return_tensors='pt'
        ).to(self.device)

        # Inference only: no gradients needed.
        with torch.no_grad():
            outputs = self.model(**encoded_input)
            # Mean-pool the last hidden states, then L2-normalize.
            embeddings = self._mean_pooling(
                outputs.last_hidden_state, encoded_input['attention_mask']
            )
            embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)

        return embeddings.cpu().numpy()

    def _mean_pooling(self, token_embeddings, attention_mask):
        """Average token embeddings over the sequence, ignoring padding."""
        import torch

        # Broadcast the attention mask over the hidden dimension so padded
        # tokens contribute zero to both the sum and the count.
        mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        summed = torch.sum(token_embeddings * mask, 1)
        counts = torch.clamp(mask.sum(1), min=1e-9)  # avoid division by zero
        return summed / counts

    def embed(self, text: Union[str, List[str]]) -> List[List[float]]:
        """
        Convert text to vector embeddings.

        Args:
            text: Single string or list of strings to embed.

        Returns:
            List of embedding vectors, one per input text, each of
            length ``self.dim``.
        """
        # Normalize input: a single string becomes a one-element batch.
        texts = [text] if isinstance(text, str) else text

        # Model unavailable: serve the random fallback.
        if not self._model_loaded:
            return self._random_embeddings(len(texts))

        try:
            return self._get_embeddings(texts).tolist()
        except Exception as e:
            print(f"生成嵌入时出错: {str(e)}")
            print("返回随机向量作为后备方案")
            return self._random_embeddings(len(texts))