import os
from sentence_transformers import SentenceTransformer, util
import torch
import time
from typing import Union, List


class VectorTool:
    """Text vectorization utility: encodes text into embeddings and computes
    semantic similarity with a sentence-transformers model."""

    def __init__(self, model_name: str = "BAAI/bge-large-zh-v1.5", device: str = 'cpu'):
        """
        Initialize the vector tool and load the model immediately.

        Args:
            model_name: sentence-transformers model identifier
            device: device to run inference on ('cpu' or 'cuda')
        """
        self.model_name = model_name
        self.device = device
        self.encoder = None  # set by _load_model below

        print(f"torch版本: {torch.__version__}")
        print(f"cuda支持: {torch.cuda.is_available()}")

        self._load_model()

    def _load_model(self):
        """Load the model from the local 'models' directory, falling back to
        downloading from Hugging Face (via mirror) and caching it locally."""
        models_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'models')
        model_path = os.path.join(models_dir, self.model_name)

        try:
            # Try the local copy first.
            # Fix: pass device here too — previously only the download path
            # honored self.device, so local loads ignored a requested 'cuda'.
            self.encoder = SentenceTransformer(model_path, device=self.device)
            # Local copy works — keep transformers from touching the network.
            os.environ["TRANSFORMERS_OFFLINE"] = "1"
            print(f"本地模型加载成功: {self.model_name}")
        except Exception as e:
            print(f"本地加载失败: {str(e)}")
            # Route Hugging Face downloads through the mirror endpoint.
            os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
            # Download from the hub.
            self.encoder = SentenceTransformer(self.model_name, device=self.device)
            # Cache the weights locally so future runs take the offline path.
            os.makedirs(models_dir, exist_ok=True)
            self.encoder.save(model_path)
            print(f"模型下载并保存完成: {self.model_name}")

    def generate_vector(self, text: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
        """
        Encode text into embedding vector(s).

        Args:
            text: a single string or a list of strings

        Returns:
            A single vector (list of floats) for one string, or a list of
            vectors for a list of strings.
        """
        start = time.perf_counter()

        embeddings = self.encoder.encode(
            text,
            convert_to_numpy=True,
            normalize_embeddings=True,  # unit-norm vectors make cosine similarity a plain dot product
            show_progress_bar=True,
        )

        elapsed_time = time.perf_counter() - start
        print(f"耗时: {elapsed_time:.6f}秒")
        print(f"向量维度: {embeddings.shape}")

        return embeddings.tolist()

    def compute_similarity(self, sentences1: Union[str, List[str]],
                         sentences2: Union[str, List[str]]) -> torch.Tensor:
        """
        Compute pairwise semantic similarity between two groups of sentences.

        Args:
            sentences1: first sentence or list of sentences
            sentences2: second sentence or list of sentences

        Returns:
            Cosine-similarity tensor of shape (len(sentences1), len(sentences2)).
        """
        # Normalize both inputs to list form so encode() returns 2-D output.
        if isinstance(sentences1, str):
            sentences1 = [sentences1]
        if isinstance(sentences2, str):
            sentences2 = [sentences2]

        # Encode both groups with normalized embeddings.
        embeddings1 = self.encoder.encode(sentences1, normalize_embeddings=True)
        embeddings2 = self.encoder.encode(sentences2, normalize_embeddings=True)

        # Cosine similarity between every pair across the two groups.
        similarity = util.pytorch_cos_sim(embeddings1, embeddings2)

        return similarity


# Process-wide singleton backing the backward-compatible module functions.
_global_tool = None

def get_global_tool():
    """Lazily construct and return the shared VectorTool instance."""
    global _global_tool
    if _global_tool is not None:
        return _global_tool
    _global_tool = VectorTool()
    return _global_tool

def generate_vector(text: Union[str, List[str]]) -> Union[List[float], List[List[float]]]:
    """Backward-compatible module-level wrapper for VectorTool.generate_vector."""
    tool = get_global_tool()
    return tool.generate_vector(text)


