"""
嵌入模型管理模块
支持本地Hugging Face嵌入模型（bge-large-zh-v1.5推荐用于中文）
"""

import os
import logging
import gc
import torch
from typing import List, Dict, Any, Optional
from abc import ABC, abstractmethod
from transformers import AutoTokenizer, AutoModel
import torch.nn.functional as F

logger = logging.getLogger(__name__)


class BaseEmbedding(ABC):
    """Abstract base class defining the embedding-model interface."""

    @abstractmethod
    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Encode a list of documents into embedding vectors."""
        ...

    @abstractmethod
    def embed_query(self, text: str) -> List[float]:
        """Encode a single query string into an embedding vector."""
        ...

    @property
    @abstractmethod
    def dimension(self) -> int:
        """Dimensionality of the produced embedding vectors."""
        ...


class LocalEmbedding(BaseEmbedding):
    """Local Hugging Face embedding model (CLS pooling + L2 normalization).

    Loads a BGE-style bi-encoder either from a local directory or, when the
    path does not exist, from the BAAI/bge-large-zh-v1.5 hub checkpoint.
    """

    def __init__(self,
                 model_path: str = "./models/bge-large-zh-v1.5",
                 batch_size: Optional[int] = None,
                 **kwargs):
        """
        Initialize the local embedding model.

        Args:
            model_path: Local model directory; falls back to the hub model
                BAAI/bge-large-zh-v1.5 when the path does not exist.
            batch_size: Batch size for document encoding; defaults to the
                EMBEDDING_BATCH_SIZE environment variable (or 8).
            **kwargs: Accepted for interface compatibility; currently unused.

        Raises:
            Exception: Propagates any tokenizer/model loading failure.
        """
        self.model_path = model_path

        # Resolve the batch size from the environment when not given explicitly.
        if batch_size is None:
            batch_size = int(os.getenv('EMBEDDING_BATCH_SIZE', '8'))
        self.batch_size = batch_size

        # Free as much memory as possible before loading the weights.
        self._clear_memory()

        try:
            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

            # Prefer the local checkpoint; otherwise fall back to the hub model.
            if os.path.exists(model_path):
                logger.info(f"从本地路径加载嵌入模型: {model_path}")
                model_name = model_path
            else:
                logger.info("使用默认的Hugging Face模型: BAAI/bge-large-zh-v1.5")
                model_name = "BAAI/bge-large-zh-v1.5"

            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModel.from_pretrained(model_name)
            self.model.eval()  # inference only: disables dropout etc.
            self.model.to(self.device)

            logger.info(f"成功加载嵌入模型，设备: {self.device}，批处理大小: {self.batch_size}")
        except Exception as e:
            logger.error(f"加载嵌入模型失败: {str(e)}")
            raise

    def _clear_memory(self):
        """Release cached GPU memory and trigger Python garbage collection."""
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()

    def _check_memory_usage(self):
        """Log current GPU memory usage (no-op on CPU-only hosts)."""
        if torch.cuda.is_available():
            allocated = torch.cuda.memory_allocated(0) / 1024**3
            cached = torch.cuda.memory_reserved(0) / 1024**3
            logger.info(f"嵌入模型GPU内存使用: {allocated:.2f}GB (已分配), {cached:.2f}GB (已缓存)")

    def _encode(self, texts: List[str]) -> List[List[float]]:
        """Tokenize one batch, run the model, then CLS-pool and L2-normalize.

        Args:
            texts: Batch of texts to encode.

        Returns:
            One normalized embedding (list of floats) per input text.
        """
        encoded_input = self.tokenizer(
            texts,
            padding=True,
            truncation=True,
            return_tensors='pt'
        ).to(self.device)

        with torch.no_grad():
            model_output = self.model(**encoded_input)
            # CLS pooling: take the first token's last hidden state.
            embeddings = model_output[0][:, 0]
            # L2-normalize so dot product equals cosine similarity.
            embeddings = F.normalize(embeddings, p=2, dim=1)

        return embeddings.cpu().tolist()

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Encode a list of documents into embedding vectors.

        Args:
            texts: Document texts to encode.

        Returns:
            One embedding vector per input text (empty list for empty input).

        Raises:
            Exception: Propagates any tokenization/inference failure.
        """
        try:
            if not texts:
                return []

            self._check_memory_usage()

            # Encode in batches to bound peak memory usage.
            all_embeddings: List[List[float]] = []
            for i in range(0, len(texts), self.batch_size):
                all_embeddings.extend(self._encode(texts[i:i + self.batch_size]))

            logger.debug(f"完成 {len(texts)} 个文档的嵌入编码")
            return all_embeddings

        except Exception as e:
            logger.error(f"文档嵌入编码失败: {str(e)}")
            raise

    def embed_query(self, text: str) -> List[float]:
        """
        Encode a single query string into an embedding vector.

        Args:
            text: Query text.

        Returns:
            The normalized embedding vector.

        Raises:
            Exception: Propagates any tokenization/inference failure.
        """
        try:
            return self._encode([text])[0]
        except Exception as e:
            logger.error(f"查询嵌入失败: {e}")
            raise

    @property
    def dimension(self) -> int:
        """Embedding dimensionality.

        Read from the loaded model's config when available; falls back to
        1024 (the hidden size of bge-large-zh-v1.5) for backward compatibility.
        """
        config = getattr(self.model, "config", None)
        return getattr(config, "hidden_size", 1024)


class EmbeddingManager:
    """Facade that owns a single embedding model and proxies its API."""

    def __init__(self):
        # Underlying BaseEmbedding instance; populated by _initialize_default_model.
        self.model = None
        self._initialize_default_model()

    def _initialize_default_model(self):
        """Initialize the default local model from the EMBEDDING_MODEL env var.

        Raises:
            Exception: Propagates any model-loading failure.
        """
        try:
            # Model path comes from the environment, with a sensible default.
            model_path = os.getenv('EMBEDDING_MODEL', './models/bge-large-zh-v1.5')

            self.model = LocalEmbedding(model_path=model_path)
            logger.info(f"成功初始化嵌入模型: {model_path}")
        except Exception as e:
            logger.error(f"初始化模型失败: {str(e)}")
            raise

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Embed documents with the current model.

        Args:
            texts: List of texts to embed.

        Returns:
            List of embedding vectors.

        Raises:
            ValueError: If no model has been initialized.
        """
        if not self.model:
            raise ValueError("嵌入模型未初始化")

        return self.model.embed_documents(texts)

    def embed_query(self, text: str) -> List[float]:
        """
        Embed a query with the current model.

        Args:
            text: Query text.

        Returns:
            Embedding vector.

        Raises:
            ValueError: If no model has been initialized.
        """
        if not self.model:
            raise ValueError("嵌入模型未初始化")

        return self.model.embed_query(text)

    @property
    def dimension(self) -> int:
        """Dimensionality of the current model's embeddings.

        Raises:
            ValueError: If no model has been initialized.
        """
        if not self.model:
            raise ValueError("嵌入模型未初始化")

        return self.model.dimension

    def get_model_info(self) -> Dict[str, Any]:
        """
        Return a summary of the active model.

        Returns:
            Dict with name, type, dimension and model path; empty dict when
            no model is loaded.
        """
        if not self.model:
            return {}

        # Derive name/type from the actual model instead of hard-coding them,
        # so a custom EMBEDDING_MODEL path is reported correctly.
        model_path = self.model.model_path
        name = os.path.basename(os.path.normpath(model_path)) or model_path
        return {
            'name': name,
            'type': type(self.model).__name__,
            'dimension': self.dimension,
            'model_path': model_path
        }


def main() -> None:
    """CLI smoke test: embed sample documents and a query, print results."""
    # Create the embedding manager (loads the model).
    manager = EmbeddingManager()

    # Document embedding.
    test_texts = ["这是测试文本1", "这是测试文本2"]
    embeddings = manager.embed_documents(test_texts)
    print(f"嵌入维度: {len(embeddings[0])}")
    print(f"嵌入数量: {len(embeddings)}")

    # Query embedding.
    query_embedding = manager.embed_query("测试查询")
    print(f"查询嵌入维度: {len(query_embedding)}")

    # Model metadata.
    print(f"模型信息: {manager.get_model_info()}")


if __name__ == "__main__":
    main()