# memory/local_embedding_client.py
# 本地embedding客户端，直接使用sentence-transformers加载模型

import os
import numpy as np
from typing import List, Optional, Dict, Any
from pathlib import Path
import torch
from sentence_transformers import SentenceTransformer
import yaml


def _get_optimal_device() -> str:
    """自动检测最优设备"""
    if torch.cuda.is_available():
        return "cuda"
    elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
        return "mps"  # Apple Silicon Mac
    else:
        return "cpu"


class LocalEmbeddingClient:
    """Local embedding client that loads a model directly via sentence-transformers.

    Configuration comes from the supplied dict, the unified config manager, or
    configs/llm_config.yaml (in that order). The model is loaded eagerly in
    ``__init__`` and downloaded from HuggingFace when no local copy exists.
    """

    def __init__(self, config: Optional[Dict] = None):
        """Initialize the client and load the embedding model.

        Args:
            config: Embedding configuration dict. When None, the
                'embedding_config' section is read from the unified config
                manager, falling back to reading the YAML config file directly.
        """
        if config is None:
            try:
                # Preferred source: the unified configuration manager.
                from ..configs import get_llm_config
                full_config = get_llm_config()
                config = full_config.get('embedding_config', {})
            except ImportError:
                # Fallback: read the raw config file.
                config_path = Path(__file__).parent.parent / "configs" / "llm_config.yaml"
                with open(config_path, 'r', encoding='utf-8') as f:
                    full_config = yaml.safe_load(f)
                    config = full_config.get('embedding_config', {})

        self.model_path = config.get('model_path', 'models/embedding')
        self.model_name = config.get('model_name', 'BAAI/bge-small-zh-v1.5')
        device_config = config.get('device', 'cpu')

        # 'auto' selects the best available backend (cuda > mps > cpu).
        if device_config == 'auto':
            self.device = _get_optimal_device()
        else:
            self.device = device_config

        self.max_seq_length = config.get('max_seq_length', 512)

        # Load the model eagerly; _load_model raises on failure.
        self.model = None
        self._load_model()

    def _download_model(self, model_path: Path) -> None:
        """Download the model from HuggingFace and persist it under *model_path*."""
        model_path.mkdir(parents=True, exist_ok=True)
        temp_model = SentenceTransformer(self.model_name, device=self.device)
        temp_model.save(str(model_path))

    def _load_model(self):
        """Load the embedding model, downloading it first when necessary.

        Raises:
            Exception: re-raised from sentence-transformers when loading fails.
        """
        try:
            # Resolve relative model paths against the current working directory.
            model_path = Path(self.model_path)
            if not model_path.is_absolute():
                model_path = Path.cwd() / model_path

            print(f"📌 准备加载embedding模型，设备: {self.device}")
            # Fall back to CPU gracefully when CUDA was requested but is absent.
            if self.device == "cuda" and not torch.cuda.is_available():
                print(f"⚠️  警告: CUDA不可用，自动切换到CPU")
                self.device = "cpu"
            elif self.device == "cuda":
                print(f"✅ CUDA可用，使用GPU加速")

            if model_path.exists() and (model_path / "config.json").exists():
                # A local copy looks complete; load it directly.
                print(f"📂 正在加载本地embedding模型: {model_path}")
                print(f"⏳ 模型加载中（可能需要10-30秒）...")
                try:
                    self.model = SentenceTransformer(str(model_path), device=self.device)
                    print(f"✅ 本地embedding模型加载成功")
                except Exception as load_error:
                    # Local copy is corrupt or incomplete; re-download it.
                    print(f"❌ 从本地路径加载失败: {load_error}")
                    print(f"🔄 尝试从HuggingFace下载模型: {self.model_name}")
                    self._download_model(model_path)
                    self.model = SentenceTransformer(str(model_path), device=self.device)
                    print(f"✅ 模型下载并加载成功")
            else:
                # No usable local copy: download, persist, then load from disk.
                print(f"📥 本地模型不存在，正在下载embedding模型: {self.model_name}")
                print(f"📂 目标路径: {model_path}")
                print(f"⏳ 下载中（可能需要几分钟，取决于网络速度）...")

                self._download_model(model_path)

                print(f"📂 正在从本地路径加载模型: {model_path}")
                self.model = SentenceTransformer(str(model_path), device=self.device)
                print(f"✅ 模型下载并加载成功")

            # Apply the configured sequence-length cap when the model supports it.
            if hasattr(self.model, 'max_seq_length'):
                self.model.max_seq_length = self.max_seq_length

            actual_device = str(self.model.device) if hasattr(self.model, 'device') else self.device
            print(f"✅ Embedding模型加载完成，使用设备: {actual_device}")

        except Exception as e:
            print(f"❌ 加载embedding模型失败: {e}")
            import traceback
            traceback.print_exc()
            # Bare raise preserves the original traceback.
            raise

    def get_embedding(self, text: str) -> Optional[List[float]]:
        """Return the embedding vector for a single text.

        Args:
            text: Input text.

        Returns:
            The embedding as a list of floats, or None on failure.
        """
        try:
            if not self.model:
                print("模型未加载")
                return None

            embedding = self.model.encode(text, convert_to_tensor=False)

            # encode() returns an ndarray; convert to a plain list.
            if isinstance(embedding, np.ndarray):
                embedding = embedding.tolist()

            return embedding

        except Exception as e:
            print(f"获取embedding时发生异常: {e}")
            return None

    def get_embeddings_batch(self, texts: List[str]) -> List[Optional[List[float]]]:
        """Return embedding vectors for a batch of texts.

        Args:
            texts: Input texts.

        Returns:
            One embedding (list of floats) per input; all entries are None on failure.
        """
        try:
            if not self.model:
                print("模型未加载")
                return [None] * len(texts)

            embeddings = self.model.encode(texts, convert_to_tensor=False, show_progress_bar=False)

            # Normalize each row to a plain Python list.
            result = []
            for embedding in embeddings:
                if isinstance(embedding, np.ndarray):
                    result.append(embedding.tolist())
                else:
                    result.append(embedding)

            return result

        except Exception as e:
            print(f"批量获取embedding时发生异常: {e}")
            return [None] * len(texts)

    def cosine_similarity(self, vec1: List[float], vec2: List[float]) -> float:
        """Compute the cosine similarity between two vectors.

        Args:
            vec1: First vector.
            vec2: Second vector.

        Returns:
            Cosine similarity in [-1, 1]; 0.0 for zero-norm input or on error.
        """
        try:
            a = np.array(vec1)
            b = np.array(vec2)

            dot_product = np.dot(a, b)
            norm_a = np.linalg.norm(a)
            norm_b = np.linalg.norm(b)

            # Guard against division by zero for zero-norm vectors.
            if norm_a == 0 or norm_b == 0:
                return 0.0

            # float() so callers get a plain Python float, not np.float64,
            # matching the declared return type.
            return float(dot_product / (norm_a * norm_b))

        except Exception as e:
            print(f"计算余弦相似度时发生异常: {e}")
            return 0.0

    def test_connection(self) -> bool:
        """Return True when the model produces a non-empty embedding."""
        try:
            test_embedding = self.get_embedding("测试连接")
            return test_embedding is not None and len(test_embedding) > 0
        except Exception as e:
            print(f"测试embedding模型失败: {e}")
            return False

    def get_model_info(self) -> Dict[str, Any]:
        """Return a status dict describing the loaded model."""
        if not self.model:
            return {"status": "not_loaded"}

        return {
            "status": "loaded",
            "model_path": str(self.model_path),
            "device": str(self.model.device) if hasattr(self.model, 'device') else "unknown",
            "max_seq_length": getattr(self.model, 'max_seq_length', self.max_seq_length),
            "embedding_dimension": self.model.get_sentence_embedding_dimension() if hasattr(self.model, 'get_sentence_embedding_dimension') else "unknown"
        }


# Global singleton LocalEmbeddingClient instance, created lazily by get_local_embedding_client().
_local_embedding_client: Optional[LocalEmbeddingClient] = None


def get_local_embedding_client() -> LocalEmbeddingClient:
    """Return the shared LocalEmbeddingClient, constructing it on first call."""
    global _local_embedding_client
    if _local_embedding_client is not None:
        return _local_embedding_client
    _local_embedding_client = LocalEmbeddingClient()
    return _local_embedding_client


def close_local_embedding_client():
    """Tear down the shared client and release its model reference."""
    global _local_embedding_client
    client = _local_embedding_client
    if client:
        # Drop the heavyweight model object first so it can be garbage-collected.
        if getattr(client, 'model', None):
            del client.model
        _local_embedding_client = None
