"""
高级嵌入模型管理器

提供统一的嵌入服务接口和智能缓存机制。
"""

from typing import Optional, List, Dict, Any

from llama_index.core.settings import Settings
from llama_index.core.embeddings import BaseEmbedding

from ..config import config, Mode
from .providers import BaseEmbeddingProvider, LocalEmbeddingProvider, CloudEmbeddingProvider
from .cache import EmbeddingCacheManager


class EmbeddingModelManager:
    """
    Advanced embedding model manager.

    Responsibilities:
    1. Select and lazily create the embedding provider (local or cloud).
    2. Expose a unified embedding-generation interface.
    3. Cache embedding vectors to avoid repeated provider calls.
    4. Configure the LlamaIndex global embedding settings.
    5. Provide performance monitoring and benchmarking helpers.
    """

    def __init__(self) -> None:
        """Initialize the manager with a lazily-created provider and a cache."""
        self._provider: Optional[BaseEmbeddingProvider] = None
        self._cache = EmbeddingCacheManager(
            cache_ttl=config.embedding_cache_ttl
        )
        self._cache.set_enabled(config.enable_embedding_cache)
        # Dimension of produced embeddings; learned lazily from the first
        # vector we see (cached or freshly generated).
        self._embedding_dim: Optional[int] = None

    def _get_provider(self) -> BaseEmbeddingProvider:
        """
        Return the configured embedding provider, creating it on first use.

        Returns:
            BaseEmbeddingProvider: provider matching the current configuration.
        """
        if self._provider is None:
            embedding_config = config.get_embedding_config()

            if embedding_config["mode"] == "local":
                self._provider = LocalEmbeddingProvider(
                    model_path=embedding_config["model_path"],
                    device=embedding_config["device"],
                    batch_size=embedding_config["batch_size"]
                )
            else:
                self._provider = CloudEmbeddingProvider(
                    api_key=embedding_config["api_key"],
                    base_url=embedding_config["base_url"],
                    model_name=embedding_config["model"]
                )

        return self._provider

    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        """
        Generate embedding vectors for a batch of texts.

        When caching is enabled, cached vectors are reused and only the
        remaining texts are sent to the provider in a single batch. On
        provider failure the method degrades to zero vectors of the known
        dimension, or raises RuntimeError if the dimension is still unknown.

        Args:
            texts: input texts.

        Returns:
            List[List[float]]: one embedding per input text, in input order.

        Raises:
            RuntimeError: provider failed and no fallback dimension is known.
        """
        if not texts:
            return []

        provider = self._get_provider()

        if not config.enable_embedding_cache:
            # Cache disabled: embed everything in one provider call.
            try:
                embeddings = provider.embed(texts)
                if embeddings and self._embedding_dim is None:
                    self._embedding_dim = len(embeddings[0])
                return embeddings
            except Exception as e:
                print(f"嵌入生成失败: {e}")
                if self._embedding_dim is None:
                    raise RuntimeError("嵌入生成失败，且无法确定嵌入维度用于回退") from e
                return [[0.0] * self._embedding_dim for _ in texts]

        # Cache enabled: fill known slots from the cache, batch the rest.
        # BUGFIX: results are assembled positionally into a preallocated list
        # instead of repeated list.insert(idx, ...), which was O(n^2) and
        # fragile when cached and uncached entries interleave.
        results: List[Optional[List[float]]] = [None] * len(texts)
        uncached: List[tuple] = []  # (original index, text)

        for i, text in enumerate(texts):
            cached_embedding = self._cache.get(text)
            if cached_embedding is not None:
                results[i] = cached_embedding
                if self._embedding_dim is None and cached_embedding:
                    self._embedding_dim = len(cached_embedding)
            else:
                uncached.append((i, text))

        if uncached:
            try:
                new_embeddings = provider.embed([t for _, t in uncached])
                if new_embeddings and self._embedding_dim is None:
                    self._embedding_dim = len(new_embeddings[0])

                # Hoist the provider-info lookup out of the loop.
                model_name = provider.get_info().get("model_name", "unknown")
                for (idx, text), embedding in zip(uncached, new_embeddings):
                    self._cache.put(text, embedding, model_name)
                    results[idx] = embedding

            except Exception as e:
                print(f"嵌入生成失败: {e}")
                if self._embedding_dim is None:
                    raise RuntimeError("嵌入生成失败，且无法确定嵌入维度用于回退") from e
                # Limited degradation: zero vectors of the known dimension.
                for idx, _ in uncached:
                    results[idx] = [0.0] * self._embedding_dim

        return results  # type: ignore[return-value]

    def embed_single(self, text: str) -> List[float]:
        """
        Generate the embedding vector for a single text.

        Args:
            text: input text.

        Returns:
            List[float]: the embedding, or an empty list if none was produced.
        """
        embeddings = self.embed_batch([text])
        return embeddings[0] if embeddings else []

    def configure_llama_index(self) -> None:
        """
        Install the current provider as LlamaIndex's global embedding model.

        Local providers expose a LlamaIndex-native model directly; cloud
        providers fall back to a custom adapter when the model name is not
        supported by the standard wrapper.
        """
        provider = self._get_provider()

        if isinstance(provider, LocalEmbeddingProvider):
            Settings.embed_model = provider._get_model()
        elif isinstance(provider, CloudEmbeddingProvider):
            try:
                Settings.embed_model = provider._get_standard_model()
            except ValueError:
                # Model name not accepted by the standard wrapper: adapt it.
                Settings.embed_model = self._create_custom_adapter(provider)

        print("已配置 LlamaIndex 全局嵌入模型")

    def _create_custom_adapter(self, provider):
        """Build a LlamaIndex BaseEmbedding adapter around a cloud provider."""
        try:
            # Capture the manager so the adapter can share the learned
            # embedding dimension with it.
            manager_self = self

            class CustomEmbeddingAdapter(BaseEmbedding):
                def __init__(self, cloud_provider):
                    super().__init__()
                    self._provider = cloud_provider
                    self._cached_dim: Optional[int] = None

                def _get_query_embedding(self, query: str) -> List[float]:
                    embedding = self._provider.embed([query])[0]
                    self._remember_dim(embedding)
                    return embedding

                def _get_text_embedding(self, text: str) -> List[float]:
                    embedding = self._provider.embed([text])[0]
                    self._remember_dim(embedding)
                    return embedding

                def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
                    embeddings = self._provider.embed(texts)
                    if embeddings:
                        self._remember_dim(embeddings[0])
                    return embeddings

                # BUGFIX: these override async hooks of BaseEmbedding and must
                # be coroutines; the previous sync defs returned plain lists
                # that callers would try to `await`.
                async def _aget_query_embedding(self, query: str) -> List[float]:
                    return self._get_query_embedding(query)

                async def _aget_text_embedding(self, text: str) -> List[float]:
                    return self._get_text_embedding(text)

                async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
                    return self._get_text_embeddings(texts)

                def embed_dim(self) -> int:
                    # Prefer a locally cached dimension, then the manager's.
                    dim = self._cached_dim or manager_self._embedding_dim
                    if dim:
                        return dim
                    # Still unknown: probe the provider once.
                    probe = self._provider.embed(["dim_probe"])
                    if probe and probe[0]:
                        self._remember_dim(probe[0])
                        return len(probe[0])
                    return 0

                def _remember_dim(self, embedding: List[float]) -> None:
                    if embedding:
                        self._cached_dim = len(embedding)
                        manager_self._embedding_dim = manager_self._embedding_dim or self._cached_dim

                @classmethod
                def class_name(cls) -> str:
                    return "CustomEmbeddingAdapter"

            return CustomEmbeddingAdapter(provider)

        except Exception as e:
            print(f"自定义适配器创建失败: {e}")
            # Adapter construction failed: fall back to the standard model
            # and let it use the standard API.
            return provider._get_standard_model()

    def _get_model(self) -> BaseEmbedding:
        """
        Return the current embedding model adapted to the LlamaIndex interface.

        Returns:
            BaseEmbedding: LlamaIndex-compatible embedding model instance.

        Raises:
            TypeError: the active provider is of an unrecognized type.
        """
        provider = self._get_provider()

        if isinstance(provider, LocalEmbeddingProvider):
            return provider._get_model()
        elif isinstance(provider, CloudEmbeddingProvider):
            try:
                return provider._get_standard_model()
            except ValueError:
                return self._create_custom_adapter(provider)

        # BUGFIX: previously fell through and returned None implicitly.
        raise TypeError(f"Unsupported embedding provider: {type(provider).__name__}")

    def switch_provider(self, mode: Optional[Mode] = None) -> None:
        """
        Switch the embedding provider.

        Args:
            mode: target mode; None re-selects based on current configuration.

        Raises:
            ValueError: requested mode conflicts with the configured mode.
        """
        if mode and mode != config.mode:
            raise ValueError(f"无法切换到 {mode.value} 模式，当前配置为 {config.mode.value}")

        # Force re-initialization of the provider.
        self._provider = None
        self._get_provider()

    def clear_cache(self) -> None:
        """Clear the embedding cache."""
        self._cache.clear()
        print("嵌入缓存已清空")

    def get_cache_stats(self) -> Dict[str, Any]:
        """
        Return cache statistics.

        Returns:
            Dict[str, Any]: statistics reported by the cache manager.
        """
        return self._cache.get_stats()

    def benchmark_embedding_speed(
        self,
        test_texts: List[str],
        warmup_texts: Optional[List[str]] = None
    ) -> Dict[str, Any]:
        """
        Benchmark embedding throughput.

        Args:
            test_texts: texts embedded during the timed run.
            warmup_texts: optional texts embedded before timing starts.

        Returns:
            Dict[str, Any]: timing statistics, or an error payload on failure.
        """
        import time

        if not test_texts:
            return {"error": "没有提供测试文本"}

        # Warm-up pass (fills caches / loads models) before timing.
        if warmup_texts:
            print("正在进行嵌入速度基准测试预热...")
            self.embed_batch(warmup_texts)

        print(f"开始嵌入速度基准测试，测试 {len(test_texts)} 个文本...")
        # BUGFIX: perf_counter is monotonic and high-resolution; time.time()
        # can go backwards and can yield a zero delta (division by zero).
        start_time = time.perf_counter()

        try:
            embeddings = self.embed_batch(test_texts)
            total_time = time.perf_counter() - start_time

            texts_per_second = (
                len(test_texts) / total_time if total_time > 0 else float("inf")
            )

            return {
                "provider_type": self._get_provider().get_info()["provider_type"],
                "total_texts": len(test_texts),
                "total_time": round(total_time, 3),
                "texts_per_second": round(texts_per_second, 2),
                "avg_time_per_text": round(total_time / len(test_texts), 6),
                "embedding_dimension": len(embeddings[0]) if embeddings else 0,
                "success": True
            }
        except Exception as e:
            return {
                "error": str(e),
                "success": False,
                "provider_type": self._get_provider().get_info()["provider_type"]
            }

    @property
    def model(self) -> BaseEmbedding:
        """
        The current embedding model instance.

        Returns:
            BaseEmbedding: LlamaIndex-compatible model for the active provider.
        """
        return self._get_model()

    @property
    def model_info(self) -> Dict[str, Any]:
        """
        Combined provider, cache, and mode information.

        Returns:
            Dict[str, Any]: provider info merged with cache stats and mode.
        """
        provider_info = self._get_provider().get_info()
        cache_stats = self.get_cache_stats()

        return {
            **provider_info,
            "cache_stats": cache_stats,
            "current_mode": config.mode.value
        }

    def __str__(self) -> str:
        """
        Return a human-readable summary of the manager's state.

        Returns:
            str: formatted model/cache information.
        """
        provider_info = self.model_info
        cache_stats = provider_info["cache_stats"]

        return f"""
=== 高级嵌入模型管理器 ===
运行模式: {provider_info['current_mode']}
提供者类型: {provider_info['provider_type']}
缓存状态: {'启用' if cache_stats['cache_enabled'] else '禁用'}
缓存项目: {cache_stats['valid_cache_items']}/{cache_stats['total_cache_items']}
缓存大小: {cache_stats['cache_memory_mb']} MB

{'=' * 25}
本地模式信息:
- 模型路径: {provider_info.get('model_path', 'N/A')}
- 计算设备: {provider_info.get('device', 'N/A')}
- 批次大小: {provider_info.get('batch_size', 'N/A')}

云端模式信息:
- API地址: {provider_info.get('base_url', 'N/A')}
- 模型名称: {provider_info.get('model_name', 'N/A')}
- 超时时间: {provider_info.get('timeout', 'N/A')}秒
        """.strip()


# 全局高级嵌入模型管理器实例
embedding_manager = EmbeddingModelManager()