"""
嵌入提供者模块

定义各种嵌入模型提供者，包括本地和云端模型。
"""

import os
import time
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional
import numpy as np
import requests

# NOTE(review): the original try/except fallback imported the exact same
# module paths in both branches, so it could never recover from an
# ImportError — the fallback was a no-op. If an older llama_index release
# truly uses a different import path, restore the except branch with that
# legacy path instead.
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding

from llama_index.core.embeddings import BaseEmbedding


class BaseEmbeddingProvider(ABC):
    """Abstract interface for embedding providers.

    Concrete subclasses must implement :meth:`embed`, which turns a batch
    of texts into embedding vectors, and :meth:`get_info`, which reports
    provider metadata.
    """

    @abstractmethod
    def embed(self, texts: List[str]) -> List[List[float]]:
        """Generate one embedding vector per input text."""
        ...

    @abstractmethod
    def get_info(self) -> Dict[str, Any]:
        """Return a metadata dictionary describing this provider."""
        ...


class LocalEmbeddingProvider(BaseEmbeddingProvider):
    """Embedding provider backed by a local HuggingFace model.

    The underlying model is loaded lazily on first use and cached for all
    subsequent calls.
    """

    def __init__(
        self,
        model_path: str,
        device: str = "cpu",
        batch_size: int = 4
    ):
        """
        Create a local embedding provider.

        Args:
            model_path: Filesystem path of the model to load.
            device: Compute device to run on (e.g. "cpu").
            batch_size: Number of texts embedded per batch.
        """
        self.model_path = model_path
        self.device = device
        self.batch_size = batch_size
        # Lazily-created model instance; stays None until first use.
        self._model: Optional[HuggingFaceEmbedding] = None

    def embed(self, texts: List[str]) -> List[List[float]]:
        """Embed *texts* with the locally loaded model."""
        return self._get_model()._get_text_embeddings(texts)

    def _get_model(self) -> HuggingFaceEmbedding:
        """Return the cached model, loading it on first access."""
        if self._model is not None:
            return self._model

        print(f"正在加载本地嵌入模型: {self.model_path}")
        print(f"使用设备: {self.device}")
        print(f"批次大小: {self.batch_size}")

        # Fail fast with a clear error when the path is missing.
        if not os.path.exists(self.model_path):
            raise FileNotFoundError(f"本地模型路径不存在: {self.model_path}")

        self._model = HuggingFaceEmbedding(
            model_name=self.model_path,
            device=self.device,
            embed_batch_size=self.batch_size
        )
        print("嵌入模型加载完成")
        return self._model

    def get_info(self) -> Dict[str, Any]:
        """Return provider metadata, including whether the model is loaded."""
        info = {
            "provider_type": "local",
            "model_path": self.model_path,
            "device": self.device,
            "batch_size": self.batch_size,
            "is_loaded": self._model is not None,
        }
        return info


class CloudEmbeddingProvider(BaseEmbeddingProvider):
    """Cloud embedding provider (OpenAI-compatible API).

    Tries the standard LlamaIndex ``OpenAIEmbedding`` client first; if the
    model name is rejected with a ``ValueError`` (not in LlamaIndex's model
    whitelist), falls back to calling the provider's ``/embeddings``
    endpoint directly over HTTP.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str,
        model_name: str,
        timeout: int = 60,
        max_retries: int = 3
    ):
        """
        Create a cloud embedding provider.

        Args:
            api_key: API key, sent as a Bearer token.
            base_url: Base URL of the OpenAI-compatible API.
            model_name: Name of the embedding model to call.
            timeout: Per-request timeout in seconds.
            max_retries: Maximum attempts per batch before giving up.
        """
        self.api_key = api_key
        self.base_url = base_url
        self.model_name = model_name
        self.timeout = timeout
        self.max_retries = max_retries
        # Standard LlamaIndex client; stays None if the custom HTTP
        # fallback is used instead.
        self._model: Optional[OpenAIEmbedding] = None
        self._custom_model_initialized = False

    def embed(self, texts: List[str]) -> List[List[float]]:
        """Embed *texts*, preferring the standard client over raw HTTP."""
        try:
            # First try the standard OpenAIEmbedding (works when the model
            # name is in LlamaIndex's whitelist).
            model = self._get_standard_model()
            return model._get_text_embeddings(texts)
        except ValueError:
            # Unsupported model name — fall back to a direct HTTP request.
            return self._embed_with_custom_api(texts)

    def _get_standard_model(self) -> OpenAIEmbedding:
        """Return the standard LlamaIndex client, creating it lazily.

        Raises:
            ValueError: If ``model_name`` is not accepted by OpenAIEmbedding.
        """
        if self._model is None:
            print(f"正在加载云端嵌入模型: {self.model_name}")
            print(f"API地址: {self.base_url}")

            self._model = OpenAIEmbedding(
                api_key=self.api_key,
                api_base=self.base_url,
                model=self.model_name,
                timeout=self.timeout,
                max_retries=self.max_retries
            )
            print("云端嵌入模型加载完成")

        return self._model

    def _embed_with_custom_api(self, texts: List[str]) -> List[List[float]]:
        """Call the ``/embeddings`` endpoint directly for arbitrary models.

        Texts are sent in batches of at most 100; each batch is retried up
        to ``max_retries`` times with exponential backoff.

        Raises:
            Exception: If a batch still fails after all retries (the
                original HTTP/parsing error is chained as the cause).
        """
        if not self._custom_model_initialized:
            print(f"正在使用自定义API加载云端嵌入模型: {self.model_name}")
            print(f"API地址: {self.base_url}")
            self._custom_model_initialized = True

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # Fix: strip a trailing "/" from base_url so we never produce a
        # "//embeddings" URL, which some servers reject.
        endpoint = f"{self.base_url.rstrip('/')}/embeddings"

        # Process texts in batches (at most 100 texts per request).
        batch_size = 100
        all_embeddings: List[List[float]] = []

        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]

            for retry in range(self.max_retries):
                try:
                    response = requests.post(
                        endpoint,
                        headers=headers,
                        json={
                            "model": self.model_name,
                            "input": batch_texts,
                            "encoding_format": "float"
                        },
                        timeout=self.timeout
                    )
                    response.raise_for_status()

                    result = response.json()
                    batch_embeddings = [item["embedding"] for item in result["data"]]
                    all_embeddings.extend(batch_embeddings)

                    break
                except Exception as e:
                    if retry == self.max_retries - 1:
                        # Fix: chain the original error so the root cause
                        # survives in the traceback.
                        raise Exception(f"嵌入API调用失败: {e}") from e
                    time.sleep(2 ** retry)  # exponential backoff

        return all_embeddings

    def get_info(self) -> Dict[str, Any]:
        """Return provider metadata.

        ``is_loaded`` is True once either the standard client or the custom
        HTTP fallback has been initialized (previously only the standard
        client was reported, so fallback usage read as "not loaded").
        """
        return {
            "provider_type": "cloud",
            "model_name": self.model_name,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "is_loaded": self._model is not None or self._custom_model_initialized
        }