from typing import List, Optional, Dict

from deepeval.config.settings import get_settings
from deepeval.utils import require_dependency
from deepeval.models import DeepEvalBaseEmbeddingModel
from deepeval.models.retry_policy import (
    create_retry_decorator,
)
from deepeval.constants import ProviderSlug as PS


# Module-wide retry decorator for Ollama calls, built from the shared
# provider retry policy (applied to every embed method below).
retry_ollama = create_retry_decorator(PS.OLLAMA)


class OllamaEmbeddingModel(DeepEvalBaseEmbeddingModel):
    """Embedding model backed by a locally hosted Ollama server.

    Args:
        model: Name of the Ollama embedding model. Falls back to the
            ``LOCAL_EMBEDDING_MODEL_NAME`` setting when omitted.
        host: Base URL of the Ollama server. Falls back to the
            ``LOCAL_EMBEDDING_BASE_URL`` setting when omitted.
        generation_kwargs: Extra keyword arguments forwarded to every
            ``embed`` call.
        **client_kwargs: Extra keyword arguments forwarded to the
            ``ollama`` client constructor.
    """

    def __init__(
        self,
        model: Optional[str] = None,
        host: Optional[str] = None,
        generation_kwargs: Optional[Dict] = None,
        **client_kwargs,
    ):
        settings = get_settings()

        # Explicit conditionals instead of the original fragile
        # `host or BASE and str(BASE)` precedence chain — behavior is
        # identical: a truthy caller-provided host wins; otherwise the
        # configured base URL is stringified when truthy, and its falsy
        # value (typically None) is kept as-is when not.
        if host:
            self.host = host
        elif settings.LOCAL_EMBEDDING_BASE_URL:
            self.host = str(settings.LOCAL_EMBEDDING_BASE_URL)
        else:
            self.host = settings.LOCAL_EMBEDDING_BASE_URL

        self.model_name = model or settings.LOCAL_EMBEDDING_MODEL_NAME
        self.client_kwargs = client_kwargs or {}
        self.generation_kwargs = generation_kwargs or {}
        super().__init__(self.model_name)

    @retry_ollama
    def embed_text(self, text: str) -> List[float]:
        """Return the embedding vector for a single text."""
        return self._embed(text)["embeddings"][0]

    @retry_ollama
    def embed_texts(self, texts: List[str]) -> List[List[float]]:
        """Return one embedding vector per input text."""
        return self._embed(texts)["embeddings"]

    @retry_ollama
    async def a_embed_text(self, text: str) -> List[float]:
        """Async variant of ``embed_text``."""
        response = await self._a_embed(text)
        return response["embeddings"][0]

    @retry_ollama
    async def a_embed_texts(self, texts: List[str]) -> List[List[float]]:
        """Async variant of ``embed_texts``."""
        response = await self._a_embed(texts)
        return response["embeddings"]

    def _embed(self, payload):
        # Single place for the model/kwargs plumbing shared by the two
        # sync embed methods.
        client = self.load_model()
        return client.embed(
            model=self.model_name, input=payload, **self.generation_kwargs
        )

    async def _a_embed(self, payload):
        # Async counterpart of _embed, using the async client.
        client = self.load_model(async_mode=True)
        return await client.embed(
            model=self.model_name, input=payload, **self.generation_kwargs
        )

    ###############################################
    # Model
    ###############################################

    def load_model(self, async_mode: bool = False):
        """Lazily import ``ollama`` and build a sync or async client.

        Raises whatever ``require_dependency`` raises when the ``ollama``
        package is not installed.
        """
        ollama = require_dependency(
            "ollama",
            provider_label="OllamaEmbeddingModel",
            install_hint="Install it with `pip install ollama`.",
        )
        client_cls = ollama.AsyncClient if async_mode else ollama.Client
        return self._build_client(client_cls)

    def _build_client(self, cls):
        # Apply the resolved host plus any user-supplied client kwargs.
        return cls(host=self.host, **self.client_kwargs)

    def get_model_name(self):
        return f"{self.model_name} (Ollama)"
