from typing import Any

import requests
from ibm_watsonx_ai.metanames import EmbedTextParamsMetaNames
from langchain_openai import OpenAIEmbeddings

from lfx.base.embeddings.embeddings_class import EmbeddingsWithModels
from lfx.base.embeddings.model import LCEmbeddingsModel
from lfx.base.models.model_utils import get_ollama_models, is_valid_ollama_url
from lfx.base.models.openai_constants import OPENAI_EMBEDDING_MODEL_NAMES
from lfx.base.models.watsonx_constants import (
    IBM_WATSONX_URLS,
    WATSONX_EMBEDDING_MODEL_NAMES,
)
from lfx.field_typing import Embeddings
from lfx.io import (
    BoolInput,
    DictInput,
    DropdownInput,
    FloatInput,
    IntInput,
    MessageTextInput,
    SecretStrInput,
)
from lfx.log.logger import logger
from lfx.schema.dotdict import dotdict
from lfx.utils.util import transform_localhost_url

# Ollama API constants
HTTP_STATUS_OK = 200  # expected HTTP status for a successful Ollama API response
JSON_MODELS_KEY = "models"  # key holding the model list in Ollama's tags response
JSON_NAME_KEY = "name"  # key holding a model's name inside each model entry
JSON_CAPABILITIES_KEY = "capabilities"  # key listing a model's capabilities
DESIRED_CAPABILITY = "embedding"  # only models advertising this capability are offered
DEFAULT_OLLAMA_URL = "http://localhost:11434"  # default local Ollama endpoint


class EmbeddingModelComponent(LCEmbeddingsModel):
    """Langflow component that builds a text-embeddings object for one of several providers.

    Supports OpenAI, Ollama, and IBM watsonx.ai. The visible inputs are switched
    per provider via ``update_build_config``; ``build_embeddings`` constructs the
    actual embeddings instance. When ``fail_safe_mode`` is on, build errors are
    logged and ``None`` is returned instead of raising.
    """

    display_name = "Embedding Model"
    description = "Generate embeddings using a specified provider."
    documentation: str = "https://docs.langflow.org/components-embedding-models"
    icon = "binary"
    name = "EmbeddingModel"
    category = "models"

    inputs = [
        # Provider selector — drives visibility of the provider-specific fields below.
        DropdownInput(
            name="provider",
            display_name="Model Provider",
            options=["OpenAI", "Ollama", "IBM watsonx.ai"],
            value="OpenAI",
            info="Select the embedding model provider",
            real_time_refresh=True,
            options_metadata=[{"icon": "OpenAI"}, {"icon": "Ollama"}, {"icon": "WatsonxAI"}],
        ),
        # OpenAI-compatible base URL override (OpenAI provider only).
        MessageTextInput(
            name="api_base",
            display_name="API Base URL",
            info="Base URL for the API. Leave empty for default.",
            advanced=True,
        ),
        # Ollama endpoint — hidden unless the Ollama provider is selected.
        MessageTextInput(
            name="ollama_base_url",
            display_name="Ollama API URL",
            info=f"Endpoint of the Ollama API (Ollama only). Defaults to {DEFAULT_OLLAMA_URL}",
            value=DEFAULT_OLLAMA_URL,
            show=False,
            real_time_refresh=True,
            load_from_db=True,
        ),
        # watsonx.ai region endpoint — hidden unless the IBM provider is selected.
        DropdownInput(
            name="base_url_ibm_watsonx",
            display_name="watsonx API Endpoint",
            info="The base URL of the API (IBM watsonx.ai only)",
            options=IBM_WATSONX_URLS,
            value=IBM_WATSONX_URLS[0],
            show=False,
            real_time_refresh=True,
        ),
        # Model list — options are repopulated per provider in update_build_config.
        DropdownInput(
            name="model",
            display_name="Model Name",
            options=OPENAI_EMBEDDING_MODEL_NAMES,
            value=OPENAI_EMBEDDING_MODEL_NAMES[0],
            info="Select the embedding model to use",
            real_time_refresh=True,
            refresh_button=True,
        ),
        # Shared credential field; display name / required flag change with the provider.
        SecretStrInput(
            name="api_key",
            display_name="OpenAI API Key",
            info="Model Provider API key",
            required=True,
            show=True,
            real_time_refresh=True,
        ),
        # Watson-specific inputs
        MessageTextInput(
            name="project_id",
            display_name="Project ID",
            info="IBM watsonx.ai Project ID (required for IBM watsonx.ai)",
            show=False,
        ),
        IntInput(
            name="dimensions",
            display_name="Dimensions",
            info="The number of dimensions the resulting output embeddings should have. "
            "Only supported by certain models.",
            advanced=True,
        ),
        IntInput(name="chunk_size", display_name="Chunk Size", advanced=True, value=1000),
        FloatInput(name="request_timeout", display_name="Request Timeout", advanced=True),
        IntInput(name="max_retries", display_name="Max Retries", advanced=True, value=3),
        BoolInput(name="show_progress_bar", display_name="Show Progress Bar", advanced=True),
        DictInput(
            name="model_kwargs",
            display_name="Model Kwargs",
            advanced=True,
            info="Additional keyword arguments to pass to the model.",
        ),
        # watsonx-only tuning knobs (shown only for the IBM provider).
        IntInput(
            name="truncate_input_tokens",
            display_name="Truncate Input Tokens",
            advanced=True,
            value=200,
            show=False,
        ),
        BoolInput(
            name="input_text",
            display_name="Include the original text in the output",
            value=True,
            advanced=True,
            show=False,
        ),
        # When enabled, build errors are logged and None is returned instead of raising.
        BoolInput(
            name="fail_safe_mode",
            display_name="Fail-Safe Mode",
            value=False,
            advanced=True,
            info="When enabled, errors will be logged instead of raising exceptions. "
            "The component will return None on error.",
            real_time_refresh=True,
        ),
    ]

    @staticmethod
    def fetch_ibm_models(base_url: str) -> list[str]:
        """Return the embedding model ids offered by a watsonx.ai endpoint.

        Queries the foundation-model-specs API, filtered to embedding models
        that are not withdrawn. On any failure the error is logged and the
        static ``WATSONX_EMBEDDING_MODEL_NAMES`` fallback list is returned.
        """
        query = {
            "version": "2024-09-16",
            "filters": "function_embedding,!lifecycle_withdrawn:and",
        }
        try:
            response = requests.get(
                f"{base_url}/ml/v1/foundation_model_specs", params=query, timeout=10
            )
            response.raise_for_status()
            specs = response.json().get("resources", [])
            return sorted(spec["model_id"] for spec in specs)
        except Exception:  # noqa: BLE001
            logger.exception("Error fetching models")
            return WATSONX_EMBEDDING_MODEL_NAMES

    async def fetch_ollama_models(self) -> list[str]:
        """Return embedding-capable model names from the configured Ollama server.

        Delegates to ``get_ollama_models``; any failure is logged and an empty
        list is returned instead of raising.
        """
        query_kwargs = {
            "base_url_value": self.ollama_base_url,
            "desired_capability": DESIRED_CAPABILITY,
            "json_models_key": JSON_MODELS_KEY,
            "json_name_key": JSON_NAME_KEY,
            "json_capabilities_key": JSON_CAPABILITIES_KEY,
        }
        try:
            return await get_ollama_models(**query_kwargs)
        except Exception:  # noqa: BLE001
            logger.exception("Error fetching models")
            return []

    async def build_embeddings(self) -> Embeddings | None:
        """Build an embeddings object for the currently selected provider.

        Returns an ``EmbeddingsWithModels`` wrapper containing the primary
        embeddings instance for ``self.model`` plus a dedicated instance per
        available model for that provider.

        Returns:
            EmbeddingsWithModels on success; ``None`` on any error when
            ``fail_safe_mode`` is enabled.

        Raises:
            ValueError: Missing credentials/project id or unknown provider
                (only when fail-safe mode is disabled).
            ImportError: Provider integration package not installed
                (only when fail-safe mode is disabled).
        """
        provider = self.provider
        model = self.model
        api_key = self.api_key
        api_base = self.api_base
        base_url_ibm_watsonx = self.base_url_ibm_watsonx
        ollama_base_url = self.ollama_base_url
        dimensions = self.dimensions
        chunk_size = self.chunk_size
        request_timeout = self.request_timeout
        max_retries = self.max_retries
        show_progress_bar = self.show_progress_bar
        model_kwargs = self.model_kwargs or {}

        if provider == "OpenAI":
            if not api_key:
                msg = "OpenAI API key is required when using OpenAI provider"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise ValueError(msg)

            try:
                # Create the primary embedding instance
                embeddings_instance = OpenAIEmbeddings(
                    model=model,
                    dimensions=dimensions or None,
                    base_url=api_base or None,
                    api_key=api_key,
                    chunk_size=chunk_size,
                    max_retries=max_retries,
                    timeout=request_timeout or None,
                    show_progress_bar=show_progress_bar,
                    model_kwargs=model_kwargs,
                )

                # Create dedicated instances for each available model
                available_models_dict = {}
                for model_name in OPENAI_EMBEDDING_MODEL_NAMES:
                    available_models_dict[model_name] = OpenAIEmbeddings(
                        model=model_name,
                        dimensions=dimensions or None,  # Use same dimensions config for all
                        base_url=api_base or None,
                        api_key=api_key,
                        chunk_size=chunk_size,
                        max_retries=max_retries,
                        timeout=request_timeout or None,
                        show_progress_bar=show_progress_bar,
                        model_kwargs=model_kwargs,
                    )

                return EmbeddingsWithModels(
                    embeddings=embeddings_instance,
                    available_models=available_models_dict,
                )
            except Exception as e:
                msg = f"Failed to initialize OpenAI embeddings: {e}"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise

        if provider == "Ollama":
            # Prefer the dedicated langchain-ollama package; fall back to the
            # deprecated community implementation if it is not installed.
            try:
                from langchain_ollama import OllamaEmbeddings
            except ImportError:
                try:
                    from langchain_community.embeddings import OllamaEmbeddings
                except ImportError:
                    msg = "Please install langchain-ollama: pip install langchain-ollama"
                    if self.fail_safe_mode:
                        logger.error(msg)
                        return None
                    raise ImportError(msg) from None

            try:
                transformed_base_url = transform_localhost_url(ollama_base_url)

                # Check if URL contains /v1 suffix (OpenAI-compatible mode)
                if transformed_base_url and transformed_base_url.rstrip("/").endswith("/v1"):
                    # Strip /v1 suffix and log warning
                    transformed_base_url = transformed_base_url.rstrip("/").removesuffix("/v1")
                    logger.warning(
                        "Detected '/v1' suffix in base URL. The Ollama component uses the native Ollama API, "
                        "not the OpenAI-compatible API. The '/v1' suffix has been automatically removed. "
                        "If you want to use the OpenAI-compatible API, please use the OpenAI component instead. "
                        "Learn more at https://docs.ollama.com/openai#openai-compatibility"
                    )

                # Use the module-level default rather than a second hard-coded literal.
                final_base_url = transformed_base_url or DEFAULT_OLLAMA_URL

                # Create the primary embedding instance
                embeddings_instance = OllamaEmbeddings(
                    model=model,
                    base_url=final_base_url,
                    **model_kwargs,
                )

                # Fetch available Ollama models
                available_model_names = await self.fetch_ollama_models()

                # Create dedicated instances for each available model
                available_models_dict = {}
                for model_name in available_model_names:
                    available_models_dict[model_name] = OllamaEmbeddings(
                        model=model_name,
                        base_url=final_base_url,
                        **model_kwargs,
                    )

                return EmbeddingsWithModels(
                    embeddings=embeddings_instance,
                    available_models=available_models_dict,
                )
            except Exception as e:
                msg = f"Failed to initialize Ollama embeddings: {e}"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise

        if provider == "IBM watsonx.ai":
            try:
                from langchain_ibm import WatsonxEmbeddings
            except ImportError:
                msg = "Please install langchain-ibm: pip install langchain-ibm"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise ImportError(msg) from None

            if not api_key:
                msg = "IBM watsonx.ai API key is required when using IBM watsonx.ai provider"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise ValueError(msg)

            project_id = self.project_id

            if not project_id:
                msg = "Project ID is required for IBM watsonx.ai provider"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise ValueError(msg)

            try:
                from ibm_watsonx_ai import APIClient, Credentials

                # Default to the us-south region when no endpoint was chosen.
                final_url = base_url_ibm_watsonx or "https://us-south.ml.cloud.ibm.com"

                credentials = Credentials(
                    api_key=api_key,
                    url=final_url,
                )

                api_client = APIClient(credentials)

                params = {
                    EmbedTextParamsMetaNames.TRUNCATE_INPUT_TOKENS: self.truncate_input_tokens,
                    EmbedTextParamsMetaNames.RETURN_OPTIONS: {"input_text": self.input_text},
                }

                # Create the primary embedding instance
                embeddings_instance = WatsonxEmbeddings(
                    model_id=model,
                    params=params,
                    watsonx_client=api_client,
                    project_id=project_id,
                )

                # Fetch available IBM watsonx.ai models
                available_model_names = self.fetch_ibm_models(final_url)

                # Create dedicated instances for each available model
                available_models_dict = {}
                for model_name in available_model_names:
                    available_models_dict[model_name] = WatsonxEmbeddings(
                        model_id=model_name,
                        params=params,
                        watsonx_client=api_client,
                        project_id=project_id,
                    )

                return EmbeddingsWithModels(
                    embeddings=embeddings_instance,
                    available_models=available_models_dict,
                )
            except Exception as e:
                msg = f"Failed to authenticate with IBM watsonx.ai: {e}"
                if self.fail_safe_mode:
                    logger.error(msg)
                    return None
                raise

        msg = f"Unknown provider: {provider}"
        if self.fail_safe_mode:
            logger.error(msg)
            return None
        raise ValueError(msg)

    async def update_build_config(
        self, build_config: dotdict, field_value: Any, field_name: str | None = None
    ) -> dotdict:
        """Adjust field visibility, model options, and required flags on input changes.

        Reacts to edits of ``fail_safe_mode``, ``provider``,
        ``base_url_ibm_watsonx``, ``ollama_base_url``, and ``model`` (Ollama
        refresh button) and returns the mutated ``build_config``.
        """
        # Handle fail_safe_mode changes first - set all required fields to False if enabled
        if field_name == "fail_safe_mode":
            if field_value:  # If fail_safe_mode is enabled
                build_config["api_key"]["required"] = False
            elif hasattr(self, "provider"):
                # If fail_safe_mode is disabled, restore required flags based on provider
                if self.provider in ["OpenAI", "IBM watsonx.ai"]:
                    build_config["api_key"]["required"] = True
                else:  # Ollama
                    build_config["api_key"]["required"] = False

        if field_name == "provider":
            if field_value == "OpenAI":
                build_config["model"]["options"] = OPENAI_EMBEDDING_MODEL_NAMES
                build_config["model"]["value"] = OPENAI_EMBEDDING_MODEL_NAMES[0]
                build_config["api_key"]["display_name"] = "OpenAI API Key"
                # Only set required=True if fail_safe_mode is not enabled
                build_config["api_key"]["required"] = not (hasattr(self, "fail_safe_mode") and self.fail_safe_mode)
                build_config["api_key"]["show"] = True
                build_config["api_base"]["display_name"] = "OpenAI API Base URL"
                build_config["api_base"]["advanced"] = True
                build_config["api_base"]["show"] = True
                build_config["ollama_base_url"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = False
                build_config["truncate_input_tokens"]["show"] = False
                build_config["input_text"]["show"] = False
            elif field_value == "Ollama":
                build_config["ollama_base_url"]["show"] = True

                if await is_valid_ollama_url(url=self.ollama_base_url):
                    try:
                        models = await self.fetch_ollama_models()
                        build_config["model"]["options"] = models
                        build_config["model"]["value"] = models[0] if models else ""
                    except ValueError:
                        build_config["model"]["options"] = []
                        build_config["model"]["value"] = ""
                else:
                    build_config["model"]["options"] = []
                    build_config["model"]["value"] = ""
                build_config["truncate_input_tokens"]["show"] = False
                build_config["input_text"]["show"] = False
                build_config["api_key"]["display_name"] = "API Key (Optional)"
                build_config["api_key"]["required"] = False
                build_config["api_key"]["show"] = False
                build_config["api_base"]["show"] = False
                build_config["project_id"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = False

            elif field_value == "IBM watsonx.ai":
                # Fetch once and reuse — avoids a duplicate network round-trip,
                # and guard against an empty model list before indexing.
                ibm_models = self.fetch_ibm_models(base_url=self.base_url_ibm_watsonx)
                build_config["model"]["options"] = ibm_models
                build_config["model"]["value"] = ibm_models[0] if ibm_models else ""
                build_config["api_key"]["display_name"] = "IBM watsonx.ai API Key"
                # Only set required=True if fail_safe_mode is not enabled
                build_config["api_key"]["required"] = not (hasattr(self, "fail_safe_mode") and self.fail_safe_mode)
                build_config["api_key"]["show"] = True
                build_config["api_base"]["show"] = False
                build_config["ollama_base_url"]["show"] = False
                build_config["base_url_ibm_watsonx"]["show"] = True
                build_config["project_id"]["show"] = True
                build_config["truncate_input_tokens"]["show"] = True
                build_config["input_text"]["show"] = True
        elif field_name == "base_url_ibm_watsonx":
            # Fetch once and reuse; guard against an empty model list.
            ibm_models = self.fetch_ibm_models(base_url=field_value)
            build_config["model"]["options"] = ibm_models
            build_config["model"]["value"] = ibm_models[0] if ibm_models else ""
        elif field_name == "ollama_base_url":
            # Refresh Ollama models when the base URL changes.
            ollama_url = self.ollama_base_url
            if await is_valid_ollama_url(url=ollama_url):
                try:
                    models = await self.fetch_ollama_models()
                    build_config["model"]["options"] = models
                    build_config["model"]["value"] = models[0] if models else ""
                except ValueError:
                    await logger.awarning("Failed to fetch Ollama embedding models.")
                    build_config["model"]["options"] = []
                    build_config["model"]["value"] = ""

        elif field_name == "model" and self.provider == "Ollama":
            # Refresh button on the model dropdown: re-list available models.
            ollama_url = self.ollama_base_url
            if await is_valid_ollama_url(url=ollama_url):
                try:
                    models = await self.fetch_ollama_models()
                    build_config["model"]["options"] = models
                except ValueError:
                    await logger.awarning("Failed to refresh Ollama embedding models.")
                    build_config["model"]["options"] = []

        return build_config
