import logging
import os
import re
from collections.abc import Callable, Generator, Mapping
from typing import cast

import litellm

# from litellm import get_supported_openai_params
from litellm import get_max_tokens, token_counter
from pydantic import ValidationError
from unstract.sdk1.adapters.constants import Common
from unstract.sdk1.adapters.llm1 import adapters
from unstract.sdk1.audit import Audit
from unstract.sdk1.constants import ToolEnv
from unstract.sdk1.exceptions import LLMError, SdkError, strip_litellm_prefix
from unstract.sdk1.platform import PlatformHelper
from unstract.sdk1.tool.base import BaseTool
from unstract.sdk1.utils.common import (
    LLMResponseCompat,
    TokenCounterCompat,
    capture_metrics,
)

logger = logging.getLogger(__name__)


class LLM:
    """Unified LLM interface powered by LiteLLM.

    Internally invokes Unstract LLM adapters.

    Accepts either of the following pairs for init:
    - adapter ID and metadata       (e.g. test connection)
    - adapter instance ID and tool  (e.g. edit adapter)
    """

    # Default system prompt used when the caller does not supply one.
    SYSTEM_PROMPT = "You are a helpful assistant."
    # Fallback token limit when the model's real limit cannot be resolved.
    MAX_TOKENS = 4096
    # Greedily matches the outermost [...] or {...} span (newlines included)
    # when extracting JSON from a model response.
    JSON_REGEX = re.compile(r"\[(?:.|\n)*\]|\{(?:.|\n)*\}")
    # Marker delimiting JSON content in responses; overridable via the
    # JSON_SELECTION_MARKER environment variable (read once at import time).
    JSON_CONTENT_MARKER = os.environ.get("JSON_SELECTION_MARKER", "§§§")

    def __init__(  # noqa: C901
        self,
        adapter_id: str = "",
        adapter_metadata: dict[str, object] | None = None,
        adapter_instance_id: str = "",
        tool: BaseTool | None = None,
        usage_kwargs: dict[str, object] | None = None,
        system_prompt: str = "",
        kwargs: dict[str, object] | None = None,
        capture_metrics: bool = False,
    ) -> None:
        """Initialize the LLM interface.

        Either (adapter_id, adapter_metadata) or (adapter_instance_id, tool)
        must be supplied. When an instance ID is given, the adapter config is
        resolved via the platform service and takes precedence over the
        directly-passed adapter ID/metadata.

        Args:
            adapter_id: Adapter identifier for LLM model
            adapter_metadata: Configuration metadata for the adapter
            adapter_instance_id: Instance identifier for the adapter
            tool: BaseTool instance for tool-specific operations
            usage_kwargs: Usage tracking parameters
            system_prompt: System prompt for the LLM
            kwargs: Additional keyword arguments for configuration
            capture_metrics: Whether to capture performance metrics

        Raises:
            SdkError: If the adapter is unknown, the tool binding is missing
                for an instance ID, or the adapter metadata fails validation.
        """
        # Normalize None defaults to fresh dicts (avoids mutable defaults).
        if adapter_metadata is None:
            adapter_metadata = {}
        if usage_kwargs is None:
            usage_kwargs = {}
        if kwargs is None:
            kwargs = {}
        self._usage_kwargs = usage_kwargs
        self._capture_metrics = capture_metrics
        try:
            llm_config = None

            # Instance-ID path: config must be fetched through the tool's
            # platform binding; an instance ID without a tool is unusable.
            if adapter_instance_id:
                if not tool:
                    raise SdkError(
                        "Broken LLM adapter tool binding: " + adapter_instance_id
                    )
                llm_config = PlatformHelper.get_adapter_config(tool, adapter_instance_id)

            if llm_config:
                # Platform-resolved config wins over directly-passed values.
                self._adapter_id = llm_config[Common.ADAPTER_ID]
                self._adapter_metadata = llm_config[Common.ADAPTER_METADATA]
                self._adapter_instance_id = adapter_instance_id
                self._tool = tool
            else:
                # Direct path: use the given metadata, or fall back to the
                # registry's default metadata for this adapter ID.
                self._adapter_id = adapter_id
                if adapter_metadata:
                    self._adapter_metadata = adapter_metadata
                else:
                    self._adapter_metadata = adapters[self._adapter_id][Common.METADATA]
                self._adapter_instance_id = ""
                self._tool = None

            # Retrieve the adapter class.
            self.adapter = adapters[self._adapter_id][Common.MODULE]
        except KeyError as e:
            # Raised by the adapters registry lookup for an unknown adapter.
            raise SdkError(
                f"LLM adapter not supported: {adapter_id or adapter_instance_id}"
            ) from e

        try:
            # usage_kwargs intentionally override overlapping keys in kwargs.
            self.platform_kwargs = {**kwargs, **usage_kwargs}

            if self._adapter_instance_id:
                self.platform_kwargs["adapter_instance_id"] = self._adapter_instance_id

            # Validated metadata becomes the base completion kwargs (includes
            # the "model" key used throughout this class).
            self.kwargs = self.adapter.validate(self._adapter_metadata)

            # REF: https://docs.litellm.ai/docs/completion/input#translated-openai-params
            # supported = get_supported_openai_params(model=self.kwargs["model"],
            #     custom_llm_provider=self.provider)
            # for s in supported:
            #     if s not in self.kwargs:
            #         logger.warning("Missing supported parameter for '%s': %s",
            #             self.adapter.get_provider(), s)
        except ValidationError as e:
            raise SdkError("Invalid LLM adapter metadata: " + str(e)) from e

        self._system_prompt = system_prompt or self.SYSTEM_PROMPT

        # Resolve the platform API key: tool env is mandatory when a tool is
        # bound; otherwise fall back to the process environment (may be empty).
        if self._tool:
            self._platform_api_key = self._tool.get_env_or_die(ToolEnv.PLATFORM_API_KEY)
            if not self._platform_api_key:
                raise SdkError(f"Missing env variable '{ToolEnv.PLATFORM_API_KEY}'")
        else:
            self._platform_api_key = os.environ.get(ToolEnv.PLATFORM_API_KEY, "")

        # Metrics capture.
        self._run_id = self.platform_kwargs.get("run_id")
        # Only override capture_metrics if it's explicitly set in platform_kwargs
        capture_metrics_from_platform = self.platform_kwargs.get("capture_metrics")
        if capture_metrics_from_platform is not None:
            self._capture_metrics = capture_metrics_from_platform
        self._metrics: dict[str, object] = {}

    def test_connection(self) -> bool:
        """Verify connectivity by asking the LLM a known-answer question.

        Sends a fixed prompt and checks that the reply mentions the expected
        answer ("chennai", case-insensitive).

        Returns:
            True when a sane response is obtained.

        Raises:
            LLMError: When the provider answered but the response was not
                sane, or when an unexpected failure occurred during the check.
            SdkError: Re-raised unchanged when already wrapped upstream.
        """
        try:
            result = self.complete("What is the capital of Tamilnadu?")
            answer = result["response"].text

            if re.search("chennai", answer.lower()):
                return True

            logger.error("LLM test response: %s", answer)
            msg = (
                "LLM based test failed. The credentials was valid however a sane "
                "response was not obtained from the LLM provider, please recheck "
                "the configuration."
            )
            raise LLMError(message=msg, status_code=400)
        except (LLMError, SdkError):
            # Already wrapped by complete(); propagate unchanged.
            raise
        except Exception as e:
            # Anything unexpected gets logged and wrapped with context.
            logger.error("Failed to test connection for LLM: %s", e)

            # Surface an HTTP status code when the provider error carries one.
            status_code = (
                getattr(e, "status_code", None)
                if hasattr(e, "status_code")
                else getattr(e, "http_status", None)
            )

            raise LLMError(
                message=f"Failed to test LLM connection: {str(e)}",
                status_code=status_code,
                actual_err=e,
            ) from e

    @capture_metrics
    def complete(self, prompt: str, **kwargs: object) -> dict[str, object]:
        """Return a standard chat completion dict with optional metrics capture.

        Return a standard chat completion dict and optionally captures metrics if run
        ID is provided.

        Args:
            prompt   (str)   The input text prompt for generating the completion.
            **kwargs (Any)   Additional arguments passed to the completion function.
                Recognized keys: ``extract_json`` (bool) to trim the response
                to its JSON content, and ``process_text`` (callable) run as a
                post-processing hook; remaining keys are merged over the
                adapter's completion parameters.

        Returns:
            dict[str, Any]  : A dictionary containing the result of the completion,
                any processed output, and the captured metrics (if applicable).

        Raises:
            LLMError: Provider failures, wrapped with provider context.
        """
        try:
            # NOTE(review): this sets a process-global litellm flag, not a
            # per-call option — it affects every litellm call in this process.
            litellm.drop_params = True  # drop params that are not supported by the model

            messages: list[dict[str, str]] = [
                {"role": "system", "content": self._system_prompt},
                {"role": "user", "content": prompt},
            ]
            logger.debug(
                f"[sdk1][LLM]Invoking {self.adapter.get_provider()} completion API"
            )

            # NOTE(review): extract_json/process_text also flow into
            # adapter.validate here — presumably the validator drops unknown
            # keys before they reach litellm.completion; confirm.
            completion_kwargs = self.adapter.validate({**self.kwargs, **kwargs})

            # if hasattr(self, "model") and self.model not in O1_MODELS:
            #     completion_kwargs["temperature"] = 0.003
            # if hasattr(self, "thinking_dict") and self.thinking_dict is not None:
            #     completion_kwargs["temperature"] = 1

            response: dict[str, object] = litellm.completion(
                messages=messages,
                **completion_kwargs,
            )

            # First choice only; the provider's usage payload feeds auditing.
            response_text = response["choices"][0]["message"]["content"]

            self._record_usage(
                self.kwargs["model"], messages, response.get("usage"), "complete"
            )

            # NOTE:
            # The typecasting was required to stop the type checker from complaining.
            # Improvements in readability are definitely welcome.
            extract_json: bool = cast("bool", kwargs.get("extract_json", False))
            post_process_fn: (
                Callable[[LLMResponseCompat, bool, str], dict[str, object]] | None
            ) = cast(
                "Callable[[LLMResponseCompat, bool, str], dict[str, object]] | None",
                kwargs.get("process_text", None),
            )

            response_text, post_processed_output = self._post_process_response(
                response_text, extract_json, post_process_fn
            )

            response_object = LLMResponseCompat(response_text)
            response_object.raw = (
                response  # Attach raw litellm response for metadata access
            )
            return {"response": response_object, **post_processed_output}

        except LLMError:
            # Already wrapped LLMError, re-raise as is
            raise
        except SdkError:
            # Already wrapped SdkError, re-raise as is
            raise
        except Exception as e:
            # Wrap all other exceptions in LLMError with provider context
            logger.error(f"[sdk1][LLM] Error during completion: {e}")

            # Extract status code if available
            status_code = None
            if hasattr(e, "status_code"):
                status_code = e.status_code
            elif hasattr(e, "http_status"):
                status_code = e.http_status

            error_msg = (
                f"Error from LLM provider '{self.adapter.get_provider()}': "
                f"{strip_litellm_prefix(str(e))}"
            )

            raise LLMError(
                message=error_msg, status_code=status_code, actual_err=e
            ) from e

    def stream_complete(
        self,
        prompt: str,
        callback_manager: object | None = None,
        **kwargs: object,
    ) -> Generator[LLMResponseCompat, None, None]:
        """Yield LLMResponseCompat objects with text chunks.

        Chunks arrive as they stream from the provider. Usage is requested via
        ``stream_options.include_usage`` and recorded when the provider
        emits it.

        Args:
            prompt: User prompt, sent after the configured system prompt.
            callback_manager: Optional object; when it exposes ``on_stream``,
                it is invoked with each text chunk as it arrives.
            **kwargs: Extra completion parameters merged over the adapter's.

        Yields:
            LLMResponseCompat: Wrapper whose ``text`` and ``delta`` hold the
                streamed text chunk.

        Raises:
            LLMError: Provider failures, wrapped with provider context.
        """
        try:
            messages = [
                {"role": "system", "content": self._system_prompt},
                {"role": "user", "content": prompt},
            ]
            logger.debug(
                f"[sdk1][LLM]Invoking {self.adapter.get_provider()} stream completion API"
            )

            completion_kwargs = self.adapter.validate({**self.kwargs, **kwargs})

            for chunk in litellm.completion(
                messages=messages,
                stream=True,
                stream_options={
                    "include_usage": True,
                },
                **completion_kwargs,
            ):
                if chunk.get("usage"):
                    self._record_usage(
                        self.kwargs["model"],
                        messages,
                        chunk.get("usage"),
                        "stream_complete",
                    )

                # FIX: with include_usage, the final usage-only chunk carries
                # an empty "choices" list (OpenAI streaming contract). Guard
                # it so recording usage above doesn't end in an IndexError
                # from the unconditional choices[0] access.
                choices = chunk.get("choices") or []
                if not choices:
                    continue

                text = choices[0]["delta"].get("content", "")

                if text:
                    if callback_manager and hasattr(callback_manager, "on_stream"):
                        callback_manager.on_stream(text)

                    # Yield LLMResponseCompat for backward compatibility
                    # with code expecting .delta
                    stream_response = LLMResponseCompat(text)
                    stream_response.delta = text
                    yield stream_response

        except LLMError:
            # Already wrapped LLMError, re-raise as is
            raise
        except SdkError:
            # Already wrapped SdkError, re-raise as is
            raise
        except Exception as e:
            # Wrap all other exceptions in LLMError with provider context
            logger.error(f"[sdk1][LLM] Error during stream completion: {e}")

            # Extract status code if available
            status_code = None
            if hasattr(e, "status_code"):
                status_code = e.status_code
            elif hasattr(e, "http_status"):
                status_code = e.http_status

            error_msg = (
                f"Error from LLM provider '{self.adapter.get_provider()}': "
                f"{strip_litellm_prefix(str(e))}"
            )

            raise LLMError(
                message=error_msg, status_code=status_code, actual_err=e
            ) from e

    async def acomplete(self, prompt: str, **kwargs: object) -> dict[str, object]:
        """Asynchronous chat completion (wrapper around ``litellm.acompletion``).

        Args:
            prompt: User prompt, sent after the configured system prompt.
            **kwargs: Extra completion parameters merged over the adapter's.

        Returns:
            Dict with a single "response" key holding an LLMResponseCompat.

        Raises:
            LLMError: Provider failures, wrapped with provider context.
        """
        try:
            chat_messages = [
                {"role": "system", "content": self._system_prompt},
                {"role": "user", "content": prompt},
            ]
            logger.debug(
                f"[sdk1][LLM]Invoking {self.adapter.get_provider()} async completion API"
            )

            call_kwargs = self.adapter.validate({**self.kwargs, **kwargs})

            raw_response = await litellm.acompletion(
                messages=chat_messages,
                **call_kwargs,
            )
            text = raw_response["choices"][0]["message"]["content"]

            self._record_usage(
                self.kwargs["model"],
                chat_messages,
                raw_response.get("usage"),
                "acomplete",
            )

            compat = LLMResponseCompat(text)
            # Attach raw litellm response for metadata access.
            compat.raw = raw_response
            return {"response": compat}

        except (LLMError, SdkError):
            # Already wrapped upstream; propagate unchanged.
            raise
        except Exception as e:
            # Wrap everything else with provider context.
            logger.error(f"[sdk1][LLM] Error during async completion: {e}")

            # Surface an HTTP status code when the provider error carries one.
            status_code = (
                getattr(e, "status_code", None)
                if hasattr(e, "status_code")
                else getattr(e, "http_status", None)
            )

            raise LLMError(
                message=(
                    f"Error from LLM provider '{self.adapter.get_provider()}': "
                    f"{strip_litellm_prefix(str(e))}"
                ),
                status_code=status_code,
                actual_err=e,
            ) from e

    @classmethod
    def get_context_window_size(
        cls, adapter_id: str, adapter_metadata: dict[str, object]
    ) -> int:
        """Returns the context window size of the LLM.

        Falls back to ``cls.MAX_TOKENS`` when the model cannot be resolved.
        """
        try:
            adapter_module = adapters[adapter_id][Common.MODULE]
            model_name = adapter_module.validate_model(adapter_metadata)
            return get_max_tokens(model_name)
        except Exception as e:
            logger.warning(f"Failed to get context window size for {adapter_id}: {e}")
            return cls.MAX_TOKENS

    @classmethod
    def get_max_tokens(
        cls, adapter_instance_id: str, tool: BaseTool, reserved_for_output: int = 0
    ) -> int:
        """Returns the maximum number of tokens limit for the LLM.

        Args:
            adapter_instance_id: Instance identifier of the LLM adapter.
            tool: Tool used to resolve the adapter configuration.
            reserved_for_output: Token budget reserved for the completion;
                subtracted from the model's maximum.

        Returns:
            Model max-token limit minus ``reserved_for_output``; falls back to
            ``cls.MAX_TOKENS - reserved_for_output`` when lookup fails.
        """
        try:
            llm_config = PlatformHelper.get_adapter_config(tool, adapter_instance_id)
            adapter_id = llm_config[Common.ADAPTER_ID]
            adapter_metadata = llm_config[Common.ADAPTER_METADATA]

            model = adapters[adapter_id][Common.MODULE].validate_model(adapter_metadata)

            return get_max_tokens(model) - reserved_for_output
        except Exception as e:
            # FIX: message previously said "context window size" — copy-pasted
            # from get_context_window_size; this method resolves max tokens.
            logger.warning(f"Failed to get max tokens for {adapter_instance_id}: {e}")
            return cls.MAX_TOKENS - reserved_for_output

    def get_model_name(self) -> str:
        """Gets the name of the LLM model.

        Returns:
            LLM model name
        """
        model_name = self.kwargs["model"]
        return model_name

    def get_metrics(self) -> dict[str, object]:
        """Return the captured metrics dict (the live object, not a copy)."""
        return self._metrics

    def get_usage_reason(self) -> object:
        """Return the "llm_usage_reason" platform kwarg, or None if unset."""
        return self.platform_kwargs.get("llm_usage_reason")

    def _record_usage(
        self,
        model: str,
        messages: list[dict[str, str]],
        usage: Mapping[str, int] | None,
        llm_api: str,
    ) -> None:
        """Log token usage and push it to the platform audit service.

        Args:
            model: Model name used for the call.
            messages: Chat messages that were sent to the provider.
            usage: Provider-reported usage payload; missing counts default
                to 0.
            llm_api: Name of the calling API (e.g. "complete"), used only
                for log context.
        """
        # Locally-estimated prompt token count — used for logging only; the
        # audited counts below come from the provider's usage payload.
        prompt_tokens = token_counter(model=model, messages=messages)
        usage_data: Mapping[str, int] = usage or {}
        all_tokens = TokenCounterCompat(
            prompt_tokens=usage_data.get("prompt_tokens", 0),
            completion_tokens=usage_data.get("completion_tokens", 0),
            total_tokens=usage_data.get("total_tokens", 0),
        )

        logger.info(f"[sdk1][LLM][{model}][{llm_api}] Prompt Tokens: {prompt_tokens}")
        logger.info(f"[sdk1][LLM][{model}][{llm_api}] LLM Usage: {all_tokens}")

        Audit().push_usage_data(
            platform_api_key=self._platform_api_key,
            token_counter=all_tokens,
            event_type="llm",
            model_name=model,
            kwargs={"provider": self.adapter.get_provider(), **self.platform_kwargs},
        )

    def _post_process_response(
        self,
        response_text: str,
        extract_json: bool,
        post_process_fn: Callable[[LLMResponseCompat, bool, str], dict[str, object]]
        | None,
    ) -> tuple[str, dict[str, object]]:
        """Optionally extract JSON from the response and run a post-process hook.

        Args:
            response_text: Raw text returned by the LLM.
            extract_json: When True, trim to the content between the JSON
                selection markers, then to the first JSON-looking span.
            post_process_fn: Optional hook invoked with an LLMResponseCompat
                wrapper, the extract_json flag and the original text; its
                return value is surfaced to the caller. Hook failures are
                logged and yield an empty dict.

        Returns:
            Tuple of (possibly trimmed/modified text, post-processed output).
        """
        hook_output: dict[str, object] = {}

        # Keep the unmodified text for the post-process hook.
        unmodified_text = response_text

        if extract_json:
            marker = LLM.JSON_CONTENT_MARKER
            marker_start = response_text.find(marker)
            if marker_start != -1:
                response_text = response_text[marker_start + len(marker) :].lstrip()
            marker_end = response_text.rfind(marker)
            if marker_end != -1:
                response_text = response_text[:marker_end].rstrip()
            json_match = LLM.JSON_REGEX.search(response_text)
            if json_match:
                response_text = json_match.group(0)

        if post_process_fn:
            try:
                wrapper = LLMResponseCompat(response_text)
                hook_output = post_process_fn(wrapper, extract_json, unmodified_text)
                # The hook may mutate the wrapper's text in place.
                response_text = wrapper.text
            except Exception as e:
                logger.error(
                    f"[sdk1][LLM][complete] Failed to post process response: {e}"
                )
                hook_output = {}

        return (response_text, hook_output)
