"""Chat-based Ollama LLM implementation."""

from collections.abc import Callable
from typing import Any, Generator, AsyncGenerator

from tenacity import (
    AsyncRetrying,
    RetryError,
    Retrying,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential_jitter,
)

from graphrag.query.llm.base import BaseLLM, BaseLLMCallback
from graphrag.query.llm.ola.base import OllamaLLMImpl
from graphrag.query.llm.ola.typing import (
    OllamaApiType,
)
from graphrag.query.progress import StatusReporter

# Error message used when a chat call is attempted without a configured model.
_MODEL_REQUIRED_MSG = "model is required"


class ChatOllama(BaseLLM, OllamaLLMImpl):
    """Wrapper for Ollama ChatCompletion models.

    Thin retrying facade over the Ollama chat API: ``generate``/``agenerate``
    return the full completion text; ``stream``/``astream`` expose per-token
    deltas as a (async) generator. All four retry transient failures up to
    ``max_retries`` times with jittered exponential backoff.
    """

    def __init__(
        self,
        model: str | None = None,
        api_base: str | None = None,
        max_retries: int = 10,
        request_timeout: float = 180.0,
        reporter: StatusReporter | None = None,
    ):
        """Initialize the chat wrapper.

        Args:
            model: Name of the Ollama model; required before any request.
            api_base: Base URL of the Ollama server.
            max_retries: Maximum attempts per request.
            request_timeout: Per-request timeout in seconds.
            reporter: Sink for error reports.
        """
        OllamaLLMImpl.__init__(
            self=self,
            api_base=api_base,
            max_retries=max_retries,
            request_timeout=request_timeout,
            reporter=reporter,
        )
        self.model = model

    def _require_model(self) -> str:
        """Return the configured model name, raising ValueError if unset."""
        if not self.model:
            raise ValueError(_MODEL_REQUIRED_MSG)
        return self.model

    @staticmethod
    def _build_options(kwargs: dict[str, Any]) -> dict[str, Any]:
        """Translate OpenAI-style kwargs into Ollama request options.

        Renames ``max_tokens`` to Ollama's ``num_predict``. The rename is
        conditional: previously ``max_tokens`` was popped unconditionally,
        raising KeyError whenever the caller omitted it.
        """
        if "max_tokens" in kwargs:
            kwargs["num_predict"] = kwargs.pop("max_tokens")
        return kwargs

    @staticmethod
    def _chunk_delta(chunk: Any) -> str:
        """Extract the text delta carried by one streaming response chunk."""
        message = chunk["message"]
        return message["content"] if message and message["content"] else ""

    def _retryer(self) -> Retrying:
        """Build the sync retry policy shared by generate()/stream()."""
        return Retrying(
            stop=stop_after_attempt(self.max_retries),
            wait=wait_exponential_jitter(max=10),
            reraise=True,
            # No argument: retry on any Exception (tenacity default).
            retry=retry_if_exception_type(),
        )

    def _aretryer(self) -> AsyncRetrying:
        """Build the async retry policy shared by agenerate()/astream()."""
        return AsyncRetrying(
            stop=stop_after_attempt(self.max_retries),
            wait=wait_exponential_jitter(max=10),
            reraise=True,
            retry=retry_if_exception_type(),
        )

    def generate(
        self,
        messages: str | list[Any],
        streaming: bool = True,
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> str:
        """Generate text, retrying transient failures.

        Returns the completion text, or "" after a reported failure.
        """
        try:
            for attempt in self._retryer():
                with attempt:
                    return self._generate(
                        messages=messages,
                        streaming=streaming,
                        callbacks=callbacks,
                        **kwargs,
                    )
        except RetryError as e:
            # NOTE: with reraise=True the last underlying exception is
            # normally re-raised instead of RetryError; best-effort catch.
            self._reporter.error(
                message="Error at generate()", details={self.__class__.__name__: str(e)}
            )
            return ""
        return ""

    async def agenerate(
        self,
        messages: str | list[Any],
        streaming: bool = True,
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> str:
        """Generate text asynchronously, retrying transient failures.

        Returns the completion text, or "" after a reported failure.
        """
        try:
            async for attempt in self._aretryer():
                with attempt:
                    return await self._agenerate(
                        messages=messages,
                        streaming=streaming,
                        callbacks=callbacks,
                        **kwargs,
                    )
        except RetryError as e:
            # Reporter call unified with generate(): structured message/details
            # instead of a positional f-string.
            self._reporter.error(
                message="Error at agenerate()",
                details={self.__class__.__name__: str(e)},
            )
            return ""
        return ""

    def stream(
        self,
        messages: str | list[Any],
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> Generator[str, None, None] | None:
        """Stream text, returning a generator of deltas or None on failure."""
        try:
            for attempt in self._retryer():
                with attempt:
                    # Only generator *construction* is inside the retry scope;
                    # iteration errors surface to the consumer.
                    return self._stream(
                        messages=messages,
                        callbacks=callbacks,
                        **kwargs,
                    )
        except RetryError as e:
            self._reporter.error(
                message="Error at stream()", details={self.__class__.__name__: str(e)}
            )
            return None
        return None

    async def astream(
        self,
        messages: str | list[Any],
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> AsyncGenerator[str, None] | None:
        """Stream text asynchronously; returns an async generator or None."""
        try:
            async for attempt in self._aretryer():
                with attempt:
                    # Async generator function: calling it returns the
                    # generator without awaiting.
                    return self._astream(
                        messages=messages,
                        callbacks=callbacks,
                        **kwargs,
                    )
        except RetryError as e:
            # Reporter call unified with stream(): structured message/details.
            self._reporter.error(
                message="Error at astream()",
                details={self.__class__.__name__: str(e)},
            )
            return None
        return None

    def _generate(
        self,
        messages: str | list[Any],
        streaming: bool = True,
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> str:
        """Issue one sync chat request and return the full completion text."""
        model = self._require_model()
        options = self._build_options(kwargs)
        response = self.sync_client.chat(  # type: ignore
            model=model,
            messages=messages,  # type: ignore
            stream=streaming,  # type: ignore
            options=options,  # type: ignore
        )
        if not streaming:
            return response["message"]["content"] or ""  # type: ignore
        full_response = ""
        for chunk in response:
            if not chunk:
                continue
            delta = self._chunk_delta(chunk)
            full_response += delta
            if callbacks:
                for callback in callbacks:
                    callback.on_llm_new_token(delta)
            if chunk["done"]:  # type: ignore
                break
        return full_response

    async def _agenerate(
        self,
        messages: str | list[Any],
        streaming: bool = True,
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> str:
        """Issue one async chat request and return the full completion text."""
        model = self._require_model()
        options = self._build_options(kwargs)
        response = await self.async_client.chat(  # type: ignore
            model=model,
            messages=messages,  # type: ignore
            stream=streaming,  # type: ignore
            options=options,  # type: ignore
        )
        if not streaming:
            return response["message"]["content"] or ""  # type: ignore
        full_response = ""
        # async for correctly handles StopAsyncIteration; the previous
        # manual __anext__ loop caught StopIteration, which async iterators
        # never raise, so exhaustion would have propagated uncaught.
        async for chunk in response:  # type: ignore
            if not chunk:
                continue
            delta = self._chunk_delta(chunk)
            full_response += delta
            if callbacks:
                for callback in callbacks:
                    callback.on_llm_new_token(delta)
            if chunk["done"]:  # type: ignore
                break
        return full_response

    def _stream(
        self,
        messages: str | list[Any],
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> Generator[str, None, None]:
        """Yield completion deltas from one sync streaming chat request."""
        model = self._require_model()
        options = self._build_options(kwargs)
        response = self.sync_client.chat(  # type: ignore
            model=model,
            messages=messages,  # type: ignore
            stream=True,  # type: ignore
            options=options,  # type: ignore
        )
        for chunk in response:
            if not chunk:
                continue
            delta = self._chunk_delta(chunk)
            if callbacks:
                for callback in callbacks:
                    callback.on_llm_new_token(delta)
            # Yield before the done-check so the final chunk's content (if
            # any) is not dropped; previously callbacks saw the last delta
            # but the generator never yielded it, diverging from _generate().
            yield delta
            if chunk["done"]:  # type: ignore
                break

    async def _astream(
        self,
        messages: str | list[Any],
        callbacks: list[BaseLLMCallback] | None = None,
        **kwargs: Any,
    ) -> AsyncGenerator[str, None]:
        """Yield completion deltas from one async streaming chat request."""
        model = self._require_model()
        options = self._build_options(kwargs)
        response = await self.async_client.chat(  # type: ignore
            model=model,
            messages=messages,  # type: ignore
            stream=True,  # type: ignore
            options=options,  # type: ignore
        )
        # async for handles StopAsyncIteration, which the previous manual
        # __anext__/except StopIteration loop could never catch.
        async for chunk in response:  # type: ignore
            if not chunk:
                continue
            delta = self._chunk_delta(chunk)
            if callbacks:
                for callback in callbacks:
                    callback.on_llm_new_token(delta)
            # Yield before the done-check so the final delta is delivered
            # (mirrors the _stream() fix).
            yield delta
            if chunk["done"]:  # type: ignore
                break
