from abc import ABC, ABCMeta, abstractmethod
import asyncio
import functools
import inspect
import time
from typing import AsyncGenerator, Dict, List, Optional, Union, Generator

from chernc.llm.uni_tokenizer import UniTokenizer


class LanguageModelPredictor(ABC):
    """Synchronous interface that every language-model backend must implement.

    Concrete subclasses provide token counting plus blocking and streaming
    chat completion. ``tokenizer`` is left as ``None`` here; backends that
    need one assign it in their own ``__init__``.
    """

    def __init__(
        self,
    ) -> None:
        # Backends populate this with their own UniTokenizer instance.
        self.tokenizer: Optional[UniTokenizer] = None

    @abstractmethod
    def token_num(self, text: Union[str, List[Dict[str, str]]]) -> int:
        """
        Count the tokens contained in *text*.

        Args:
            text: A raw string or a list of chat-message dicts to measure.

        Returns:
            The token count of *text*.
        """
        raise NotImplementedError()

    @abstractmethod
    def chat(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Optional[str]:
        """
        Run a blocking chat completion.

        Args:
            messages: Chat history to send to the model.
            max_length: Upper bound on the length of the completion.
            temperature: Sampling temperature, or ``None`` for the backend default.
            top_p: Nucleus-sampling threshold, or ``None`` for the backend default.

        Returns:
            The completion text, or ``None`` if the backend produced nothing.
        """
        raise NotImplementedError()

    @abstractmethod
    def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Generator[Optional[str], None, None]:
        """
        Run a chat completion, yielding the output incrementally.

        Args:
            messages: Chat history to send to the model.
            max_length: Upper bound on the length of the completion.
            temperature: Sampling temperature, or ``None`` for the backend default.
            top_p: Nucleus-sampling threshold, or ``None`` for the backend default.

        Returns:
            A generator producing chunks of the completion text.
        """
        raise NotImplementedError()


class RateLimited(ABCMeta):
    """Metaclass that throttles ``chat``/``chat_stream`` to ``rate_limit`` calls per minute.

    Any class created with this metaclass gets its ``__init__`` wrapped to
    record a per-instance minimum interval (``60 / rate_limit`` seconds,
    defaulting to one call per minute when ``rate_limit`` is absent or falsy),
    and gets its ``chat``/``chat_stream`` attributes wrapped so each call
    first sleeps until the interval since the previous call has elapsed.

    NOTE(review): the interval state is per instance and not lock-protected,
    so concurrent tasks on one instance may race past the limiter — confirm
    whether stricter enforcement is needed.
    """

    def __new__(cls, name, bases, attrs):
        if "__init__" in attrs:
            original_init = attrs["__init__"]

            @functools.wraps(original_init)
            def init_wrapper(self, *args, **kwargs):
                original_init(self, *args, **kwargs)
                # Falsy/absent rate_limit falls back to 1 call per minute.
                rate_limit = getattr(self, "rate_limit", None) or 1
                self._interval = 60.0 / rate_limit
                # 0.0 makes the very first call go through without waiting.
                self._last_time = 0.0

            attrs["__init__"] = init_wrapper

        async def _wait_turn(self):
            # Sleep in a loop so a concurrent call that advanced _last_time
            # while we slept is still respected.
            while (waited := time.time() - self._last_time) < self._interval:
                await asyncio.sleep(self._interval - waited)
            self._last_time = time.time()

        def create_wrapper(func):
            if inspect.isasyncgenfunction(func):
                # ``await func(...)`` on an async generator function raises
                # TypeError, so streaming methods need a delegating
                # async-generator wrapper instead of a coroutine wrapper.
                @functools.wraps(func)
                async def agen_wrapper(self, *args, **kwargs):
                    await _wait_turn(self)
                    async for item in func(self, *args, **kwargs):
                        yield item

                return agen_wrapper

            # functools.wraps also propagates __isabstractmethod__ (it lives
            # in the function's __dict__), so wrapped @abstractmethods stay
            # abstract and ABCMeta keeps enforcing them.
            @functools.wraps(func)
            async def wrapper(self, *args, **kwargs):
                await _wait_turn(self)
                return await func(self, *args, **kwargs)

            return wrapper

        if "chat" in attrs:
            attrs["chat"] = create_wrapper(attrs["chat"])
        if "chat_stream" in attrs:
            attrs["chat_stream"] = create_wrapper(attrs["chat_stream"])
        return super().__new__(cls, name, bases, attrs)


class AsyncLanguageModelPredictor(ABC, metaclass=RateLimited):
    """Asynchronous language-model interface with built-in rate limiting.

    The ``RateLimited`` metaclass throttles ``chat`` and ``chat_stream`` to
    ``rate_limit`` calls per minute. Subclasses implement token counting plus
    awaitable and streaming chat completion.
    """

    def __init__(self, rate_limit=1) -> None:
        # Backends populate this with their own UniTokenizer instance.
        self.tokenizer: Optional[UniTokenizer] = None
        # Read by the RateLimited metaclass to size the call interval.
        self.rate_limit = rate_limit

    @abstractmethod
    def token_num(self, text: Union[str, List[Dict[str, str]]]) -> int:
        """
        Count the tokens contained in *text*.

        Args:
            text: A raw string or a list of chat-message dicts to measure.

        Returns:
            The token count of *text*.
        """
        raise NotImplementedError()

    @abstractmethod
    async def chat(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Optional[str]:
        """
        Run an awaitable chat completion.

        Args:
            messages: Chat history to send to the model.
            max_length: Upper bound on the length of the completion.
            temperature: Sampling temperature, or ``None`` for the backend default.
            top_p: Nucleus-sampling threshold, or ``None`` for the backend default.

        Returns:
            The completion text, or ``None`` if the backend produced nothing.
        """
        raise NotImplementedError()

    @abstractmethod
    async def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> AsyncGenerator[Optional[str], None]:
        """
        Run a chat completion, yielding the output incrementally.

        Args:
            messages: Chat history to send to the model.
            max_length: Upper bound on the length of the completion.
            temperature: Sampling temperature, or ``None`` for the backend default.
            top_p: Nucleus-sampling threshold, or ``None`` for the backend default.

        Returns:
            An async generator producing chunks of the completion text.
        """
        # The bare yield makes this an async generator function, so the
        # declared AsyncGenerator return type matches at runtime.
        yield  # type: ignore[misc]
        raise NotImplementedError()
