import abc
import array
import hashlib
from typing import Iterable, List, Optional, Tuple, Union

import torch

from configs import LMCacheEngineMetadata
from utils import CacheEngineKey, LayerCacheEngineKey


class TokenDatabase(metaclass=abc.ABCMeta):
    """Abstract interface for splitting a token stream into cache segments.

    Implementations decide how the stream is partitioned and how each
    segment is identified (e.g. a hash string or a ``CacheEngineKey``).
    """

    @abc.abstractmethod
    def process_tokens(
        self,
        tokens: Union[torch.Tensor, List[int]],
        mask: Optional[torch.Tensor] = None,
        make_key: bool = True,
    ) -> Iterable[Tuple[int, int, Union[CacheEngineKey, str]]]:
        """
        Process the tokens and return an iterable of tuples containing the start and end indices of the tokens, and the key of the cache engine.

        Args:
            tokens: The tokens to process.
            mask: The mask of the tokens.
            make_key: Whether to make the key (True) or yield the raw
                hash/identifier string instead (False).

        Returns:
            An iterable of tuples containing the start and end indices of the tokens,
            and the key of the cache engine.
        """
        # Bug fix: the original raised `NotImplemented`, which is a sentinel
        # value (for binary dunder methods), not an exception class — raising
        # it yields "TypeError: exceptions must derive from BaseException".
        raise NotImplementedError


class ChunkedTokenDatabase(TokenDatabase):

    def __init__(
        self, chunk_size: int, metadata: Optional[LMCacheEngineMetadata] = None
    ):
        self.chunk_size = chunk_size
        self.metadata = metadata

    def __make_key_by_hash(
        self, chunk_hash: str, layer_id: Optional[int]
    ) -> CacheEngineKey:
        assert self.metadata is not None

        if layer_id is None:
            return CacheEngineKey(
                fmt=self.metadata.fmt,
                model_name=self.metadata.model_name,
                worker_id=self.metadata.worker_id,
                world_size=self.metadata.world_size,
                chunk_hash=chunk_hash,
            )

        return LayerCacheEngineKey(
            fmt=self.metadata.fmt,
            model_name=self.metadata.model_name,
            worker_id=self.metadata.worker_id,
            world_size=self.metadata.world_size,
            chunk_hash=chunk_hash,
            layer_id=layer_id,
        )

    def __chunk_tokens(
        self, tokens: Union[torch.Tensor, List[int]]
    ) -> Iterable[Union[torch.Tensor, List[int]]]:
        for i in range(0, len(tokens), self.chunk_size):
            yield tokens[i : i + self.chunk_size]

    def __hash(self, tokens: Union[torch.Tensor, List[int]], prefix_hash: str) -> str:
        if isinstance(tokens, torch.Tensor):
            tokens_bytes = tokens.cpu().to(torch.uint32).numpy().tobytes()
        elif isinstance(tokens, list):
            tokens_bytes = array.array("I", tokens).tobytes()

        return hashlib.sha256(prefix_hash.encode("ascii") + tokens_bytes).hexdigest()

    def __prefix_hash(
        self, token_chunks: Iterable[Union[torch.Tensor, List[int]]]
    ) -> Iterable[str]:
        prefix_hash = ""
        for token_chunk in token_chunks:
            prefix_hash = self.__hash(token_chunk, prefix_hash)
            yield prefix_hash

    def process_tokens(
        self,
        tokens: Union[torch.Tensor, List[int]],
        mask: Optional[torch.Tensor] = None,
        make_key: bool = True,
    ) -> Iterable[Tuple[int, int, Union[CacheEngineKey, str]]]:
        """
        Process the tokens and return an iterable of tuples containing the start and end indices of the tokens, and the key of the cache engine.

        Args:
            tokens: The tokens to process.
            mask: The mask of the tokens.
            make_key: Whether to make the key.

        Returns:
            An iterable of tuples containing the start and end indices of the tokens,
            and the key of the cache engine.
        """
        if mask is not None:
            num_false = mask.numel() - mask.long().sum().item()
        else:
            num_false = 0

        if num_false % self.chunk_size != 0:
            raise ValueError(
                f"Number of false tokens ({num_false}) is not divisible by chunk size ({self.chunk_size})"
            )

        total_len = len(tokens)

        token_chunks = self.__chunk_tokens(tokens)
        prefix_hashes = self.__prefix_hash(token_chunks)

        start_idx = 0
        for chunk_id, prefix_hash in enumerate(prefix_hashes):
            start_idx = chunk_id * self.chunk_size
            end_idx = min(start_idx + self.chunk_size, total_len)
            if start_idx < num_false:
                continue
            else:
                if make_key:
                    yield start_idx, end_idx, self.__make_key_by_hash(prefix_hash, None)
                else:
                    yield start_idx, end_idx, prefix_hash
