import torch

from llmpt.neural import KV


class KVCache:
    """Ring-buffer key/value cache for a stack of attention layers.

    Stores up to ``capacity`` sequence positions per layer; once full, the
    oldest positions are overwritten.  ``head`` and ``tail`` are *absolute*
    (monotonically increasing) sequence positions, so the number of cached
    positions is ``tail - head`` (at most ``capacity``).
    """

    def __init__(self, capacity: int):
        # Maximum number of sequence positions retained per layer.
        self._capacity = capacity

        # Absolute positions: head = oldest cached, tail = one past newest.
        self._head = 0
        self._tail = 0
        # Lazily allocated on first append(): layers_n * (k_cache, v_cache).
        self._cache = None

    @property
    def capacity(self):
        return self._capacity

    @property
    def head(self):
        return self._head

    @property
    def tail(self):

        return self._tail

    def append(self, layers_kv: "tuple[KV, ...]"):
        """Append new key/value states for every layer.

        :param layers_kv: layers_n * [(k, v)]
            k.shape=[..., seq_len, groups_n, qk_size]
            v.shape=[..., seq_len, groups_n, v_size]
        """
        if not layers_kv:
            # Nothing to store; also avoids creating a zero-layer cache below.
            return

        if self._cache is None:
            # Allocate one ring-buffer pair per layer, sized to capacity
            # along the sequence axis (dim -3).
            cache = []
            for k, v in layers_kv:
                k_cache = torch.empty(
                    (*k.shape[:-3], self._capacity, *k.shape[-2:]),
                    dtype=k.dtype, device=k.device)
                v_cache = torch.empty(
                    (*v.shape[:-3], self._capacity, *v.shape[-2:]),
                    dtype=v.dtype, device=v.device)
                cache.append((k_cache, v_cache))
            self._cache = tuple(cache)

        add_size = layers_kv[0][0].shape[-3]
        for (k_cache, v_cache), (k, v) in zip(self._cache, layers_kv):
            add_head = 0
            che_head = self.tail % self.capacity
            # Copy in contiguous segments, wrapping at the buffer end.
            # Absolute position p always lands at storage slot p % capacity.
            while add_head < add_size:
                seg = min(add_size - add_head, self.capacity - che_head)
                add_tail = add_head + seg
                che_tail = che_head + seg
                k_cache[..., che_head:che_tail, :, :] = k[..., add_head:add_tail, :, :]
                v_cache[..., che_head:che_tail, :, :] = v[..., add_head:add_tail, :, :]
                add_head = add_tail
                che_head = che_tail % self.capacity
        self._tail += add_size
        # The oldest retained position trails tail by at most capacity.
        self._head = max(self._head, self._tail - self.capacity)

    def read(self):
        """Return the cached context in logical (oldest-first) order.

        :return: layers_n * [(k, v)], or None when nothing is cached
            k.shape=[..., cached_len, groups_n, qk_size]
            v.shape=[..., cached_len, groups_n, v_size]
        """
        if self._cache is None:
            return None
        if self.head == self.tail:
            return None
        ctx_head = self.head % self.capacity
        ctx_tail = self.tail % self.capacity
        if ctx_head < ctx_tail:
            # Contiguous region: return views without copying.
            return tuple(
                (k[..., ctx_head:ctx_tail, :, :],
                 v[..., ctx_head:ctx_tail, :, :])
                for k, v in self._cache)
        # ctx_head >= ctx_tail: the data wraps around the buffer end
        # (ctx_head == ctx_tail means the buffer is exactly full).
        # BUG FIX: the full case previously returned the raw buffers, which
        # are in storage order, not logical order — wrong whenever
        # head % capacity != 0.  The concat below restores logical order
        # (and degenerates to a plain full copy when ctx_head == 0).
        return tuple(
            (torch.cat([k[..., ctx_head:, :, :], k[..., :ctx_tail, :, :]], dim=-3),
             torch.cat([v[..., ctx_head:, :, :], v[..., :ctx_tail, :, :]], dim=-3))
            for k, v in self._cache)

    def clear(self):
        """Reset positions and drop the buffers (re-allocated on next append)."""
        self._head = 0
        self._tail = 0
        self._cache = None
