from typing import Optional

import torch
from torch.nn.parameter import is_lazy

class DequeMemory(torch.nn.Module):
    def __init__(self, dim: int, capacity: int):
        super().__init__()
        self._dim = dim
        self._capacity = capacity
        self._buffer = torch.nn.UninitializedBuffer()
        self._buffer_size = torch.nn.UninitializedBuffer()

    @property
    def dim(self) -> int:
        return self._dim

    @property
    def capacity(self) -> int:
        return self._capacity

    @property
    def buffer_size(self) -> torch.Tensor:
        return self._buffer_size

    def get_extra_state(self) -> dict:
        return dict(capacity=self._capacity)

    def set_extra_state(self, state: dict):
        if not is_lazy(self._buffer):
            dim = self._dim % len(self._buffer.shape)
            buffer_size = self._buffer.shape[dim]
            if buffer_size > self._capacity:
                self._buffer = self._buffer.narrow(dim, buffer_size - self._capacity, self._capacity)

    def append(self, chunk: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        :param chunk: shape=[*batch_shape, chunk_size]
        :param mask: shape=[*batch_shape], dtype=bool
        """
        dim = self._dim % len(chunk.shape)
        batch_shape = chunk.shape[:dim]
        chunk_size = chunk.shape[dim]
        value_shape = chunk.shape[dim + 1:]

        if chunk_size > self._capacity:
            chunk = chunk.narrow(dim, chunk_size - self._capacity, self._capacity)
            chunk_size = self._capacity

        if mask is None:
            chunk_size_masked = chunk_size
        else:
            mask = mask.broadcast_to(batch_shape)
            chunk_size_masked = torch.where(mask, chunk_size, 0)

        if is_lazy(self._buffer):
            if chunk_size >= self._capacity:
                self._buffer = chunk
            else:
                padding_size = self._capacity - chunk_size
                padding_shape = batch_shape + (padding_size,) + value_shape
                padding = torch.zeros(padding_shape, dtype=chunk.dtype, device=chunk.device)
                self._buffer = torch.concat((padding, chunk), dim=dim)
            self._buffer_size = torch.zeros(batch_shape, dtype=torch.int64, device=chunk.device)
        else:
            if chunk_size >= self._capacity:
                if mask is None:
                    self._buffer = chunk
                else:
                    buffer_mask = mask.view(batch_shape + (1,) + (1,) * len(value_shape))
                    buffer_mask = buffer_mask.expand(batch_shape + (self._capacity,) + value_shape)
                    self._buffer = torch.where(buffer_mask, chunk, self._buffer)
            else:
                if mask is None:
                    legacy = self._buffer.narrow(dim, chunk_size, self._capacity - chunk_size)
                    self._buffer = torch.concat((legacy, chunk), dim=dim)
                else:
                    buffer_rolled = roll_nd(self._buffer, -chunk_size_masked, dim)
                    legacy = buffer_rolled.narrow(dim, 0, self._capacity - chunk_size)
                    chunk_part0 = buffer_rolled.narrow(dim, self._capacity - chunk_size, chunk_size)
                    chunk_mask = mask.view(batch_shape + (1,) + (1,) * len(value_shape))
                    chunk_mask = chunk_mask.expand(batch_shape + (chunk_size,) + value_shape)
                    chunk = torch.where(chunk_mask, chunk, chunk_part0)
                    self._buffer = torch.concat((legacy, chunk), dim=dim)
        self._buffer_size += chunk_size_masked

    def reset(self, mask: Optional[torch.Tensor] = None):
        """
        :param mask: shape=[*batch_shape], dtype=bool
        """
        if mask is None:
            self._buffer = torch.nn.UninitializedBuffer()
            self._buffer_size = torch.nn.UninitializedBuffer()
        else:
            self._buffer_size.masked_fill_(mask, 0)

    def read(self) -> tuple[torch.Tensor, torch.Tensor] | None:
        offset = self._buffer_size
        if is_lazy(offset):
            return None
        buffer = self._buffer
        if is_lazy(buffer):
            return None
        dim = self._dim % len(buffer.shape)
        buffer_size = buffer.shape[dim]
        indices = torch.arange(buffer_size, dtype=buffer.dtype, device=buffer.device)
        mask = indices.flip(0) < offset.unsqueeze(-1)
        return buffer, mask

def roll_nd(
    x: torch.Tensor,
    shift: torch.Tensor,
    dim: int
) -> torch.Tensor:
    """
    Roll ``x`` along ``dim`` by a per-batch-element amount.

    Behaves like applying ``torch.roll`` independently to every batch
    element, with ``shift`` supplying each element's own (possibly
    negative) shift: ``y[..., i, ...] = x[..., (i - shift) % size, ...]``.

    :param x: shape=[*batch_shape, buffer_size, *value_shape]
    :param shift: shape=[*batch_shape], int64
    :param dim: index of the rolled axis (may be negative)
    :return: y.shape=[*batch_shape, buffer_size, *value_shape]
    """
    axis = dim % x.ndim
    lead = x.shape[:axis]
    size = x.shape[axis]
    trail = x.shape[axis + 1:]
    per_elem_shift = shift.broadcast_to(lead).unsqueeze(-1)
    positions = torch.arange(size, dtype=torch.int64, device=x.device)
    # Modular source index for each output slot, per batch element.
    source = torch.remainder(positions - per_elem_shift, size)
    source = source.reshape(lead + (size,) + (1,) * len(trail))
    source = source.expand(lead + (size,) + trail)
    return torch.gather(x, axis, source)
