import abc
from typing import List, Optional, Tuple

import torch

from memory_management import GPUMemoryAllocator, MemoryFormat, MemoryObj
import lmcache.c_ops as lmc_ops

class GPUConnectorInterface(metaclass=abc.ABCMeta):
    """Abstract interface for moving KV data between memory objects and GPU memory."""

    @abc.abstractmethod
    def to_gpu(self, memory_obj: MemoryObj, start: int, end: int, **kwargs):
        """
        Copy a memory object's contents onto the GPU.

        Args:
            memory_obj: The memory object whose data is copied.
            start: Start index of this object's data within the token sequence.
            end: End index of this object's data within the token sequence.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def from_gpu(self, memory_obj: MemoryObj, start: int, end: int, **kwargs):
        """
        Copy data from the GPU into a memory object.

        Args:
            memory_obj: The memory object that receives the data.
            start: Start index of this object's data within the token sequence.
            end: End index of this object's data within the token sequence.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_shape(self, num_tokens: int) -> torch.Size:
        """Return the tensor shape of the data held for ``num_tokens`` tokens."""
        raise NotImplementedError

class VLLMLayerWiseGPUConnector(GPUConnectorInterface):
    """Layer-wise GPU connector for vLLM-style paged KV caches.

    Streams KV-cache data one layer at a time between ``MemoryObj`` buffers
    and the paged GPU KV cache. A staging GPU buffer plus a dedicated CUDA
    "load" stream let host-side copies overlap with the KV-transfer kernel
    launched on the current stream.
    """

    # Default capacity (in tokens) of the staging GPU buffer; override
    # with the ``max_tokens`` kwarg.
    DEFAULT_MAX_TOKENS = 32000

    def __init__(
            self,
            num_layers: int,
            hidden_dim_size: int,
            use_gpu: bool = False,
            **kwargs):
        """
        params:
            num_layers: The number of layers in the model.
            hidden_dim_size: The hidden dimension size of the model.
            use_gpu: Whether to allocate the GPU staging buffer and CUDA
                streams. When False, only shape metadata is stored and the
                GPU-related attributes are left unset.
            kwargs: Additional arguments (required when use_gpu is True):
                dtype: The data type of the model.
                device: The device to use.
                chunk_size: The chunk size per chunk.
                max_tokens: Optional capacity (in tokens) of the staging
                    buffer; defaults to DEFAULT_MAX_TOKENS.

        Raises:
            ValueError: if use_gpu is True and a required kwarg is missing.
        """
        self.num_layers = num_layers
        self.hidden_dim_size = hidden_dim_size

        if not use_gpu:
            # CPU-only mode: no staging buffer or streams are created.
            return

        # Validate with real exceptions instead of `assert`, which is
        # stripped when Python runs with -O.
        for key in ("device", "dtype", "chunk_size"):
            if key not in kwargs:
                raise ValueError(
                    f"Missing required kwarg '{key}' when use_gpu=True")

        self.dtype = kwargs["dtype"]
        self.device = torch.device(kwargs["device"])
        self.chunk_size = kwargs["chunk_size"]

        # Size the staging buffer for the largest request we expect.
        max_tokens = kwargs.get("max_tokens", self.DEFAULT_MAX_TOKENS)
        shape = self.get_shape(max_tokens)

        num_elements = shape.numel()
        # Element size (bytes) for the configured dtype.
        element_size = torch.tensor([], dtype=self.dtype).element_size()
        self.gpu_buffer_allocator = GPUMemoryAllocator(
            num_elements * element_size,
            self.device)

        # Dedicated streams so copies can overlap with work on the
        # current stream.
        self.load_stream = torch.cuda.Stream()
        self.store_stream = torch.cuda.Stream()

    def to_gpu(self, memory_obj, start, end, **kwargs):
        """Single-object transfer is unsupported; use batched_to_gpu."""
        # BUGFIX: the original raised the `NotImplemented` singleton, which
        # is not an exception and produces a confusing TypeError.
        raise NotImplementedError(
            "VLLMLayerWiseGPUConnector only supports batched transfers")

    def from_gpu(self, memory_obj, start, end, **kwargs):
        """Single-object transfer is unsupported; use batched_from_gpu."""
        # BUGFIX: same NotImplemented -> NotImplementedError fix as to_gpu.
        raise NotImplementedError(
            "VLLMLayerWiseGPUConnector only supports batched transfers")

    def batched_to_gpu(self, starts: List[int], ends: List[int], **kwargs):
        """
            Generator that copies chunked KV data into the paged GPU cache.

            Protocol: prime with next(); then send() one list of per-chunk
            MemoryObjs for each of the num_layers layers; finally resume
            once more so cleanup runs and the staging buffer is released.

            params:
                starts: The start indices of the data in the token sequence.
                ends: The end indices of the data in the token sequence.
                kwargs:
                    kvcaches: per-layer paged KV cache tensors.
                    slot_mappings: token-index -> slot mapping tensor.
        """
        if "kvcaches" not in kwargs or "slot_mappings" not in kwargs:
            raise ValueError(
                "batched_to_gpu requires 'kvcaches' and 'slot_mappings'")

        kvcaches: List[torch.Tensor] = kwargs["kvcaches"]
        slot_mappings: torch.Tensor = kwargs["slot_mappings"]

        # Restrict the slot mappings to the requested chunks.
        slot_mappings_chunks = []
        for start, end in zip(starts, ends, strict=False):
            slot_mappings_chunks.append(slot_mappings[start:end])

        slot_mappings_fulls = torch.cat(slot_mappings_chunks, dim=0)
        num_tokens = len(slot_mappings_fulls)

        # Staging buffer holding one layer's worth of KV data on the GPU.
        buffer_shape = self.get_shape(num_tokens)
        tmp_gpu_buffer = self.gpu_buffer_allocator.allocate(
            buffer_shape,
            self.dtype,
            MemoryFormat.KV_T2D,
        )

        offset = starts[0]
        current_stream = torch.cuda.current_stream()

        for layer_id in range(self.num_layers):
            # Caller sends one list of per-chunk MemoryObjs per layer.
            layer_memory_obj = yield

            with torch.cuda.stream(self.load_stream):
                for start, end, memory_obj in zip(
                    starts, ends, layer_memory_obj, strict=False
                ):
                    tmp_gpu_buffer.tensor[start - offset : end - offset].copy_(
                        memory_obj.tensor, non_blocking=True
                    )

            # BUGFIX: this wait was originally issued *before* the copies
            # above were enqueued, so the transfer kernel below was never
            # ordered after the copies it reads. It must follow the copy
            # block so the kernel sees complete data.
            # NOTE(review): with a single staging buffer there is still a
            # potential write-after-read hazard when the next layer's copies
            # overwrite the buffer; confirm whether lmc_ops/allocator
            # semantics already cover that, or add
            # self.load_stream.wait_stream(current_stream) before the copies.
            current_stream.wait_stream(self.load_stream)

            # Scatter the staged layer into the paged KV cache.
            lmc_ops.single_layer_kv_transfer(
                tmp_gpu_buffer.tensor,
                kvcaches[layer_id][0],
                kvcaches[layer_id][1],
                slot_mappings_fulls,
                False
            )

        # Final yield so the caller can resume once more to run cleanup.
        yield

        # BUGFIX: the original waited on current_stream itself, a no-op.
        # Wait on the load stream so any still-pending async copies finish
        # before the staging buffer is released for reuse.
        current_stream.wait_stream(self.load_stream)

        tmp_gpu_buffer.ref_count_down()

    def batched_from_gpu(self, memory_objs: List[List[MemoryObj]], starts: List[int], ends: List[int]):
        """Copy data from the paged GPU cache into memory objects.

        Not implemented yet; intentionally a no-op to preserve the
        original behavior.
        """
        pass

    def get_shape(self, num_tokens: int) -> torch.Size:
        """Shape of one layer's data: [num_tokens, 2 (K and V), hidden_dim_size]."""
        return torch.Size([num_tokens, 2, self.hidden_dim_size])