import threading
from collections import OrderedDict
from concurrent.futures import Future
from typing import TYPE_CHECKING, List, Optional

import torch
from configs import LMCacheEngineConfig
from memory_management import MemoryAllocatorInterface, MemoryFormat, MemoryObj
from storage_backend.abstract_backend import StorageBackendInterface
from utils import CacheEngineKey


class LocalCPUBackend(StorageBackendInterface):
    """
    Storage backend that keeps KV-cache data in local CPU memory.

    Entries live in an ``OrderedDict`` used as an LRU: reads move a key to
    the MRU end, and :meth:`allocate` evicts from the LRU end when the
    underlying allocator runs out of memory.  The cache holds exactly one
    reference on every stored ``MemoryObj`` (``ref_count_up`` on insert,
    ``ref_count_down`` on removal/eviction), and ``self.usage`` tracks the
    total byte size of all stored objects.  ``self.lock`` guards both
    ``hot_cache`` and ``usage``.
    """

    def __init__(
        self,
        config: LMCacheEngineConfig,
        memory_allocator: MemoryAllocatorInterface,
        layerwise: bool = False,
    ):
        """
        Args:
            config: engine configuration (stored for later use).
            memory_allocator: allocator backing all stored objects.
            layerwise: if True, default allocations use the per-layer
                KV_T2D layout instead of KV_2LTD.
        """
        self.hot_cache: OrderedDict[CacheEngineKey, MemoryObj] = OrderedDict()
        # Protects hot_cache and usage; every public method takes it.
        self.lock = threading.Lock()
        self.memory_allocator = memory_allocator
        self.config = config
        # Total bytes of the objects currently stored in hot_cache.
        self.usage = 0
        self.layerwise = layerwise
        # Side stream for async GPU->CPU copies in write_back().  Created
        # lazily-guarded so this *CPU* backend is constructible on hosts
        # without CUDA (write_back's CUDA branch is unreachable there).
        self.stream: Optional[torch.cuda.Stream] = (
            torch.cuda.Stream() if torch.cuda.is_available() else None
        )

    def __str__(self) -> str:
        return "LocalCPUBackend"

    def contains(self, key: CacheEngineKey, pin: bool = False) -> bool:
        """
        Check whether the backend holds data for `key`.

        Args:
            key: the cache key to look up.
            pin: if True and the key exists, pin the object so it cannot
                be evicted until unpinned.

        Returns:
            True if the key is present.
        """
        with self.lock:
            if key not in self.hot_cache:
                return False
            if pin:
                self.hot_cache[key].pin()
            return True

    def submit_put_task(
        self, key: CacheEngineKey, obj: MemoryObj
    ) -> Optional[Future]:
        """
        Store `obj` under `key`.  The put is synchronous, so no future is
        returned (always None).  The cache takes its own reference on
        `obj`; the caller keeps (and remains responsible for) its own.
        """
        with self.lock:
            if key in self.hot_cache:
                old_obj = self.hot_cache.pop(key)
                # Fix: release the replaced object's accounted size,
                # otherwise usage grows on every overwrite.
                self.usage -= old_obj.get_size()
                old_obj.ref_count_down()
            self.hot_cache[key] = obj
            obj.ref_count_up()
            self.usage += obj.get_size()
        return None

    def exists_in_put_tasks(self, key: CacheEngineKey) -> bool:
        """
        Check if `key` has a pending put task.  Puts are synchronous in
        this backend, so there are never in-flight tasks.
        """
        return False

    def submit_prefetch_task(self, key: CacheEngineKey) -> Optional[Future]:
        """
        Prefetching is a no-op for local CPU memory (data is already
        local), so this always returns None.
        """
        return None

    def get_blocking(self, key: CacheEngineKey) -> Optional[MemoryObj]:
        """
        Return the object stored under `key`, or None if absent.

        Takes an extra reference on the returned object (caller must
        ref_count_down when done) and marks the key most-recently-used.
        """
        with self.lock:
            if key not in self.hot_cache:
                return None
            obj = self.hot_cache[key]
            obj.ref_count_up()
            # LRU bookkeeping: mark this key most-recently-used.
            self.hot_cache.move_to_end(key)
            return obj

    def get_non_blocking(self, key: CacheEngineKey) -> Optional[Future]:
        """
        Non-blocking get: returns an already-completed Future resolving to
        the stored object (with an extra reference taken, as in
        get_blocking), or None if the key is absent.
        """
        with self.lock:
            if key not in self.hot_cache:
                return None
            obj = self.hot_cache[key]
            obj.ref_count_up()
            self.hot_cache.move_to_end(key)
            # CPU lookups are immediate; hand back a pre-resolved future.
            future: Future = Future()
            future.set_result(obj)
            return future

    def pin(self, key: CacheEngineKey) -> bool:
        """
        Pin the object stored under `key` so it cannot be evicted.
        Returns False if the key is absent.
        """
        with self.lock:
            if key not in self.hot_cache:
                return False
            self.hot_cache[key].pin()
            return True

    def unpin(self, key: CacheEngineKey) -> bool:
        """
        Unpin the object stored under `key`.
        Returns False if the key is absent.
        """
        with self.lock:
            if key not in self.hot_cache:
                return False
            self.hot_cache[key].unpin()
            return True

    def remove(self, key: CacheEngineKey) -> bool:
        """
        Remove `key` from the cache, releasing the cache's reference and
        its accounted size.  Returns False if the key is absent.
        """
        with self.lock:
            obj = self.hot_cache.pop(key, None)
            if obj is None:
                return False
            self.usage -= obj.get_size()
            obj.ref_count_down()
            return True

    def close(self) -> None:
        """
        Close the backend: drop every entry that only the cache still
        references (ref count <= 1).  Entries with external references are
        kept so their holders are not invalidated.
        """
        with self.lock:
            # Iterate over a snapshot; we mutate hot_cache in the loop.
            for key, obj in list(self.hot_cache.items()):
                if obj.get_ref_count() > 1:
                    continue
                # Fix: drop the entry and the cache's single reference
                # exactly once (the old code decremented here AND again
                # inside remove(), over-releasing each object).
                del self.hot_cache[key]
                self.usage -= obj.get_size()
                obj.ref_count_down()

    def allocate(
        self,
        shape: torch.Size,
        dtype: torch.dtype,
        fmt: Optional[MemoryFormat] = None,
        evict: bool = True,
    ) -> Optional[MemoryObj]:
        """
        Allocate a memory object of the given shape/dtype/format, evicting
        LRU cache entries if needed.

        Args:
            shape: tensor shape to allocate.
            dtype: tensor dtype to allocate.
            fmt: memory layout; defaults to KV_T2D when layerwise,
                otherwise KV_2LTD.
            evict: if False, do not evict on allocation failure
                (previously this flag was silently ignored).

        Returns:
            The allocated object, or None if allocation failed even after
            eviction (or eviction was disabled).
        """
        if fmt is None:
            fmt = MemoryFormat.KV_T2D if self.layerwise else MemoryFormat.KV_2LTD

        new_obj = self.memory_allocator.allocate(shape, dtype, fmt)
        if new_obj is not None or not evict:
            return new_obj

        with self.lock:
            # Walk from the LRU end; snapshot because we mutate in-loop.
            for key, cached in list(self.hot_cache.items()):
                if cached.get_ref_count() > 1:
                    # Someone besides the cache holds this object; skip.
                    continue
                # Evict: drop the entry and the cache's reference once
                # (the old code ref_count_down'ed here and again via
                # remove(), double-releasing every evicted object).
                del self.hot_cache[key]
                self.usage -= cached.get_size()
                cached.ref_count_down()
                # Retry now that memory has been released.
                new_obj = self.memory_allocator.allocate(shape, dtype, fmt)
                if new_obj is not None:
                    break
        return new_obj

    def write_back(self, key: CacheEngineKey, obj: MemoryObj) -> bool:
        """
        Write `obj` back into the CPU cache under `key`.

        GPU-resident tensors are copied to a freshly allocated CPU object
        on a side stream before insertion; CPU-resident objects are
        inserted directly (taking a cache reference, consistent with
        submit_put_task).  Returns True on insertion, False if the key is
        already cached or allocation fails.
        """
        if obj is None:
            return False

        if obj.tensor is not None and obj.tensor.is_cuda:
            with self.lock:
                if key in self.hot_cache:
                    return False

            cpu_memory_obj = self.memory_allocator.allocate(
                obj.get_shape(), obj.get_dtype(), fmt=obj.get_memory_format()
            )
            if cpu_memory_obj is None:
                return False

            # Copy on the side stream, ordered after pending default-stream
            # work that may still be producing obj.tensor.
            self.stream.wait_stream(torch.cuda.default_stream())
            with torch.cuda.stream(self.stream):
                cpu_memory_obj.tensor.copy_(obj.tensor, non_blocking=True)
            # Keep the source tensor alive until the side stream is done.
            obj.tensor.record_stream(self.stream)
            # Fix: finish the async copy before publishing, so readers
            # never observe a half-copied buffer.
            self.stream.synchronize()

            with self.lock:
                if key in self.hot_cache:
                    # Fix (check-then-act race): another writer inserted
                    # this key while we copied; drop our buffer's
                    # allocator-given reference instead of clobbering.
                    cpu_memory_obj.ref_count_down()
                    return False
                # The allocator's reference becomes the cache's reference.
                self.hot_cache[key] = cpu_memory_obj
                self.usage += cpu_memory_obj.get_size()
            return True
        else:
            with self.lock:
                if key not in self.hot_cache:
                    self.hot_cache[key] = obj
                    # Fix: mirror submit_put_task -- the cache takes its
                    # own reference and accounts the object's size.
                    obj.ref_count_up()
                    self.usage += obj.get_size()
            return True
