import asyncio
import threading
from collections import OrderedDict
from concurrent.futures import Future
from typing import (
    TYPE_CHECKING,
    Dict,
    Generator,
    List,
    Optional,
    Sequence,
    Tuple,
    Union,
)

import torch

from configs import LMCacheEngineConfig, LMCacheEngineMetadata
from memory_management import MemoryAllocatorInterface, MemoryFormat, MemoryObj
from storage_backend import CreateStorageBackends
from storage_backend.abstract_backend import StorageBackendInterface
from storage_backend.local_cpu_backend import LocalCPUBackend
from utils import CacheEngineKey


class StorageManager:
    """
    The storage manager is responsible for managing the storage backends.

    Writes are fanned out to every configured backend; reads return the
    first backend's hit (writing the data back to the local CPU backend
    when it came from elsewhere). Asynchronous backend work runs on a
    private asyncio event loop hosted in a background thread.
    """

    def __init__(
        self,
        config: LMCacheEngineConfig,
        metadata: LMCacheEngineMetadata,
        allocator: MemoryAllocatorInterface,
        layerwise: bool = False,
    ):
        self.config = config
        self.metadata = metadata
        self.allocator = allocator
        self.layerwise = layerwise

        # Use a dedicated event loop instead of asyncio.get_event_loop():
        # get_event_loop() is deprecated when no loop is running, and if the
        # caller is already inside a running loop it would return that loop,
        # which cannot be run_forever()'d again from another thread.
        self.loop = asyncio.new_event_loop()
        # Daemon thread so that a forgotten close() cannot hang interpreter
        # exit; close() still stops the loop and joins explicitly.
        self.thread = threading.Thread(target=self._run_event_loop, daemon=True)
        self.thread.start()
        # Protects self.prefetch_tasks (and the write-back performed in the
        # prefetch done-callback). Non-reentrant: never call out to code that
        # may re-acquire it while held.
        self.lock = threading.Lock()

        self.backends: OrderedDict[str, StorageBackendInterface] = (
            CreateStorageBackends(config, metadata, self.loop, allocator, layerwise)
        )
        # The local CPU backend is mandatory: it is the allocation source and
        # the write-back target for hits served by remote backends.
        self.local_cpu_backend = self.backends["LocalCPUBackend"]

        # In-flight prefetches, keyed by cache key.
        self.prefetch_tasks: Dict[CacheEngineKey, Future] = {}
        # Per-backend in-flight puts (bookkeeping; backends track their own
        # pending puts via exists_in_put_tasks).
        self.put_tasks: Dict[str, Dict[CacheEngineKey, Tuple[Future, MemoryObj]]] = {}
        for backend_name in self.backends:
            self.put_tasks[backend_name] = {}

        # NOTE(review): requires a CUDA device at construction time.
        self.stream = torch.cuda.Stream()

    def _run_event_loop(self) -> None:
        """Run the manager's private event loop until close() stops it."""
        asyncio.set_event_loop(self.loop)
        self.loop.run_forever()

    def allocate(
        self,
        shape: torch.Size,
        dtype: torch.dtype,
        fmt: MemoryFormat = MemoryFormat.KV_2LTD,
        evict: bool = True,
    ) -> Optional[MemoryObj]:
        """
        Allocate a memory object from the local CPU backend.

        :param shape: shape of the tensor to allocate.
        :param dtype: dtype of the tensor to allocate.
        :param fmt: memory layout of the KV data.
        :param evict: whether eviction may be triggered to free space.
        :return: the allocated memory object, or None on failure.
        """
        assert isinstance(self.local_cpu_backend, LocalCPUBackend)
        return self.local_cpu_backend.allocate(shape, dtype, fmt, evict)

    def put(self, key: CacheEngineKey, obj: MemoryObj) -> List[Future]:
        """
        Put a memory object into every storage backend.

        The caller's reference on ``obj`` is always released exactly once,
        whether or not any put task is submitted.

        :param key: the cache key for the object.
        :param obj: the memory object to store.
        :return: the futures of the submitted put tasks (empty if the key is
            already stored or already being stored everywhere needed).
        """
        # Skip the write entirely if any backend already holds the key or is
        # in the middle of storing it.
        for backend in self.backends.values():
            if backend.contains(key) or backend.exists_in_put_tasks(key):
                obj.ref_count_down()
                return []

        put_tasks = []
        for backend in self.backends.values():
            put_task = backend.submit_put_task(key, obj)
            if put_task is not None:
                put_tasks.append(put_task)

        obj.ref_count_down()
        return put_tasks

    def batched_put(self, keys: List[CacheEngineKey], objs: List[MemoryObj]) -> None:
        """
        Put a list of memory objects into the storage manager.

        :param keys: the cache keys; paired positionally with ``objs``.
        :param objs: the memory objects to store.
        """
        for key, obj in zip(keys, objs, strict=False):
            self.put(key, obj)

    def get_blocking(self, key: CacheEngineKey) -> Optional[MemoryObj]:
        """
        Get a memory object from the storage manager, blocking until ready.

        If a prefetch for the key is in flight, waits (up to 1s) for it to
        finish first so the local CPU backend can serve the hit.

        :param key: the cache key to look up.
        :return: the memory object, or None if no backend holds the key.
        """
        # Wait for an in-flight prefetch task to complete, if any.
        with self.lock:
            prefetch_task = self.prefetch_tasks.get(key)

        if prefetch_task is not None:
            prefetch_task.result(timeout=1)

        # Probe backends in order; write remote hits back to the local CPU
        # backend so subsequent gets are served locally.
        for backend_name, backend in self.backends.items():
            memory_obj = backend.get_blocking(key)
            if memory_obj is not None:
                if backend_name != "LocalCPUBackend":
                    assert isinstance(self.local_cpu_backend, LocalCPUBackend)
                    self.local_cpu_backend.write_back(key, memory_obj)
                return memory_obj

        return None

    def get_non_blocking(self, key: CacheEngineKey) -> Optional[MemoryObj]:
        """
        Get a memory object from the storage manager without blocking.

        :param key: the cache key to look up.
        :return: the first backend's non-blocking result (a task handle) or
            None if no backend can serve the key.
        """
        for backend in self.backends.values():
            task = backend.get_non_blocking(key)
            if task is not None:
                return task

        return None

    def layer_wise_batched_get(
        self,
        keys: List[List[CacheEngineKey]],
    ) -> Generator[List[Future], None, None]:
        """
        Get batched memory objects from the storage manager, one layer at a
        time.

        :param keys: one list of cache keys per layer.
        :yield: for each layer, the non-blocking retrieval tasks for the keys
            that could be submitted.
        """
        for keys_multi_chunk in keys:
            tasks = []
            for key in keys_multi_chunk:
                task = self.get_non_blocking(key)
                if task is not None:
                    tasks.append(task)
            yield tasks

    def prefetch(self, key: CacheEngineKey) -> None:
        """
        Prefetch a memory object into the local CPU backend.

        No-op if the key is already local or a prefetch is already in flight.

        :param key: the cache key to prefetch.
        """
        if self.local_cpu_backend.contains(key):
            return

        with self.lock:
            if key in self.prefetch_tasks:
                return

        def prefetch_callback(future: Future):
            # Unregister first so a failed prefetch can be retried later.
            # pop(..., None) tolerates a concurrent removal.
            with self.lock:
                self.prefetch_tasks.pop(key, None)

            # Propagates the task's exception, if any, to the executor's
            # callback handler.
            buffer_memory_obj = future.result()

            # TODO: Copy the buffer_memory_obj to gpu?

            with self.lock:
                assert isinstance(self.local_cpu_backend, LocalCPUBackend)
                self.local_cpu_backend.submit_put_task(key, buffer_memory_obj)

        for backend in self.backends.values():
            task = backend.submit_prefetch_task(key)
            if task is None:
                continue
            with self.lock:
                self.prefetch_tasks[key] = task
            # Register the callback OUTSIDE the lock: if the task already
            # completed, add_done_callback runs the callback synchronously in
            # this thread, and the callback acquires self.lock — doing that
            # while holding the non-reentrant lock would deadlock.
            task.add_done_callback(prefetch_callback)
            break

    def contains(self, key: CacheEngineKey) -> bool:
        """
        Check if a memory object exists in any storage backend.

        :param key: the cache key to check.
        :return: True if any backend holds the key.
        """
        return any(backend.contains(key) for backend in self.backends.values())

    def remove(self, key: CacheEngineKey) -> int:
        """
        Remove a memory object from all storage backends.

        :param key: the cache key to remove.
        :return: the number of backends the key was removed from.
        """
        n_removed = 0
        for backend in self.backends.values():
            if backend.remove(key):
                n_removed += 1
        return n_removed

    def clear(self) -> int:
        """
        Clear every storage backend that supports clearing.

        :return: the total number of entries cleared across backends.
        """
        n_cleared = 0
        for backend in self.backends.values():
            # Not all backend implementations expose clear().
            if hasattr(backend, "clear"):
                n_cleared += backend.clear()
        return n_cleared

    def batched_unpin(self, keys: List[CacheEngineKey]) -> None:
        """
        Unpin a list of memory objects in every storage backend.

        :param keys: the cache keys to unpin.
        """
        for backend in self.backends.values():
            backend.unpin(keys)

    def close(self) -> None:
        """
        Close all backends and shut down the background event loop thread.
        """
        for backend in self.backends.values():
            backend.close()

        # Pass the callable itself — calling self.loop.stop() here would stop
        # the loop from the wrong thread and schedule its None return value.
        if self.loop.is_running():
            self.loop.call_soon_threadsafe(self.loop.stop)
        if self.thread.is_alive():
            self.thread.join()