import asyncio
import os
import threading
from collections import OrderedDict
from concurrent.futures import Future
from typing import TYPE_CHECKING, List, Optional

import torch
import aiofiles

from configs import DiskCacheMetadata, LMCacheEngineConfig
from memory_management import MemoryAllocatorInterface, MemoryFormat, MemoryObj
from storage_backend.abstract_backend import StorageBackendInterface
from storage_backend.evictor.base_evictor import LRUEvictor, PutStatus
from storage_backend.local_cpu_backend import LocalCPUBackend
from utils import CacheEngineKey


class LocalDiskBackend(StorageBackendInterface):
    """Storage backend that persists KV-cache chunks as one file per key on
    local disk.

    In-memory state (``hot_cache`` metadata, ``put_tasks``, ``usage``) is
    guarded by a single ``threading.Lock``; the bytes themselves live under
    ``config.local_disk``.  Disk I/O runs as coroutines on the supplied event
    loop (via ``aiofiles``), scheduled with
    ``asyncio.run_coroutine_threadsafe`` so that callers on other threads get
    back a ``concurrent.futures.Future``.  Staging buffers for loads are
    allocated from ``local_cpu_backend``.
    """

    def __init__(
        self,
        config: LMCacheEngineConfig,
        loop: asyncio.AbstractEventLoop,
        local_cpu_backend: LocalCPUBackend,
        dst_device: str = "cuda",
    ):
        """
        :param config: engine configuration; ``local_disk`` is the cache
            directory and ``max_local_disk_size`` bounds the LRU evictor.
        :param loop: event loop on which the async disk I/O coroutines run.
        :param local_cpu_backend: allocator used to stage loaded bytes in CPU
            memory.
        :param dst_device: device the loaded data is ultimately destined for.
        """
        self.config = config
        self.loop = loop
        self.local_cpu_backend = local_cpu_backend
        self.dst_device = dst_device

        self.disk_path = config.local_disk
        # exist_ok avoids the check-then-create race (TOCTOU) if another
        # process/thread creates the directory concurrently.
        os.makedirs(self.disk_path, exist_ok=True)

        # key -> on-disk metadata; insertion order doubles as recency order
        # for the LRU evictor.
        self.hot_cache: OrderedDict[CacheEngineKey, DiskCacheMetadata] = OrderedDict()
        # Guards hot_cache, put_tasks, and usage.
        self.lock = threading.Lock()

        # Keys whose disk write has been submitted but not yet completed.
        self.put_tasks: List[CacheEngineKey] = []
        self.evictor = LRUEvictor(config.max_local_disk_size)
        # Total bytes currently accounted to entries in hot_cache.
        self.usage = 0

    def __str__(self) -> str:
        return "LocalDiskBackend"

    def __key_to_path(self, key: CacheEngineKey) -> str:
        """Map a cache key to a filesystem-safe path inside the cache dir."""
        return os.path.join(self.disk_path, key.to_string().replace("/", "-") + ".pt")

    def __insert_key(self, key: CacheEngineKey, obj: MemoryObj) -> None:
        """
        Insert (or replace) the key's metadata in the cache and update the
        usage counter under the lock.

        Replacing an existing entry subtracts the old entry's size first so
        overwrites do not double-count bytes.
        """
        path = self.__key_to_path(key)
        size = obj.get_size()
        with self.lock:
            old_metadata = self.hot_cache.pop(key, None)
            if old_metadata is not None:
                self.usage -= old_metadata.size
            self.hot_cache[key] = DiskCacheMetadata(
                path=path,
                size=size,
                shape=obj.get_shape(),
                dtype=obj.get_dtype(),
                is_pin=False,
            )
            self.usage += size

    @torch.inference_mode()
    async def async_save_bytes_to_disk(
        self, key: CacheEngineKey, obj: MemoryObj
    ) -> None:
        """
        Write the memory object's bytes to disk, then publish the key's
        metadata and release the caller's reference.

        The key is removed from ``put_tasks`` only after the write finishes,
        so ``exists_in_put_tasks`` reports in-flight writes correctly.
        """
        path = self.__key_to_path(key)
        assert obj.tensor is not None
        kv_bytes = obj.byte_array
        async with aiofiles.open(path, "wb") as f:
            await f.write(kv_bytes)

        # Publish metadata (and account usage) only after the bytes are
        # durably on disk.
        # NOTE(review): assumes obj.get_size() == len(obj.byte_array) — the
        # original mixed the two; confirm against MemoryObj.
        self.__insert_key(key, obj)
        obj.ref_count_down()

        with self.lock:
            self.put_tasks.remove(key)

    async def async_load_bytes_from_disk(
        self, path: str, shape: torch.Size, dtype: torch.dtype
    ) -> Optional[MemoryObj]:
        """
        Asynchronously read the file at ``path`` into a freshly allocated
        memory object.

        :return: the populated memory object, or None if allocation failed.
        """
        memory_obj = self.local_cpu_backend.allocate(shape, dtype)
        if memory_obj is None:
            return None

        # readinto fills the pre-allocated buffer without an extra copy.
        async with aiofiles.open(path, "rb") as f:
            await f.readinto(memory_obj.byte_array)

        return memory_obj

    def load_bytes_from_disk(
        self, path: str, shape: torch.Size, dtype: torch.dtype
    ) -> Optional[MemoryObj]:
        """
        Synchronous counterpart of :meth:`async_load_bytes_from_disk`.

        :return: the populated memory object, or None if allocation failed.
        """
        memory_obj = self.local_cpu_backend.allocate(shape, dtype)
        if memory_obj is None:
            return None

        with open(path, "rb") as f:
            f.readinto(memory_obj.byte_array)

        return memory_obj

    def contains(self, key: CacheEngineKey, pin: bool = False) -> bool:
        """
        Check if the storage backend contains the data for the given key,
        optionally pinning it in the same critical section.
        """
        with self.lock:
            if key not in self.hot_cache:
                return False
            if pin:
                self.hot_cache[key].pin()
            return True

    def submit_put_task(self, key: CacheEngineKey, obj: MemoryObj) -> Optional[Future]:
        """
        Schedule an asynchronous write of ``obj`` under ``key``.

        Evicts LRU entries first if needed to make room.

        :return: a Future for the write, or None if the object can never fit
            (``PutStatus.ILLEGAL``).
        """
        # The evictor inspects hot_cache, so consult it under the lock like
        # every other hot_cache access.
        # NOTE(review): assumes update_on_put does not call back into this
        # backend (which would deadlock on the non-reentrant lock) — confirm.
        with self.lock:
            put_status, keys_to_evict = self.evictor.update_on_put(
                self.hot_cache, obj.get_size()
            )
        if put_status == PutStatus.ILLEGAL:
            return None

        # remove() takes the lock itself, so evict outside the critical
        # section above.
        for evict_key in keys_to_evict:
            self.remove(evict_key)

        # Hold a reference until async_save_bytes_to_disk finishes with it.
        obj.ref_count_up()

        with self.lock:
            self.put_tasks.append(key)

        future = asyncio.run_coroutine_threadsafe(
            self.async_save_bytes_to_disk(key, obj),
            self.loop,
        )
        return future

    def exists_in_put_tasks(self, key: CacheEngineKey) -> bool:
        """
        Check if a write for the given key is currently in flight.
        """
        with self.lock:
            return key in self.put_tasks

    def submit_prefetch_task(self, key: CacheEngineKey) -> Optional[Future]:
        """
        Schedule an asynchronous load of the data for ``key``.

        :return: a Future resolving to the memory object (or None on
            allocation failure), or None if the key is not cached.
        """
        with self.lock:
            if key not in self.hot_cache:
                return None

            # Record the hit so the evictor keeps this key warm.
            self.evictor.update_on_hit(key, self.hot_cache)
            obj_metadata = self.hot_cache[key]

        future = asyncio.run_coroutine_threadsafe(
            self.async_load_bytes_from_disk(
                obj_metadata.path, obj_metadata.shape, obj_metadata.dtype
            ),
            self.loop,
        )
        return future

    def get_blocking(self, key: CacheEngineKey) -> Optional[MemoryObj]:
        """
        Blocking get of the data for the given key.

        :return: the memory object, or None if the key is missing or the
            staging allocation failed.
        """
        with self.lock:
            if key not in self.hot_cache:
                return None

            self.evictor.update_on_hit(key, self.hot_cache)
            obj_metadata = self.hot_cache[key]

        # Disk read happens outside the lock so other threads are not blocked
        # on I/O.
        obj = self.load_bytes_from_disk(
            obj_metadata.path, obj_metadata.shape, obj_metadata.dtype
        )
        return obj

    def get_non_blocking(self, key: CacheEngineKey) -> Optional[Future]:
        """
        Non-blocking get; alias for :meth:`submit_prefetch_task`.
        """
        return self.submit_prefetch_task(key)

    def pin(self, key: CacheEngineKey) -> bool:
        """
        Pin the data for the given key so it is protected from eviction.

        :return: True if the key was present and pinned, else False.
        """
        with self.lock:
            if key not in self.hot_cache:
                return False
            self.hot_cache[key].pin()
            return True

    def unpin(self, key: CacheEngineKey) -> bool:
        """
        Unpin the data for the given key, making it evictable again.

        :return: True if the key was present and unpinned, else False.
        """
        with self.lock:
            if key not in self.hot_cache:
                return False
            self.hot_cache[key].unpin()
            return True

    def remove(self, key: CacheEngineKey) -> bool:
        """
        Remove the key's metadata and its backing file.

        Uses the size recorded in the metadata (avoids a filesystem stat
        inside the critical section) and tolerates an already-missing file.

        :return: True if the key was present and removed, else False.
        """
        with self.lock:
            metadata = self.hot_cache.pop(key, None)
            if metadata is None:
                return False
            self.usage -= metadata.size

        try:
            # File deletion happens outside the lock; the file may be absent
            # if the entry was evicted before its async write completed.
            os.remove(metadata.path)
        except FileNotFoundError:
            pass
        return True

    def close(self) -> None:
        """
        Close the storage backend. No persistent handles are held, so this
        is a no-op.
        """
        pass
