import sys
from collections.abc import Callable, Iterable, Iterator
from typing import TypeVar

import numpy as np

from zkl_ptutils_training.utils.distributed import torch_distributed_is_rank0
from .action_scheduling import ActionScheduling
from .resumable_fs import CheckpointKeyResolver, FsResumableController, FsResumeArgs

# Generic type parameters; none are used in this module — presumably they are
# re-exported for sibling modules that import them from here (TODO confirm).
AnyInput = TypeVar('AnyInput')
AnyOutput = TypeVar('AnyOutput')
AnyCreateArgs = TypeVar('AnyCreateArgs')
# A keep rule maps the array of checkpoint progress values to a same-length
# boolean mask marking which checkpoints to retain.
KeepRule = Callable[[np.ndarray], np.ndarray]


class CheckpointScheduling(ActionScheduling, CheckpointKeyResolver):
    """Schedules checkpoint housekeeping.

    Derives checkpoint keys from the integer progress metric and, on each
    scheduled action, prunes checkpoints that no keep rule marks for
    retention. A checkpoint survives if ANY configured rule keeps it.
    """

    def __init__(self, *,
        progress_metric_name: str = 'step',
        action_interval: int | float | None,
        keep_rules: Iterable[KeepRule] | KeepRule | None = None
    ):
        """
        :param progress_metric_name: name of the progress metric used to
            derive checkpoint keys (forwarded to ``ActionScheduling``).
        :param action_interval: how often (in progress units) cleanup runs
            (forwarded to ``ActionScheduling``).
        :param keep_rules: a single rule, an iterable of rules, or ``None``
            to keep every checkpoint.
        """
        super().__init__(
            progress_metric_name=progress_metric_name,
            action_interval=action_interval)
        keep_rules = keep_rules if keep_rules is not None else make_keep_all_rule()
        # Normalize to a list: wrap a bare callable, materialize iterables
        # (so generators are consumed exactly once, here).
        keep_rules = list(keep_rules) if isinstance(keep_rules, Iterable) else [keep_rules]
        self.keep_rules = keep_rules

    # controller

    @property
    def _controller(self) -> FsResumableController:
        # Resolved lazily from the plugin dock so construction order of
        # plugins does not matter.
        return self.dock.get_plugin(FsResumableController)

    # checkpoint key

    def get_current_checkpoint_key(self) -> str:
        """Return the key for a checkpoint taken now: the integer progress."""
        return str(int(self._get_progress()))

    def get_latest_checkpoint_key(self, args: FsResumeArgs) -> str:
        """Return the stored checkpoint key with the highest progress.

        :raises ValueError: if no checkpoints exist.
        """
        checkpoints_key = self._iter_checkpoints_key(args)
        # Keys are digit strings; compare numerically, not lexically.
        checkpoint_key = max(checkpoints_key, key=int, default=None)
        if checkpoint_key is None:
            raise ValueError('No checkpoints found')
        return checkpoint_key

    def _iter_checkpoints_key(self, args: FsResumeArgs | None = None) -> Iterator[str]:
        """Yield all stored checkpoint keys (all-digit directory names)."""
        checkpoints_fs = args.checkpoints_fs \
            if args is not None else self._controller.checkpoints_fs
        if checkpoints_fs is not None:
            for name in checkpoints_fs.listdir("", detail=False):
                # Non-digit entries are foreign files/dirs, not checkpoints.
                if name.isdigit():
                    yield name

    # action

    def _action(self, progress: int | float):
        # Pause the controller so no checkpoint is being written while we
        # delete; only rank 0 touches the shared filesystem.
        self._controller.pause()
        if torch_distributed_is_rank0():
            self._clean_checkpoints()

    def _clean_checkpoints(self):
        """Delete every checkpoint that no keep rule retains."""
        checkpoints_progress_n = np.asarray(list(map(int, self._iter_checkpoints_key())))
        # Guard the degenerate cases: with no checkpoints there is nothing to
        # do, and with an empty rule list ``np.any(..., axis=0)`` yields a
        # 0-d array that breaks ``zip`` — and deleting everything on an empty
        # rule set would be destructive, so treat it as keep-all.
        if checkpoints_progress_n.size == 0 or not self.keep_rules:
            return
        rules_checkpoints_keep = [rule(checkpoints_progress_n) for rule in self.keep_rules]
        # A checkpoint survives if any single rule marks it for retention.
        checkpoints_keep = np.any(rules_checkpoints_keep, axis=0)
        for progress, keep in zip(checkpoints_progress_n, checkpoints_keep):
            if not keep:
                checkpoint_key = str(progress)
                checkpoint_fs = self._controller.get_checkpoint_fs(checkpoint_key)
                checkpoint_fs.delete("", recursive=True)
                print(f"Removed checkpoint {checkpoint_key}", file=sys.stderr)


# keep rules

def make_keep_all_rule() -> KeepRule:
    """Build a rule that retains every checkpoint unconditionally."""
    def rule(checkpoints_progress: np.ndarray):
        # All-True mask: nothing is ever eligible for deletion.
        return np.full(len(checkpoints_progress), True)

    return rule


def make_keep_last_few_rule(n: int) -> KeepRule:
    """Build a rule that keeps only the ``n`` checkpoints with the highest
    progress values.

    :param n: number of most-recent checkpoints to retain. ``n <= 0`` keeps
        none — the original slice ``sorted_indices[-n:]`` with ``n == 0``
        degenerated to ``[0:]`` and accidentally kept ALL checkpoints.
    """
    def rule(checkpoints_progress: np.ndarray):
        checkpoints_keep = np.zeros(len(checkpoints_progress), dtype=bool)
        if n > 0:
            # Indices of the n largest progress values.
            keep_indices = np.argsort(checkpoints_progress)[-n:]
            checkpoints_keep[keep_indices] = True
        return checkpoints_keep

    return rule


def make_keep_by_interval_rule(interval: int | float) -> KeepRule:
    def rule(checkpoints_progress: np.ndarray):
        sorted_indices = np.argsort(checkpoints_progress)
        progress = interval
        keep_indices = []
        for index in sorted_indices:
            checkpoint_progress = checkpoints_progress[index]
            if checkpoint_progress >= progress:
                keep_indices.append(index)
                while checkpoint_progress >= progress:
                    progress += interval

        checkpoints_keep = np.zeros(len(checkpoints_progress), dtype=bool)
        checkpoints_keep[keep_indices] = True
        return checkpoints_keep

    return rule
