from dataclasses import dataclass
from typing import Any, NamedTuple, TYPE_CHECKING, TypeVar

from zkl_aiutils_training import Dock, DockPlugin, Resumable
from zkl_pyutils_fsspec import resolve_local_path

from zkl_ptutils_training.plugins import FsPauseArgs, FsResumeArgs, FsResumeFromCheckpointArgs, \
    LearningRateSchedulingTarget
from zkl_ptutils_training.tasks import MLTrainingEngine
from zkl_ptutils_training.utils.attributes import get_entry_or_attribute
from .compiling import resolve_compiling
from .simple import ModelFactory, SimpleTrainingEngineConfig, SimpleTrainingEngineHparams

if TYPE_CHECKING:
    from deepspeed import DeepSpeedEngine

AnyInput = TypeVar('AnyInput')
AnyOutput = TypeVar('AnyOutput')
AnyCreateArgs = TypeVar('AnyCreateArgs')


# Minimal argparse-like namespace accepted by ``deepspeed.initialize``:
# DeepSpeed reads the JSON-style config dict off the ``deepspeed_config``
# attribute of the ``args`` object it is given.
DeepSpeedArgs = NamedTuple('DeepSpeedArgs', [('deepspeed_config', dict)])


@dataclass(kw_only=True)
class DeepSpeedTrainingEngineConfig(SimpleTrainingEngineConfig):
    """Config for DeepSpeedTrainingEngine, extending the simple engine's config."""

    # Extra entries merged over the default DeepSpeed "zero_optimization"
    # section (stage 2); None is treated as "no overrides" by the consumer.
    # Annotation fixed: the default is None, so the type must include None.
    zero_configurations: dict[str, Any] | None = None


@dataclass(kw_only=True)
class DeepSpeedTrainingEngineHparams(SimpleTrainingEngineHparams):
    """Hyper-parameters for DeepSpeedTrainingEngine.

    Currently identical to the simple engine's hparams; exists as a
    distinct type so DeepSpeed-specific knobs can be added later.
    """


class DeepSpeedTrainingEngine(
    MLTrainingEngine[AnyInput, AnyOutput],
    LearningRateSchedulingTarget,
    DockPlugin, Resumable,
):
    """Training engine that delegates execution to a DeepSpeed engine.

    On resume it builds the model via ``model_factory``, optionally compiles
    it, wraps it with ``deepspeed.initialize``, and from then on forwards
    ``forward``/``backward``/``optimize`` calls to the DeepSpeed engine.
    Checkpointing goes through DeepSpeed's native save/load machinery.

    Fix in this revision: the misspelled "deepseek" naming is corrected to
    "deepspeed"; the old ``make_deepseek_args`` name is kept as an alias
    for backward compatibility.
    """

    def __init__(self, *,
        model_factory: ModelFactory,
        config: DeepSpeedTrainingEngineConfig | None = None,
        hparams: DeepSpeedTrainingEngineHparams | None = None,
    ):
        self._model_factory = model_factory
        # Fall back to default-constructed config/hparams when not provided.
        self._config = DeepSpeedTrainingEngineConfig() if config is None else config
        self._hparams = DeepSpeedTrainingEngineHparams() if hparams is None else hparams

    def on_installed(self, dock: Dock):
        super().on_installed(dock)
        # The model factory is itself a plugin; register it with the dock
        # so its lifecycle is managed alongside this engine's.
        dock.install(self._model_factory)

    # resume & pause

    # The live DeepSpeed engine; only set after on_resume has run.
    engine: 'DeepSpeedEngine'

    def on_resume(self, args: FsResumeArgs):
        """Build the model, initialize DeepSpeed, and load a checkpoint if given."""
        # Presumably orders this plugin after the model factory in the dock's
        # plugin sequence, since we call the factory below — verify against
        # Dock.ensure_after_plugins semantics.
        self.dock.ensure_after_plugins((self._model_factory,))

        model = self._model_factory()
        model = resolve_compiling(self._config.compiling)(model)

        # Imported lazily so environments without deepspeed installed can
        # still import this module.
        import deepspeed
        deepspeed.init_distributed()
        deepspeed_args = self.make_deepspeed_args(self._config, self._hparams)
        self.engine, *_ = deepspeed.initialize(deepspeed_args, model)

        if isinstance(args, FsResumeFromCheckpointArgs):
            checkpoints_dir_path = resolve_local_path(args.checkpoints_fs)
            self.engine.load_checkpoint(checkpoints_dir_path, args.checkpoint_key)

    def on_pause(self, args: FsPauseArgs):
        """Persist engine state as a DeepSpeed checkpoint under the given key."""
        checkpoints_dir_path = resolve_local_path(args.checkpoints_fs)
        self.engine.save_checkpoint(checkpoints_dir_path, args.checkpoint_key)

    @staticmethod
    def make_deepspeed_args(
        config: DeepSpeedTrainingEngineConfig,
        hparams: DeepSpeedTrainingEngineHparams,
    ) -> DeepSpeedArgs:
        """Translate config/hparams into the namespace ``deepspeed.initialize`` reads."""
        return DeepSpeedArgs({
            "optimizer": {
                "type": "AdamW",
                "params": {
                    "lr": hparams.learning_rate,
                    "weight_decay": hparams.weight_decay
                },
            },
            "zero_optimization": {
                # ZeRO stage 2 by default; caller-supplied entries override.
                "stage": 2,
                **(config.zero_configurations or {})
            },
            "train_micro_batch_size_per_gpu": 1,  # fake
            "gradient_accumulation_steps": hparams.accumulating_steps_n,
            # NOTE(review): passed through unchanged even when None — confirm
            # DeepSpeed accepts a null gradient_clipping value.
            "gradient_clipping": hparams.gradient_clip_norm,
        })

    # Backward-compatible alias for the original (misspelled) name; class
    # attribute access goes through the staticmethod descriptor, so both
    # names remain callable. Prefer make_deepspeed_args in new code.
    make_deepseek_args = make_deepspeed_args

    # engine

    @property
    def learning_rate(self) -> float:
        # NOTE(review): assumes a single optimizer param group — confirm for
        # multi-group setups.
        return self.engine.optimizer.param_groups[0]['lr']

    @learning_rate.setter
    def learning_rate(self, value: float):
        self.engine.optimizer.param_groups[0]['lr'] = value

    def forward(self, input: AnyInput) -> AnyOutput:
        return self.engine.forward(input)

    def backward(self, output: AnyOutput):
        # The output must expose the loss as a 'loss' entry or attribute.
        loss = get_entry_or_attribute(output, 'loss')
        self.engine.backward(loss)

    def optimize(self):
        self.engine.step()
        self.engine.zero_grad()
