import os
from dataclasses import dataclass
from typing import TypeVar

import torch
from torch.distributed.optim import ZeroRedundancyOptimizer
from zkl_aiutils_training import Dock, DockPlugin, Resumable
from zkl_pyutils_fsspec import resolve_local_path

from zkl_ptutils_training.plugins import FsPauseArgs, FsResumeArgs, FsResumeFromCheckpointArgs, \
    LearningRateSchedulingTarget
from zkl_ptutils_training.tasks import MLTrainingEngine
from zkl_ptutils_training.utils.attributes import get_entry_or_attribute
from .compiling import resolve_compiling
from .simple import ModelFactory, SimpleTrainingEngineConfig, SimpleTrainingEngineHparams

AnyInput = TypeVar('AnyInput')
AnyOutput = TypeVar('AnyOutput')
AnyCreateArgs = TypeVar('AnyCreateArgs')


@dataclass(kw_only=True)
class ZeroTrainingEngineConfig(SimpleTrainingEngineConfig):
    # Process group used for both the DDP wrapper and the ZeRO optimizer.
    # When left as None, the engine falls back to the global WORLD group
    # at resume time.
    process_group: torch.distributed.ProcessGroup | None = None


@dataclass(kw_only=True)
class ZeroTrainingEngineHparams(SimpleTrainingEngineHparams):
    # No extra hyperparameters beyond the simple engine's; this subclass
    # exists so ZeroTrainingEngine has a distinct hparams type to extend later.
    pass


class ZeroTrainingEngine(
    MLTrainingEngine[AnyInput, AnyOutput],
    LearningRateSchedulingTarget,
    DockPlugin, Resumable,
):
    """Training engine that wraps the model in DistributedDataParallel and
    shards AdamW optimizer state across ranks with ZeroRedundancyOptimizer.

    Lifecycle: ``on_resume`` (re)builds model/ddp/optimizer, optionally
    loading a per-rank checkpoint; ``on_pause`` writes per-rank checkpoint
    files. Gradient accumulation is implemented by ``backward``/``optimize``.
    """

    def __init__(self, *,
        model_factory: ModelFactory,
        config: ZeroTrainingEngineConfig | None = None,
        hparams: ZeroTrainingEngineHparams | None = None,
    ):
        # NOTE(review): no super().__init__() call here — confirm none of the
        # base classes (MLTrainingEngine/DockPlugin/Resumable) require
        # explicit initialization.
        self._model_factory = model_factory
        self._config = ZeroTrainingEngineConfig() if config is None else config
        self._hparams = ZeroTrainingEngineHparams() if hparams is None else hparams

    def on_installed(self, dock: Dock):
        # Register the model factory with the dock so it participates in the
        # plugin lifecycle alongside this engine.
        super().on_installed(dock)
        dock.install(self._model_factory)

    # resume & pause

    # Attributes created in on_resume (not in __init__): the raw model, its
    # DDP wrapper, the sharded optimizer, and the accumulation-step counter.
    model: torch.nn.Module
    ddp: torch.nn.parallel.DistributedDataParallel
    optimizer: torch.distributed.optim.ZeroRedundancyOptimizer
    accumulated_steps_n: int

    def on_resume(self, args: FsResumeArgs):
        """Build model, DDP wrapper and ZeRO optimizer; restore per-rank
        checkpoint state when resuming from a checkpoint."""
        # Ensure the model factory is resumed before this plugin runs.
        self.dock.ensure_after_plugins((self._model_factory,))

        self.model = self._model_factory()
        # Apply the configured compilation strategy (may be a no-op) before
        # handing the module to DDP.
        compiled_model = resolve_compiling(self._config.compiling)(self.model)

        # Default to the global WORLD group. NOTE(review): this mutates the
        # shared config object in place, leaking the default back to whoever
        # supplied the config — confirm that is intended.
        if self._config.process_group is None:
            self._config.process_group = torch.distributed.group.WORLD
        self.ddp = torch.nn.parallel.DistributedDataParallel(
            module=compiled_model,
            process_group=self._config.process_group)
        # ZeRO shards AdamW optimizer state across the ranks of the group.
        self.optimizer = torch.distributed.optim.ZeroRedundancyOptimizer(
            params=self.ddp.parameters(),
            optimizer_class=torch.optim.AdamW,
            lr=self._hparams.learning_rate,
            weight_decay=self._hparams.weight_decay,
            process_group=self._config.process_group)
        self.accumulated_steps_n = 0

        if isinstance(args, FsResumeFromCheckpointArgs):
            # Checkpoint files are per-rank, with the zero-padded rank
            # embedded in the filename (see get_rank_str / on_pause).
            rank = get_rank_str(self._config.process_group)
            checkpoint_dir_path = resolve_local_path(args.checkpoint_fs)
            self.model.load_state_dict(torch.load(os.path.join(checkpoint_dir_path, f"model.rank{rank}.pt")))
            self.optimizer.load_state_dict(torch.load(os.path.join(checkpoint_dir_path, f"optimizer.rank{rank}.pt")))

    def on_pause(self, args: FsPauseArgs):
        """Write per-rank model and optimizer checkpoint files."""
        # NOTE(review): ZeroRedundancyOptimizer.consolidate_state_dict()
        # gathers the full state onto rank 0 by default, and state_dict()
        # typically raises on other ranks — confirm the per-rank save below
        # actually works on every rank.
        self.optimizer.consolidate_state_dict()
        rank = get_rank_str(self._config.process_group)
        checkpoint_dir_path = resolve_local_path(args.checkpoint_fs)
        torch.save(self.model.state_dict(), os.path.join(checkpoint_dir_path, f"model.rank{rank}.pt"))
        torch.save(self.optimizer.state_dict(), os.path.join(checkpoint_dir_path, f"optimizer.rank{rank}.pt"))

    # engine

    @property
    def learning_rate(self) -> float:
        # A single learning rate is assumed: only the first param group is read.
        return self.optimizer.param_groups[0]['lr']

    @learning_rate.setter
    def learning_rate(self, value: float):
        # Mirrors the getter: only the first param group is updated.
        self.optimizer.param_groups[0]['lr'] = value

    def forward(self, input: AnyInput) -> AnyOutput:
        # NOTE(review): calls ddp.forward(...) directly instead of ddp(...),
        # which bypasses Module.__call__ hooks — confirm this is intended.
        return self.ddp.forward(input)

    def backward(self, output: AnyOutput):
        """Backpropagate the output's ``loss``, suppressing DDP gradient
        synchronization on all but the final accumulation step."""
        loss = get_entry_or_attribute(output, 'loss')
        if self.accumulated_steps_n + 1 >= self._hparams.accumulating_steps_n:
            # Final accumulation step: let DDP all-reduce the gradients.
            loss.backward()
        else:
            # Intermediate step: accumulate locally without cross-rank sync.
            with self.ddp.no_sync():
                loss.backward()
        self.accumulated_steps_n += 1

    def optimize(self):
        """Apply one optimizer step once enough accumulation steps have been
        taken; no-op otherwise."""
        if not self.accumulated_steps_n >= self._hparams.accumulating_steps_n:
            return

        # Convert summed gradients into an average over accumulated steps.
        if self.accumulated_steps_n > 1:
            for param in self.ddp.parameters():
                if param.grad is not None:
                    param.grad /= self.accumulated_steps_n

        # Optional global gradient-norm clipping before the step.
        if self._hparams.gradient_clip_norm is not None:
            torch.nn.utils.clip_grad_norm_(
                self.ddp.parameters(),
                self._hparams.gradient_clip_norm)

        self.optimizer.step()
        self.optimizer.zero_grad()
        self.accumulated_steps_n = 0


def get_rank_str(process_group: torch.distributed.ProcessGroup) -> str:
    """Return this process's rank as a string, zero-padded to the width of
    the largest rank in the group (so filenames sort lexicographically)."""
    width = len(str(process_group.size() - 1))
    return str(process_group.rank()).zfill(width)
