import os
from dataclasses import dataclass
from functools import cached_property
from typing import Callable, TypeVar

import torch
from zkl_aiutils_training import Dock, DockPlugin, Resumable
from zkl_pyutils_fsspec import resolve_local_path

from zkl_ptutils_training.plugins import FsPauseArgs, FsResumeArgs, FsResumeFromCheckpointArgs, \
    LearningRateSchedulingTarget
from zkl_ptutils_training.tasks import MLTrainingEngine
from zkl_ptutils_training.utils.attributes import get_entry_or_attribute
from .compiling import Compiling, resolve_compiling

# Type variables for the engine's generic input/output payloads.
AnyInput = TypeVar('AnyInput')
AnyOutput = TypeVar('AnyOutput')
AnyCreateArgs = TypeVar('AnyCreateArgs')
# Zero-argument callable producing a fresh torch model instance.
ModelFactory = Callable[[], torch.nn.Module]


@dataclass(kw_only=True)
class SimpleTrainingEngineConfig:
    """Non-learning configuration for SimpleTrainingEngine."""
    # Fed to resolve_compiling() to wrap the model's forward;
    # presumably False means "no compilation" — confirm in resolve_compiling.
    compiling: Compiling = False


@dataclass(kw_only=True)
class SimpleTrainingEngineHparams:
    """Optimization hyperparameters for SimpleTrainingEngine."""
    learning_rate: float = 1e-3  # AdamW learning rate
    weight_decay: float = 1e-2  # AdamW weight decay
    gradient_clip_norm: float | None = 1.0  # max grad norm; None disables clipping
    accumulating_steps_n: int = 1  # backward() calls per optimizer step


class SimpleTrainingEngine(
    MLTrainingEngine[AnyInput, AnyOutput],
    LearningRateSchedulingTarget,
    DockPlugin, Resumable,
):
    """Training engine combining AdamW, optional gradient accumulation,
    gradient-norm clipping, and an (optionally compiled) forward pass.

    The model is rebuilt from ``model_factory`` on every resume; model and
    optimizer state dicts are round-tripped through ``model.pt`` /
    ``optimizer.pt`` in the checkpoint directory on pause/resume.
    """

    def __init__(self, *,
        model_factory: ModelFactory,
        config: SimpleTrainingEngineConfig | None = None,
        hparams: SimpleTrainingEngineHparams | None = None,
    ):
        # NOTE(review): no super().__init__() call here — confirm the base
        # classes/mixins need no cooperative initialization.
        self._model_factory = model_factory
        self._config = SimpleTrainingEngineConfig() if config is None else config
        self._hparams = SimpleTrainingEngineHparams() if hparams is None else hparams

    def on_installed(self, dock: Dock):
        super().on_installed(dock)
        # Register the model factory so the dock can order/manage it too.
        dock.install(self._model_factory)

    # resume & pause

    model: torch.nn.Module
    optimizer: torch.optim.Optimizer
    # Number of backward() calls since the last applied optimizer step.
    accumulated_steps_n: int

    def on_resume(self, args: FsResumeArgs):
        """(Re)create model and optimizer; load checkpoint state if provided."""
        self.dock.ensure_after_plugins((self._model_factory,))

        # Invalidate any compiled forward cached during a previous resume
        # cycle — otherwise forward() would keep calling the *old* model
        # instance instead of the one created below.
        self.__dict__.pop('forward_compiled', None)

        self.model = self._model_factory()
        self.optimizer = torch.optim.AdamW(
            self.model.parameters(),
            lr=self._hparams.learning_rate,
            weight_decay=self._hparams.weight_decay)
        self.accumulated_steps_n = 0

        if isinstance(args, FsResumeFromCheckpointArgs):
            checkpoint_dir_path = resolve_local_path(args.checkpoint_fs)
            # weights_only=True: state dicts are plain tensors/containers,
            # so we avoid torch.load's arbitrary-unpickling attack surface.
            self.model.load_state_dict(torch.load(
                os.path.join(checkpoint_dir_path, "model.pt"),
                weights_only=True))
            self.optimizer.load_state_dict(torch.load(
                os.path.join(checkpoint_dir_path, "optimizer.pt"),
                weights_only=True))

    def on_pause(self, args: FsPauseArgs):
        """Persist model and optimizer state dicts into the checkpoint dir."""
        checkpoint_dir_path = resolve_local_path(args.checkpoint_fs)
        torch.save(self.model.state_dict(), os.path.join(checkpoint_dir_path, "model.pt"))
        torch.save(self.optimizer.state_dict(), os.path.join(checkpoint_dir_path, "optimizer.pt"))

    # engine

    @property
    def learning_rate(self) -> float:
        # The optimizer is built with a single parameter iterable, so there
        # is exactly one param group.
        return self.optimizer.param_groups[0]['lr']

    @learning_rate.setter
    def learning_rate(self, value: float):
        self.optimizer.param_groups[0]['lr'] = value

    @cached_property
    def forward_compiled(self) -> Callable[[AnyInput], AnyOutput]:
        # Built lazily on first use; invalidated in on_resume() whenever the
        # model is recreated.
        return resolve_compiling(self._config.compiling)(self.model.forward)

    def forward(self, input: AnyInput) -> AnyOutput:
        return self.forward_compiled(input)

    def backward(self, output: AnyOutput):
        """Backpropagate the output's 'loss' entry/attribute and count the step."""
        loss = get_entry_or_attribute(output, 'loss')
        loss.backward()
        self.accumulated_steps_n += 1

    def optimize(self):
        """Apply one optimizer step once enough gradients have accumulated.

        No-op until ``accumulating_steps_n`` backward passes have run.
        """
        if self.accumulated_steps_n < self._hparams.accumulating_steps_n:
            return

        # Average (rather than sum) the gradients accumulated across
        # micro-batches so the effective step matches a single large batch.
        if self.accumulated_steps_n > 1:
            for param in self.model.parameters():
                if param.grad is not None:
                    param.grad /= self.accumulated_steps_n

        if self._hparams.gradient_clip_norm is not None:
            torch.nn.utils.clip_grad_norm_(
                self.model.parameters(),
                self._hparams.gradient_clip_norm)

        self.optimizer.step()
        self.optimizer.zero_grad()
        self.accumulated_steps_n = 0
