import torch

from zkl_ptutils_training.utils.distributed import torch_distributed_is_rank0
from .action_scheduling import ActionScheduling
from .metrics import MetricOrValue, MetricsCollector
from .metrics_logger import MetricsLogger
from .validation import ValidationProvider


class SummaryScheduling(ActionScheduling, MetricsCollector):
    """Scheduled metrics summarization: on each scheduled action, gathers
    training- and validation-side metrics, forwards them to the attached
    metric loggers (rank 0 only), then resets the epoch metrics.
    """

    def __init__(self, *,
        progress_metric_name: str = 'step',
        action_interval: int | float | None,
        group: torch.distributed.ProcessGroup | None = None
    ):
        """
        :param progress_metric_name: metric tracked as scheduling progress.
        :param action_interval: how often (in progress units) to summarize.
        :param group: process group used to decide rank-0 status; defaults
            to the global group when ``None``.
        """
        super().__init__(
            progress_metric_name=progress_metric_name,
            action_interval=action_interval)
        # Only rank 0 emits to the loggers (see log_metrics).
        self.rank0 = torch_distributed_is_rank0(group)

    # action

    def _action(self, progress: int | float):
        """Scheduled callback: collect, log, then reset metrics."""
        collected = self.collect_metrics()
        self.log_metrics(collected)
        self.reset_metrics()

    # collect

    def collect_metrics(self) -> dict[str, MetricOrValue]:
        """Merge training and validation metrics under prefixed keys
        (``train/...`` and ``valid/...``)."""
        train = {
            'train/' + name: value
            for name, value in self.collect_metrics_from_training().items()}
        valid = {
            'valid/' + name: value
            for name, value in self.collect_metrics_from_validating().items()}
        return {**train, **valid}

    def collect_metrics_from_training(self) -> dict[str, MetricOrValue]:
        """Metrics accumulated on this collector (training side)."""
        return self._get_metrics()

    def collect_metrics_from_validating(self) -> dict[str, MetricOrValue]:
        """Run every ValidationProvider plugin and merge their results
        into a single dict (later plugins win on key clashes)."""
        providers = self.dock.iter_plugins(ValidationProvider, after=True)
        return self.dock.scan_agg_plugins(
            providers, {},
            lambda plugin, acc: {**acc, **plugin.validate()})

    # logging

    def log_metrics(self, metrics: dict[str, MetricOrValue]):
        """Forward *metrics* to every MetricsLogger plugin; no-op unless
        this process is rank 0."""
        if not self.rank0:
            return
        loggers = self.dock.iter_plugins(MetricsLogger, after=True)
        self.dock.scan_call_plugins(
            loggers, lambda plugin: plugin.log(metrics))

    # resetting

    def reset_metrics(self):
        """Reset every metric in the 'epoch' group after summarizing."""
        for epoch_metric in self._get_metrics('epoch').values():
            epoch_metric.reset()
