import torch.distributed

from zkl_ptutils_training.utils.distributed import torch_distributed_is_rank0
from .action_scheduling import ActionScheduling


class TqdmProgressBar(ActionScheduling):
    """Scheduled action that renders training progress as a tqdm bar.

    Only rank 0 of the (optional) process group draws the bar; all other
    ranks are silent no-ops. Extra keyword arguments are forwarded
    verbatim to the ``tqdm`` constructor (e.g. ``total``, ``desc``,
    ``unit``).
    """

    def __init__(self, *,
        progress_metric_name: str = 'step',
        action_interval: int | float | None = 1,
        group: torch.distributed.ProcessGroup | None = None,
        **kwargs,
    ):
        super().__init__(
            progress_metric_name=progress_metric_name,
            action_interval=action_interval)
        # Only rank 0 of `group` owns the bar; other ranks stay silent.
        self.rank0 = torch_distributed_is_rank0(group)
        # Forwarded verbatim to tqdm() when the bar is created.
        self.kwargs = kwargs
        # Bar handle; None until _prepare() runs and again after _release().
        # Initializing here prevents AttributeError if _action()/_release()
        # is ever invoked before _prepare().
        self.tqdm = None

    def _prepare(self, progress: int | float):
        """Create the bar at the current progress position (rank 0 only)."""
        if self.rank0:
            metrics_scalar_value = self._get_metrics_scalar_value()
            # Lazy import: tqdm is only required on the rank that draws.
            from tqdm import tqdm
            self.tqdm = tqdm(initial=progress, **self.kwargs)
            self.tqdm.set_postfix(metrics_scalar_value)

    def _action(self, progress: int | float):
        """Advance the bar to *progress* and refresh the metrics postfix."""
        # The `is not None` guard makes calls before _prepare() or after
        # _release() safe no-ops instead of AttributeError crashes.
        if self.rank0 and self.tqdm is not None:
            # tqdm.update() takes a delta, not an absolute position.
            self.tqdm.update(progress - self.tqdm.n)
            self.tqdm.set_postfix(self._get_metrics_scalar_value())

    def _release(self):
        """Flush the bar to the final progress/metrics and close it."""
        if self.rank0 and self.tqdm is not None:
            progress = self._get_progress()
            metrics_scalar_value = self._get_metrics_scalar_value()
            self.tqdm.update(progress - self.tqdm.n)
            self.tqdm.set_postfix(metrics_scalar_value)
            self.tqdm.close()
            # Drop the handle so a repeated _release() is a no-op.
            self.tqdm = None
