import os
import sys
from typing import Callable, TYPE_CHECKING, Union

import torch
from torch.utils.tensorboard import SummaryWriter
from zkl_metrics import Metric
from zkl_training import ProcessingTaskPlugin

from .plugin_metrics import MetricsPlugin

if TYPE_CHECKING:
    from .training import GPTTraining
    from .validating import GPTValidating


class SummaryPlugin(ProcessingTaskPlugin):
    """Task plugin that periodically collects metric values and emits them
    as a "summary": printed to stderr and written to a TensorBoard event
    file, both on rank 0 only.

    A summary is triggered roughly every ``hyperparams.summary_tokens_n``
    processed tokens (see :meth:`check_need_summarize`). Training metrics
    are gathered from sibling :class:`MetricsPlugin` instances; validation
    metrics come from an optional ``validating_func`` callback.
    """

    def __init__(self, validating_func: Callable[['GPTTraining'], dict[str, float]] | None = None):
        """
        :param validating_func: optional callback invoked at summary time
            with the current task; must return metric values keyed by name.
            Its results are reported under the ``valid/`` prefix.
        """
        super().__init__()

        # Resolve the distributed rank, falling back to 0 for
        # single-process (non-distributed) runs. Checking explicitly is
        # more robust than catching an exception from get_rank(): an
        # uninitialized default process group raises RuntimeError in older
        # torch versions and ValueError in newer ones.
        if torch.distributed.is_available() and torch.distributed.is_initialized():
            self.process_rank = torch.distributed.get_rank()
        else:
            self.process_rank = 0

        self.validating_func = validating_func

        # progress_tokens_n at the moment of the last summary;
        # initialized in on_before_run.
        self.last_summary_tokens_n: int | None = None

        # Created lazily in on_before_run, and only on rank 0 — all other
        # ranks keep None and never write.
        self.summary_writer: SummaryWriter | None = None

    @property
    def task(self) -> Union['GPTTraining', 'GPTValidating']:
        """The owning task, narrowed to the two task types this plugin supports."""
        task = super().task
        # Imported here (not at module top) to avoid a circular import.
        from .training import GPTTraining
        from .validating import GPTValidating
        assert isinstance(task, (GPTTraining, GPTValidating))
        return task

    @property
    def progress_tokens_n(self) -> int:
        """Total number of tokens the task has processed so far."""
        return self.task.progress_tokens_n

    @property
    def metrics(self) -> dict[str, Metric]:
        """All ``Metric`` attributes of this plugin, discovered by the
        ``*_metric`` naming convention, keyed by name with the suffix stripped."""
        return {
            attr_name[:-len('_metric')]: attr_value
            for attr_name, attr_value in vars(self).items()
            if attr_name.endswith('_metric') and isinstance(attr_value, Metric)}

    # task

    def on_before_run(self):
        """Record the starting token count and open the writer on rank 0."""
        self.last_summary_tokens_n = self.progress_tokens_n

        if self.process_rank == 0:
            self.summary_writer = SummaryWriter(log_dir=os.path.join(self.task.path, "summaries"))

    def on_after_step(self):
        """Emit a summary after a step whenever the token interval has elapsed."""
        if self.check_need_summarize():
            self.summarize()
            self.last_summary_tokens_n = self.progress_tokens_n

    # summarizing

    def check_need_summarize(self) -> bool:
        """Return True when enough tokens have been processed since the
        last summary.

        Semantics of ``hyperparams.summary_tokens_n``:
        - ``None``: summaries disabled;
        - ``<= 0``: summarize after every step;
        - otherwise: summarize each time progress crosses the next
          multiple of the interval. During warmup the interval is divided
          by 10 so early training is sampled more densely.
        """
        summary_tokens_n = self.task.hyperparams.summary_tokens_n
        if summary_tokens_n is None:
            return False

        if summary_tokens_n <= 0:
            return True

        warmup_tokens_n = self.task.hyperparams.warmup_tokens_n
        if warmup_tokens_n is not None:
            if self.last_summary_tokens_n < warmup_tokens_n:
                # Clamp to at least 1 so an interval < 10 cannot collapse
                # to 0 and cause a ZeroDivisionError below.
                summary_tokens_n = max(1, summary_tokens_n // 10)

        next_summary_tokens_n = ((self.last_summary_tokens_n // summary_tokens_n) + 1) * summary_tokens_n
        return self.progress_tokens_n >= next_summary_tokens_n

    def summarize(self):
        """Collect metrics on every rank; print and persist them on rank 0.

        Collection runs on all ranks because it may involve collective
        operations (e.g. metric reduction across processes).
        """
        metrics_value = self.collect_metrics()
        if self.process_rank == 0:
            self.print_metrics(metrics_value)
            self.write_metrics(metrics_value)

    # collect

    def collect_metrics(self) -> dict[str, float]:
        """Gather training and validation metric values, namespaced with
        ``train/`` and ``valid/`` prefixes respectively."""
        metrics_value = {}

        for metric_name, metric_value in self.collect_metrics_value_from_training().items():
            metrics_value['train/' + metric_name] = metric_value

        for metric_name, metric_value in self.collect_metrics_value_from_validating().items():
            metrics_value['valid/' + metric_name] = metric_value

        return metrics_value

    def collect_metrics_value_from_training(self) -> dict[str, float]:
        """Merge metric values from every MetricsPlugin attached to the task.

        Later plugins overwrite earlier ones on name collisions.
        """
        metrics_value = {}
        for plugin in self.task.plugins:
            if isinstance(plugin, MetricsPlugin):
                metrics_value.update(plugin.collect_metrics_value())
        return metrics_value

    def collect_metrics_value_from_validating(self) -> dict[str, float]:
        """Run the validation callback if configured; otherwise no metrics."""
        return self.validating_func(self.task) \
            if self.validating_func is not None else {}

    # print & write

    def print_metrics(self, metrics_value: dict[str, float]):
        """Print a human-readable summary block to stderr."""
        print(f"\nSummary (progress_tokens_n={self.task.progress_tokens_n})", file=sys.stderr)
        for metric_name, metric_value in metrics_value.items():
            print(f"\t{metric_name}: {metric_value}", file=sys.stderr)

    def write_metrics(self, metrics_value: dict[str, float]):
        """Write each metric as a TensorBoard scalar, stepped by token count.

        No-op on non-zero ranks, where summary_writer is None.
        """
        if self.summary_writer is not None:
            for metric_name, metric_value in metrics_value.items():
                self.summary_writer.add_scalar(metric_name, metric_value, self.progress_tokens_n)
