import abc
from collections.abc import Iterable
from copy import deepcopy
from dataclasses import dataclass
from functools import partial
from typing import Callable

import torch
from zkl_aiutils_training import Dock
from zkl_ptutils_training import CheckpointScheduling, DeepSpeedTrainingEngine, EarlyStopByMaxThreshold, \
    FsResumableController, LearningRateScheduling, LearningRateSchedulingHparams as LearningRateSchedulingHparams_, \
    MLProcessingTask, MetricsRecording, SimpleTrainingEngine, SimpleTrainingEngineHparams, StandardTrainer, \
    StdOutMetricsLogger, SummaryScheduling, TensorBoardMetricsLogger, TqdmProgressBar, ZklDatasetProducer, \
    make_keep_last_few_rule
from zkl_ptutils_training.engines.deepspeed import DeepSpeedTrainingEngineConfig
from zkl_ptutils_training.engines.simple import SimpleTrainingEngineConfig

from scripts.datasets.loading import DatasetHparams, IteratorHparams, load_llmpt_dataset_from_hparams
from scripts.training.validating import BpbValidationProvider, BpbValidationProviderHparams
from scripts.utils.distributed import torch_distributed_get_info


@dataclass(kw_only=True)
class CommonTrainingModelHparams(abc.ABC):
    """Abstract base for model hyperparameters shared by all training scripts."""

    # Embedding-table size (vocabulary size). Overwritten from the dataset's
    # tokenizer in assemble_training_task, so callers need not set it up front.
    embs_n: int


@dataclass(kw_only=True)
class LearningRateSchedulingHparams:
    """Token-denominated LR-schedule knobs, mapped in assemble_training_task onto
    the library's generic LearningRateSchedulingHparams_ (warmup_duration /
    decay_halflife) with progress measured by the 'tokens' metric."""

    # Peak learning rate; None means "use the engine's learning rate".
    learning_rate: float | None = None
    # Linear-warmup length in tokens (maps to warmup_duration).
    warmup_tokens_n: float | int | None = None
    # Decay half-life in tokens (maps to decay_halflife).
    decay_tokens_n: float | int | None = None
    # Passed through to decay_scale unchanged; semantics defined by the
    # zkl_ptutils_training scheduler.
    decay_scale: float | None = None


@dataclass(kw_only=True)
class CommonTrainingHparams(abc.ABC):
    """Abstract bundle of everything assemble_training_task needs to wire a run.

    kw_only=True permits the defaulted `name` field to precede the required
    fields below.
    """

    # Run name (or several name components) for bookkeeping; optional.
    name: str | Iterable[str] | None = None
    dataset: DatasetHparams
    model: CommonTrainingModelHparams
    iterator: IteratorHparams
    engine: SimpleTrainingEngineHparams
    # None disables LR scheduling entirely (see assemble_training_task).
    learning_rate_scheduling: LearningRateSchedulingHparams | None = None
    # Stop training once the 'tokens' metric reaches this threshold.
    train_stopping_tokens_n: int | None = None
    # Token budget per validation pass; defaults to 0 — presumably meaning
    # "no validation work", TODO confirm against BpbValidationProvider.
    valid_stopping_tokens_n: int | None = 0
    summary_interval_tokens_n: int | None = None
    checkpoint_interval_tokens_n: int | None = None


def _select_runtime(process_rank: int) -> tuple[torch.device, torch.dtype, bool]:
    """Pick (device, dtype, compiling) from CUDA availability."""
    if torch.cuda.is_available():
        # Lower matmul precision trades a little accuracy for TF32 throughput.
        torch.set_float32_matmul_precision('medium')
        # NOTE(review): assumes process_rank equals the local GPU index —
        # true for single-node launches; confirm for multi-node setups.
        return torch.device(f"cuda:{process_rank}"), torch.bfloat16, True
    return torch.device("cpu"), torch.float32, False


def _build_engine(*,
    model_factory: Callable,
    engine_hparams: SimpleTrainingEngineHparams,
    compiling: bool,
    processes_n: int,
):
    """Build the training engine: a SimpleTrainingEngine for single-process
    runs, otherwise a DeepSpeed engine with ZeRO stage 2, overlapped comms,
    and CPU optimizer offload."""
    if processes_n == 1:
        return SimpleTrainingEngine(
            model_factory=model_factory,
            config=SimpleTrainingEngineConfig(
                compiling=compiling),
            hparams=engine_hparams)
    # noinspection PyTypeChecker
    return DeepSpeedTrainingEngine(
        model_factory=model_factory,
        config=DeepSpeedTrainingEngineConfig(
            compiling=compiling,
            zero_configurations={
                "stage": 2,
                "overlap_comm": True,
                "offload_optimizer": {
                    "device": "cpu",
                    "pin_memory": True,
                }
            }),
        hparams=engine_hparams)


def assemble_training_task(*,
    hparams: CommonTrainingHparams,
    model_factory: Callable,
) -> Dock:
    """Wire up and return a fully-configured training Dock.

    Args:
        hparams: Full run configuration. Mutated in place: the dataset split
            is forced to "train" and ``hparams.model.embs_n`` is overwritten
            with the tokenizer's vocabulary size.
        model_factory: Callable producing the model; called with
            ``hparams=..., dtype=..., device=...`` bound via partial.

    Returns:
        A Dock with producer/trainer task, checkpointing, LR scheduling,
        validation, progress bar, and metric loggers installed.
    """
    process_rank, processes_n = torch_distributed_get_info()
    device, dtype, compiling = _select_runtime(process_rank)

    dock = Dock()

    hparams.dataset.text_dataset_split_name = "train"
    dataset = load_llmpt_dataset_from_hparams(
        dataset_hparams=hparams.dataset,
        iterator_hparams=hparams.iterator,
        device=device)

    # The model's embedding table must match the tokenizer's vocabulary.
    hparams.model.embs_n = dataset.tokenizer.vocab_tokens_n
    model_factory = partial(model_factory,
        hparams=hparams.model, dtype=dtype, device=device)

    engine = _build_engine(
        model_factory=model_factory,
        engine_hparams=hparams.engine,
        compiling=compiling,
        processes_n=processes_n)

    dock.install(MLProcessingTask(
        producer=ZklDatasetProducer(dataset),
        processor=StandardTrainer(engine=engine)))

    dock.install(FsResumableController())

    dock.install(MetricsRecording())

    if hparams.learning_rate_scheduling is not None:
        # Prefer an explicitly-set scheduling LR; previously the
        # LearningRateSchedulingHparams.learning_rate field was silently
        # ignored in favor of the engine's LR. Falls back to the engine LR
        # when unset, so existing configs behave identically.
        lrs = hparams.learning_rate_scheduling
        learning_rate = lrs.learning_rate \
            if lrs.learning_rate is not None else hparams.engine.learning_rate
        dock.install(LearningRateScheduling(
            progress_metric_name='tokens',
            hparams=LearningRateSchedulingHparams_(
                learning_rate=learning_rate,
                warmup_duration=lrs.warmup_tokens_n,
                decay_halflife=lrs.decay_tokens_n,
                decay_scale=lrs.decay_scale,
            )))

    dock.install(EarlyStopByMaxThreshold(
        metric_name='tokens',
        threshold=hparams.train_stopping_tokens_n))

    dock.install(TqdmProgressBar(
        progress_metric_name='tokens',
        desc="Training", unit='tokens', unit_scale=True))

    dock.install(SummaryScheduling(
        progress_metric_name='tokens',
        action_interval=hparams.summary_interval_tokens_n))

    # Validation reads the "valid" split; deepcopy keeps the train-split
    # hparams set above intact.
    valid_dataset_hparams = deepcopy(hparams.dataset)
    valid_dataset_hparams.text_dataset_split_name = "valid"
    dock.install(BpbValidationProvider(
        hparams=BpbValidationProviderHparams(
            dataset=valid_dataset_hparams,
            iterator=hparams.iterator,
            stopping_tokens_n=hparams.valid_stopping_tokens_n),
        device=device))

    dock.install(StdOutMetricsLogger())
    dock.install(TensorBoardMetricsLogger(
        step_metric_name='tokens'))

    dock.install(CheckpointScheduling(
        progress_metric_name='tokens',
        action_interval=hparams.checkpoint_interval_tokens_n,
        keep_rules=make_keep_last_few_rule(4)))

    return dock
