import json
import os
import subprocess
import sys
import warnings
from collections.abc import Iterable
from datetime import datetime
from functools import partial
from typing import Callable

import fire
import torch
from zkl_ptutils_training import FsResumableController, FsResumeFromCheckpointArgs, FsResumeFromScratchArgs, \
    MLProcessingTask
from zkl_ptutils_training.utils.fs import resolve_fs
from zkl_pyutils_serialization import dump_json_value, load_and_parse_json

from scripts.training.training import CommonTrainingHparams, assemble_training_task
from scripts.utils.distributed import torch_distributed_get_info


def make_training_name(
    name: str | None = None,
    hparams: CommonTrainingHparams | None = None,
) -> str:
    """Build a training-run name: ``training_<timestamp>[_<name>][_<hparams.name>...]``.

    The microsecond-precision timestamp makes the name effectively unique.
    ``hparams.name`` may be a single string or an iterable of extra segments.

    Raises:
        TypeError: if ``hparams.name`` is neither a string nor an iterable.
    """
    segments = ["training", datetime.now().strftime("%Y%m%d-%H%M%S-%f")]
    if name is not None:
        segments.append(name)
    hp_name = None if hparams is None else hparams.name
    if hp_name is not None:
        # A str is itself Iterable, so it must be tested first to avoid
        # splitting the name into individual characters.
        if isinstance(hp_name, str):
            segments.append(hp_name)
        elif isinstance(hp_name, Iterable):
            segments.extend(hp_name)
        else:
            raise TypeError(f"Unexpected hparams.name {hp_name}")
    return "_".join(segments)


def common_train_cli(*,
    hparams_cls: type,
    model_factory: Callable,

    resume: bool = False,
    training_dir_path: str,
    resume_checkpoint_key: str | None = None,
):
    """Per-worker training entry point.

    Loads hparams from ``<training_dir_path>/hparams.json``, assembles the
    training task, resumes it (from scratch or from a checkpoint), and runs it.

    Under torchelastic, the process group is initialized first and the function
    re-enters itself so that ``destroy_process_group`` is guaranteed by the
    ``finally`` clause even if training raises.
    """
    # --- Distributed bootstrap -------------------------------------------

    elastic = torch.distributed.is_torchelastic_launched()
    if elastic and not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend='nccl')
        try:
            return common_train_cli(
                hparams_cls=hparams_cls,
                model_factory=model_factory,
                resume=resume,
                training_dir_path=training_dir_path,
                resume_checkpoint_key=resume_checkpoint_key)
        finally:
            torch.distributed.destroy_process_group()

    process_rank, processes_n = torch_distributed_get_info()
    print(f"Process launched [{process_rank}/{processes_n}]")

    # --- Assembling the training task ------------------------------------

    hparams = load_and_parse_json(
        os.path.join(training_dir_path, "hparams.json"), hparams_cls)
    dock = assemble_training_task(hparams=hparams, model_factory=model_factory)

    # --- Resuming the training task ---------------------------------------

    fs = resolve_fs(training_dir_path)
    if resume:
        # Fire may parse the key as an int; normalize to str.
        checkpoint_key = str(resume_checkpoint_key) \
            if resume_checkpoint_key is not None else None
        resume_args = FsResumeFromCheckpointArgs(
            fs=fs, checkpoint_key=checkpoint_key)
    else:
        resume_args = FsResumeFromScratchArgs(fs=fs)
    dock.get_plugin(FsResumableController).resume(resume_args)

    # --- Launching the training task --------------------------------------

    dock.get_plugin(MLProcessingTask).run()


def common_launch_cli(*,
    hparams_cls: type,
    launched_flag_env_name: str,

    script_file_path: str,
    processes_n: int | None = None,

    name: str | None = None,
    hparams_file_path: str | None = None,
    trainings_dir_path: str | None = None,

    resume: bool = False,
    training_dir_path: str | None = None,
    resume_checkpoint_key: str | None = None,
):
    """Launcher entry point: prepares the training directory and spawns the
    actual training process (directly, or via torchrun for multi-process runs).

    Args:
        hparams_cls: Class the hparams JSON is parsed into.
        launched_flag_env_name: Env var set to "1" in the child so the
            re-executed script dispatches to the training CLI.
        script_file_path: Script to (re-)execute in the child process.
        processes_n: Worker count; defaults to the CUDA device count (min 1).
        name: Optional extra segment for a generated training name.
        hparams_file_path: hparams JSON for a fresh run (ignored on resume —
            the copy saved in the training dir is authoritative).
        trainings_dir_path: Parent dir for new training dirs; required for a
            fresh run when ``training_dir_path`` is not given.
        resume: Resume an existing training instead of starting fresh.
        training_dir_path: Existing training dir (required on resume).
        resume_checkpoint_key: Specific checkpoint to resume from.

    Raises:
        ValueError: when a required path argument is missing.
        subprocess.CalledProcessError: if the child process exits non-zero.
    """
    print("\nLoading hparams...")

    if not resume:
        if hparams_file_path is None:
            raise ValueError("hparams_file_path is required!")
    else:
        if hparams_file_path is not None:
            # BUGFIX: was an f-string with no placeholders.
            warnings.warn("hparams_file_path is ignored when resume==True")
        if training_dir_path is None:
            raise ValueError("training_dir_path is required when resume==True")
        # On resume, reload exactly the config saved with the training.
        hparams_file_path = os.path.join(training_dir_path, "hparams.json")
    hparams = load_and_parse_json(hparams_file_path, hparams_cls)
    print(f"\nhparams={json.dumps(dump_json_value(hparams), indent=2)}")
    print("\n")

    print("\nPreparing training directory...")

    if not resume:
        if training_dir_path is None:
            # BUGFIX: previously a missing trainings_dir_path crashed with an
            # opaque TypeError inside os.path.join; fail with a clear message.
            if trainings_dir_path is None:
                raise ValueError(
                    "trainings_dir_path is required when training_dir_path is not given!")
            training_name = make_training_name(name, hparams)
            training_dir_path = os.path.join(trainings_dir_path, training_name)
            print("training_dir_path =", training_dir_path)

        os.makedirs(training_dir_path, exist_ok=True)
        # Persist hparams so a later --resume can reload this exact config.
        with open(os.path.join(training_dir_path, "hparams.json"), "wt") as fp:
            json.dump(dump_json_value(hparams), fp, indent=2)

    print("\nLaunching training process...")

    if processes_n is None:
        processes_n = max(torch.cuda.device_count(), 1)

    if processes_n == 1:
        cmd = [sys.executable]
    else:
        # rdzv-endpoint port 0 lets torchrun pick a free local port.
        cmd = [
            "torchrun",
            "--nnodes=1",
            f"--nproc-per-node={processes_n}",
            "--rdzv-backend=c10d",
            "--rdzv-endpoint=localhost:0"]

    cmd.append(script_file_path)
    cmd.extend(["--training_dir_path", training_dir_path])

    if resume:
        cmd.append("--resume")
        if resume_checkpoint_key is not None:
            cmd.extend(["--resume_checkpoint_key", str(resume_checkpoint_key)])

    print(" ".join(cmd))
    # The flag env var marks the child so it dispatches to training, not launch.
    subprocess.check_call(cmd, env={**os.environ, launched_flag_env_name: "1"})

    print("\nDone.")


def common_mixture_cli(*,
    hparams_cls: type,
    model_factory: Callable,

    script_file_path: str,
    hparams_file_path: str | None = None,
    trainings_dir_path: str | None = None,

    launched_flag_env_name: str = "LAUNCHED_FLAG",
):
    """Single entry point used both by the user and by the spawned child.

    On the first invocation (flag env var unset) it dispatches to the launcher
    CLI, which spawns a child with the flag set; inside the child it dispatches
    to the training CLI. Remaining CLI args are parsed by python-fire.

    Args:
        hparams_cls: Class the hparams JSON is parsed into.
        model_factory: Factory building the model for the training task.
        script_file_path: Script the launcher re-executes as the child.
        hparams_file_path: Default hparams file path forwarded to the launcher.
        trainings_dir_path: Default parent dir for new training dirs.
        launched_flag_env_name: Env var marking the child process; the default
            preserves the previously hard-coded "LAUNCHED_FLAG".
    """
    if os.getenv(launched_flag_env_name) is None:
        # Parent process: prepare the training dir and spawn the child.
        fire.Fire(partial(
            common_launch_cli,
            hparams_cls=hparams_cls,
            script_file_path=script_file_path,
            hparams_file_path=hparams_file_path,
            trainings_dir_path=trainings_dir_path,
            launched_flag_env_name=launched_flag_env_name))
    else:
        # Child process: run the actual training.
        fire.Fire(partial(
            common_train_cli,
            hparams_cls=hparams_cls,
            model_factory=model_factory))
