import json
import os
import subprocess
import sys

import deepspeed
import fire
import torch
import torch.distributed
from zkl_aiutils_datasets import load_dataset
from zkl_serialization import dump_json_value, load_and_parse_json

# Make the project root importable so the `llmpt` and `scripts` packages below
# resolve when this file is executed as a script (including when re-launched
# by the deepspeed CLI, which runs it by file path rather than as a module).
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(project_dir_path)

from llmpt.model import GPTLaunchingHparams, GPTTrainingDeepspeed, GPTTrainingDeepspeedCreateArgs, make_training_name, \
    torch_distributed_get_info
from scripts.config import default_dataset_path, default_hparams_file_path, trainings_dir_path


def main(*,
    trainings_dir_path: str = trainings_dir_path,
    training_dir_path: str | None = None,
    hparams_file_path: str = default_hparams_file_path,
    dataset_path: str = default_dataset_path,
    group_size: int | None = None,
    local_rank: int | None = None,
):
    """Launch a GPT training run.

    When invoked by the user (``local_rank is None``) this process acts as a
    launcher: it resolves the training directory and re-executes this script
    under the ``deepspeed`` CLI with one process per GPU.  The relaunched
    worker processes receive ``--local_rank`` from deepspeed and take the
    ``main_distributed`` path instead.

    Args:
        trainings_dir_path: Root directory under which auto-named training
            directories are created.
        training_dir_path: Explicit training directory; when ``None`` a name
            derived from the hparams is created under ``trainings_dir_path``.
        hparams_file_path: Path to the JSON launching-hparams file.
        dataset_path: Directory containing ``train``/``valid`` datasets.
        group_size: Number of GPU workers; defaults to all visible CUDA devices.
        local_rank: Injected by the deepspeed launcher; its presence marks this
            process as a distributed worker rather than the launcher.
    """
    # Worker processes spawned by deepspeed carry --local_rank and go straight
    # to the actual training entry point.
    if local_rank is not None:
        return main_distributed(
            training_dir_path=training_dir_path,
            hparams_file_path=hparams_file_path,
            dataset_path=dataset_path)

    # Resolve the training dir; hparams are loaded up front so a broken
    # hparams file fails fast, before any subprocess is spawned.
    hparams = load_and_parse_json(hparams_file_path, GPTLaunchingHparams)
    if training_dir_path is None:
        training_dir_path = os.path.join(trainings_dir_path, make_training_name(hparams))
        training_dir_path = os.path.abspath(training_dir_path)

    # Re-launch this very script under the deepspeed CLI, one process per GPU.
    if group_size is None:
        group_size = torch.cuda.device_count()
    cmd = [
        "deepspeed",
        "--num_gpus", str(group_size),
        __file__,
        "--training_dir_path", training_dir_path,
        "--hparams_file_path", hparams_file_path,
        "--dataset_path", dataset_path]
    print("Executing:", file=sys.stderr)
    print(" ".join(cmd), file=sys.stderr)
    subprocess.check_call(cmd)


def main_distributed(*,
    training_dir_path: str,
    hparams_file_path: str,
    dataset_path: str,
):
    """Distributed worker entry point: train a GPT model under deepspeed.

    Runs once per GPU process after the deepspeed launcher has set up the
    environment: joins the process group, loads the launching hparams, and
    drives the deepspeed training loop to completion.
    """
    # Join the process group established by the deepspeed launcher.
    deepspeed.init_distributed()
    process_rank, processes_num = torch_distributed_get_info()
    print(f"process[{process_rank}/{processes_num}] started!", file=sys.stderr)

    launch_hparams = load_and_parse_json(hparams_file_path, GPTLaunchingHparams)

    # Only rank 0 logs the configuration, keeping stderr readable.
    if process_rank == 0:
        print("Starting training:", file=sys.stderr)
        print("training_dir_path=" + training_dir_path, file=sys.stderr)
        print("model_hparams=" + json.dumps(dump_json_value(launch_hparams.model), indent=4), file=sys.stderr)
        print("training_hparams=" + json.dumps(dump_json_value(launch_hparams.training), indent=4), file=sys.stderr)
        print(file=sys.stderr)

    # Allow TF32 matmuls; each worker drives the CUDA device matching its rank.
    # NOTE(review): global rank is used as the CUDA device index, which assumes
    # a single-node launch — confirm before running multi-node.
    torch.set_float32_matmul_precision('medium')
    device = torch.device("cuda", process_rank)

    # Dataset factories are deferred so each dataset is opened lazily inside
    # the training machinery rather than at construction time.
    def make_train_dataset():
        return load_dataset(os.path.join(dataset_path, 'train'))

    def make_valid_dataset():
        return load_dataset(os.path.join(dataset_path, 'valid'))

    create_args = GPTTrainingDeepspeedCreateArgs(
        training_dir_path=training_dir_path,
        model_hparams=launch_hparams.model,
        training_hparams=launch_hparams.training,
        train_dataset_factory=make_train_dataset,
        valid_dataset_factory=make_valid_dataset,
        device=device,
        compile=True,
    )
    GPTTrainingDeepspeed(create_args).run()


if __name__ == '__main__':
    # Expose `main`'s keyword-only arguments as CLI flags via python-fire;
    # the deepspeed launcher re-enters here with --local_rank set.
    fire.Fire(main)
