import json
import os
import subprocess
import sys

import deepspeed
import fire
import torch
import torch.distributed
from zkl_datasets import load_dataset
from zkl_serialization import dump_json_value, load_and_parse_json

# Make the repository root importable so the `llmpt` and `scripts` packages
# resolve when this file is run as a standalone script (it is re-executed
# directly by the deepspeed launcher, not as a package module).
root_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(root_dir_path)

from llmpt.model import GPTTraining, GPTTrainingDeepspeed
from llmpt.preprocess import MultiSplitsTokenizedDataset
from scripts.config import default_dataset_path, default_hyperparams_file_path, trainings_dir_path
from scripts.training.utils import make_training_name


def main(*,
    trainings_dir_path: str = trainings_dir_path,
    training_dir_path: str | None = None,
    hyperparams_file_path: str = default_hyperparams_file_path,
    dataset_path: str = default_dataset_path,
    group_size: int | None = None,
    local_rank: int | None = None,
):
    """Launch a GPT training run under the deepspeed launcher.

    On the first (user) invocation this resolves the training directory and
    re-executes this very script via the ``deepspeed`` CLI with one process
    per GPU. The launcher then re-invokes the script with ``--local_rank``
    set, in which case we delegate straight to :func:`main_distributed`.

    Args:
        trainings_dir_path: Directory under which auto-named training dirs are created.
        training_dir_path: Explicit training directory; derived from hyperparams when None.
        hyperparams_file_path: JSON file parsed into ``GPTTraining.Hyperparams``.
        dataset_path: Path passed through to the worker's dataset loader.
        group_size: Number of worker processes; defaults to the visible GPU count.
        local_rank: Set only by the deepspeed launcher; marks a worker invocation.

    Raises:
        ValueError: If launched as a worker without a resolved training dir.
        RuntimeError: If no GPUs are available and ``group_size`` is not given.
        subprocess.CalledProcessError: If the deepspeed launcher exits non-zero.
    """
    # Worker re-invocation: the deepspeed launcher always passes --local_rank.
    if local_rank is not None:
        if training_dir_path is None:
            # The launcher command built below always passes --training_dir_path;
            # fail loudly instead of handing None to main_distributed.
            raise ValueError(
                "training_dir_path is required when running under the deepspeed launcher")
        return main_distributed(
            training_dir_path=training_dir_path,
            hyperparams_file_path=hyperparams_file_path,
            dataset_path=dataset_path)

    # Default to one worker process per visible GPU.
    if group_size is None:
        group_size = torch.cuda.device_count()
    if group_size <= 0:
        raise RuntimeError(
            "no CUDA devices detected; pass group_size explicitly to launch anyway")

    # load hyperparams
    hyperparams = load_and_parse_json(hyperparams_file_path, GPTTraining.Hyperparams)

    # Derive the training dir from the hyperparams when not given explicitly;
    # always absolutize, since the launcher may re-run us from another cwd.
    if training_dir_path is None:
        training_dir_path = os.path.join(trainings_dir_path, make_training_name(hyperparams))
    training_dir_path = os.path.abspath(training_dir_path)

    # print useful information
    print("Starting training:", file=sys.stderr)
    print("group_size=" + str(group_size), file=sys.stderr)
    print("training_dir_path=" + training_dir_path, file=sys.stderr)
    print("hyperparams=" + json.dumps(dump_json_value(hyperparams), indent=4), file=sys.stderr)
    print(file=sys.stderr)

    # Re-execute this script under the deepspeed launcher (list argv, no shell).
    cmd = [
        "deepspeed",
        "--num_gpus", str(group_size),
        __file__,
        "--training_dir_path", training_dir_path,
        "--hyperparams_file_path", hyperparams_file_path,
        "--dataset_path", dataset_path]
    print("Executing:", file=sys.stderr)
    print(" ".join(cmd), file=sys.stderr)
    subprocess.check_call(cmd)


def main_distributed(*,
    training_dir_path: str,
    hyperparams_file_path: str,
    dataset_path: str,
) -> None:
    """Worker entry point, executed once per process by the deepspeed launcher.

    Initializes the distributed process group, loads the hyperparams and the
    tokenized dataset, then runs the deepspeed-backed training loop.

    Args:
        training_dir_path: Directory where training state/outputs are kept.
        hyperparams_file_path: JSON file parsed into ``GPTTraining.Hyperparams``.
        dataset_path: Path to a ``MultiSplitsTokenizedDataset`` with
            'train' and 'valid' child splits.
    """
    # init distributed — must happen before querying rank/world size below
    deepspeed.init_distributed()
    process_rank = torch.distributed.get_rank()
    process_group_size = torch.distributed.get_world_size()
    print(f"process[{process_rank}/{process_group_size}] started!", file=sys.stderr)

    # load hyperparams (each worker parses the file independently)
    hyperparams = load_and_parse_json(hyperparams_file_path, GPTTraining.Hyperparams)

    # load dataset
    dataset = load_dataset(dataset_path)
    assert isinstance(dataset, MultiSplitsTokenizedDataset)

    # setup configurations
    # NOTE(review): device index is the GLOBAL rank — correct for the
    # single-node launch in main(); for multi-node runs this should be the
    # local rank instead. TODO confirm single-node is the only use case.
    device = torch.device("cuda", process_rank)
    # trade matmul precision for speed (enables TF32-class kernels)
    torch.set_float32_matmul_precision('medium')

    # perform training
    GPTTrainingDeepspeed(
        path=training_dir_path,
        hyperparams=hyperparams,
        train_dataset=dataset.children['train'],
        valid_dataset=dataset.children['valid'],
        device=device,
    ).run()


# Fire turns main()'s keyword-only parameters into CLI flags — including the
# --local_rank flag that the deepspeed launcher injects on worker re-invocation.
if __name__ == '__main__':
    fire.Fire(main)
