import json
import os
import sys

import fire
import torch
import torch.distributed
from zkl_datasets import load_dataset
from zkl_serialization import dump_json_value, load_and_parse_json

# Make the repository root importable so the `llmpt` and `scripts` packages
# resolve when this file is run directly as a script (not as a module).
# NOTE: must happen before the project-local imports below.
root_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(root_dir_path)

from llmpt.model import GPTTrainingSimple
from llmpt.preprocess import MultiSplitsTokenizedDataset
from scripts.config import default_dataset_path, default_hyperparams_file_path, trainings_dir_path
from scripts.training.utils import make_training_name


def main(*,
    trainings_dir_path: str = trainings_dir_path,
    training_dir_path: str | None = None,
    hyperparams_file_path: str = default_hyperparams_file_path,
    dataset_path: str = default_dataset_path,
):
    """Run a simple GPT training configured by a hyperparams JSON file.

    Args:
        trainings_dir_path: Directory under which a new training directory is
            created when ``training_dir_path`` is not given.
        training_dir_path: Explicit training output directory. When ``None``,
            a directory name is derived from the hyperparams.
        hyperparams_file_path: Path to the JSON file parsed into
            ``GPTTrainingSimple.Hyperparams``.
        dataset_path: Path to the tokenized dataset; must load as a
            ``MultiSplitsTokenizedDataset`` with 'train' and 'valid' splits.
    """
    # load hyperparams
    hyperparams = load_and_parse_json(hyperparams_file_path, GPTTrainingSimple.Hyperparams)

    # training dir: derive a name from the hyperparams when not given explicitly
    if training_dir_path is None:
        training_dir_path = os.path.join(trainings_dir_path, make_training_name(hyperparams))
    # Normalize in both cases (fix: previously an explicitly passed relative
    # path was never made absolute; abspath is a no-op for absolute paths).
    training_dir_path = os.path.abspath(training_dir_path)

    # print useful information
    print("Starting training:", file=sys.stderr)
    print("training_dir_path=" + training_dir_path, file=sys.stderr)
    print("hyperparams=" + json.dumps(dump_json_value(hyperparams), indent=4), file=sys.stderr)
    print(file=sys.stderr)

    # setup configurations: query CUDA availability once; only compile on GPU.
    # `use_compile` avoids shadowing the builtin `compile`.
    use_cuda = torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    use_compile = use_cuda
    torch.set_float32_matmul_precision('medium')

    # load dataset
    dataset = load_dataset(dataset_path)
    assert isinstance(dataset, MultiSplitsTokenizedDataset)

    # perform training
    GPTTrainingSimple(
        path=training_dir_path,
        hyperparams=hyperparams,
        train_dataset=dataset.children['train'],
        valid_dataset=dataset.children['valid'],
        device=device,
        compile=use_compile,
    ).run()


if __name__ == '__main__':
    # Expose `main`'s keyword-only parameters as CLI flags via python-fire.
    fire.Fire(main)
