import os
import sys

import fire
import torch
import torch.distributed
from zkl_aiutils_datasets import load_dataset
from zkl_promptui import input_text

# Make the repository root importable so `llmpt` and `scripts` resolve when
# this script is run directly (two levels up from this file's directory).
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(project_dir_path)

from llmpt.model import GPTTrainingSimple, GPTTrainingResumeArgs
from scripts.config import default_dataset_path


def main(*,
    training_dir_path: str | None = None,
    checkpoint_tokens_n: int | None = None,
    dataset_path: str = default_dataset_path,
):
    """Resume a previously started GPT training run.

    Args:
        training_dir_path: Directory of the run to resume. Prompted for
            interactively (and made absolute) when not given on the CLI;
            a CLI-supplied path is used as-is.
        checkpoint_tokens_n: Forwarded to ``GPTTrainingResumeArgs``;
            presumably a checkpointing interval in tokens — confirm there.
        dataset_path: Root directory expected to contain ``train`` and
            ``valid`` sub-datasets loadable by ``load_dataset``.
    """
    # training dir: prompt interactively when not supplied on the command line
    if training_dir_path is None:
        training_dir_path = input_text("Please specify training_dir_path:")
        training_dir_path = os.path.abspath(training_dir_path)

    # print useful information (stderr, so it doesn't pollute piped stdout)
    print("Resuming training:", file=sys.stderr)
    print("training_dir_path=" + training_dir_path, file=sys.stderr)
    print(file=sys.stderr)

    # device configuration: probe CUDA once; avoid shadowing the `compile`
    # builtin with a local name
    cuda_available = torch.cuda.is_available()
    device = 'cuda' if cuda_available else 'cpu'
    use_compile = cuda_available  # model compilation enabled only on GPU
    torch.set_float32_matmul_precision('medium')

    # perform training; datasets are created lazily via factories
    GPTTrainingSimple(GPTTrainingResumeArgs(
        training_dir_path=training_dir_path,
        checkpoint_tokens_n=checkpoint_tokens_n,
        train_dataset_factory=lambda: load_dataset(os.path.join(dataset_path, 'train')),
        valid_dataset_factory=lambda: load_dataset(os.path.join(dataset_path, 'valid')),
        device=device,
        compile=use_compile,
    )).run()


if __name__ == '__main__':
    # Expose `main`'s keyword-only parameters as CLI flags via python-fire.
    fire.Fire(main)
