import os
import sys

import torch
from zkl_aiutils_training import Dock
from zkl_ptutils_training import FsResumableController, FsResumeFromScratchArgs, MLProcessingTask, MetricsRecording, \
    SimpleTrainingEngineConfig, SimpleTrainingEngineHparams, StandardTrainer, StdOutMetricsLogger, SummaryScheduling, \
    TqdmProgressBar, ZklDatasetProducer, ZklRecordsMetricsLogger
from zkl_pyutils_fsspec import resolve_fs

# Make the project root importable so the `scripts.*` / `zkl_llmpt_nbptt`
# imports below resolve when this file is run as a script.
# os.path.abspath guards against `__file__` being a bare filename (empty
# dirname) when the script is launched from its own directory, and
# normalizes the "../.." hop into a clean absolute path.
script_dir_path = os.path.dirname(os.path.abspath(__file__))
project_dir_path = os.path.abspath(os.path.join(script_dir_path, "../.."))
# Avoid stacking duplicate entries on repeated execution (e.g. notebooks).
if project_dir_path not in sys.path:
    sys.path.append(project_dir_path)

from scripts.datasets.delay_repeat import make_delay_repeat_dataset_for_training
from scripts.training.engine import NbpttTrainingEngine
from scripts.training.model import TrainingModel
from zkl_llmpt_nbptt.nbptt_causal_language_model import NbpttCausalLanguageModel, NbpttCausalLanguageModelHparams
from zkl_llmpt_nbptt.nbptt_transformer import NbpttTransformerHparams

# %% config

# Toy task setup: vocabulary size doubles as the model's embedding count
# (see embs_n below), with a fixed batch size and seed for reproducibility.
vocab_tokens_n = 8
batch_samples_n = 64
random_seed = 42

learning_rate = 1e-4

# Emit a metrics summary every this many processed tokens
# (consumed by SummaryScheduling below).
summary_interval_tokens_n = 5000

# Model hyperparameters — deliberately tiny, sized for the delay-repeat
# toy dataset rather than real language modeling.
hparams = NbpttCausalLanguageModelHparams(
    embs_n=vocab_tokens_n,
    emb_size=32,
    transformer=NbpttTransformerHparams(
        i_size=32,
        layers_n=1,
        memory_n=1,
        queries_n=4,
        groups_n=4,
        qk_size=16,
        v_size=16,
        m_size=32,
        h_size=32,
        o_size=32))

# Whether the engine compiles the model (presumably torch.compile —
# see SimpleTrainingEngineConfig; confirm against zkl_ptutils_training).
compiling = True
# Allow TF32 for CUDA matmuls (PyTorch 2.x matmul precision API).
torch.backends.cuda.matmul.fp32_precision = 'tf32'
# bfloat16 on GPU; plain float32 on CPU where bf16 support varies.
dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Filesystem handle rooted at the training output directory
# (used below as the resume/checkpoint root for FsResumableController).
training_dir_fs = resolve_fs(os.path.join(project_dir_path, "trainings/policy"))

# %% training

# Plugin container that hosts the training task and its supporting
# plugins (metrics, progress bar, resume controller) installed below.
dock = Dock()

# Synthetic delay-repeat dataset; batches are produced directly on the
# target device with a fixed seed for reproducible runs.
dataset = make_delay_repeat_dataset_for_training(
    vocab_tokens_n=vocab_tokens_n,
    batch_samples_n=batch_samples_n,
    random_seed=random_seed,
    device=device)

def model_factory():
    """Build a fresh TrainingModel around a new NbpttCausalLanguageModel.

    Runs one warm-up forward pass with a dummy token batch so any lazy
    parameter/state initialization happens eagerly, before the engine
    takes over (e.g. ahead of optimizer setup or compilation).
    """
    language_model = NbpttCausalLanguageModel(hparams=hparams, dtype=dtype, device=device)
    warmup_tokens = torch.ones((batch_samples_n, 1), dtype=torch.int64, device=device)
    language_model(warmup_tokens)  # dummy call
    return TrainingModel(model=language_model)

# NBPTT training engine: owns model construction (via the factory),
# optional compilation, and the optimizer learning rate.
engine = NbpttTrainingEngine(
    model_factory=model_factory,
    config=SimpleTrainingEngineConfig(
        compiling=compiling),
    hparams=SimpleTrainingEngineHparams(
        learning_rate=learning_rate))

# Main processing task: the dataset producer feeds batches to the
# trainer, which drives the engine.
dock.install(MLProcessingTask(
    producer=ZklDatasetProducer(dataset),
    processor=StandardTrainer(engine=engine)))

# Checkpoint/resume support over a filesystem (configured via resume()
# further below).
dock.install(FsResumableController())

dock.install(MetricsRecording())

# Trigger summaries every summary_interval_tokens_n tokens, tracked via
# the 'tokens' progress metric.
dock.install(SummaryScheduling(
    progress_metric_name='tokens',
    action_interval=summary_interval_tokens_n))

# Progress bar driven by the same 'tokens' metric as the summaries.
dock.install(TqdmProgressBar(
    progress_metric_name='tokens',
    unit='tokens', unit_scale=True,
    desc="Training"))

# Mirror metrics to stdout and to zkl records.
dock.install(StdOutMetricsLogger())
dock.install(ZklRecordsMetricsLogger())

# Start training from scratch, rooted at the training directory.
# NOTE: training_dir_fs is already the result of resolve_fs() in the
# config section — the original code resolved it a second time
# (resolve_fs(training_dir_fs)), which is redundant at best; pass the
# resolved filesystem through directly.
resume_args = FsResumeFromScratchArgs(
    fs=training_dir_fs)
controller = dock.get_plugin(FsResumableController)
controller.resume(resume_args)

# Run the installed MLProcessingTask (producer -> trainer loop).
task = dock.get_plugin(MLProcessingTask)
task.run()
