import json
import os
import sys
from dataclasses import dataclass
from datetime import datetime

import fire
import torch
from zkl_llmpt_training import CheckpointPlugin, LearningRatePlugin, LlmptTraining, LlmptTrainingEngine, \
    LlmptValidating, LlmptValidatingBpb, MetricsPlugin, SimpleLlmptTrainingEngine, StoppingPlugin, SummaryPlugin, \
    TqdmPlugin
from zkl_serialization import dump_json_value, load_and_parse_json

# Bootstrap: make the repository root importable so the `scripts.*` and project
# packages below resolve when this file is executed directly as a script.
project_dir_path = os.path.join(os.path.dirname(__file__), "../..")
sys.path.append(project_dir_path)

from scripts.datasets.presets import load_preset_llmpt_dataset, load_preset_text_dataset, load_preset_text_tokenizer
from zkl_llmpt_haodar import HaodarCausalLanguageModelHparams, HaodarTrainingModel, HaodarTrainingModelHparams


@dataclass(kw_only=True)
class HaodarTrainingHparams:
    name: str | None = None

    model: HaodarCausalLanguageModelHparams

    learning_rate: float = 1e-3
    weight_decay: float = 1e-2
    gradient_clip_norm: float = 1.0
    accumulating_steps_n: int = 1

    text_dataset_name: str
    text_tokenizer_name: str
    batch_samples_n: int
    chunk_tokens_n: int
    memory_tokens_n: int = 0
    random_offset: bool = False

    warmup_tokens_n: int | None = 0.1e9
    decay_tokens_n: int | None = 1.0e9
    decay_scale: float | None = 0.1

    train_stopping_tokens_n: int | None = None
    valid_stopping_tokens_n: int | None = 0
    valid_bpb_stopping_docs_n: int | None = 0
    summary_interval_tokens_n: int | None = int(20e6)
    checkpoint_interval_tokens_n: int | None = int(1e6)


def make_training_name(hparams: HaodarTrainingHparams | None = None) -> str:
    """Build a unique run name: 'training_<timestamp>[_<hparams.name>]'."""
    stamp = datetime.now().strftime("%Y%m%d-%H%M%S-%f")
    name = f"training_{stamp}"
    if hparams is not None and hparams.name is not None:
        name = f"{name}_{hparams.name}"
    return name


def make_validating_func(*,
    hparams: HaodarTrainingHparams,
    device: torch.device,
):
    """Build a callable that runs the enabled validation passes for an engine.

    Returns ``validating_func(engine) -> dict`` merging the metrics of the
    ce/acc pass and the bpb pass. A pass is disabled when its hparams counter
    is 0 (per the ``!= 0`` guards); other values, including None, are
    forwarded to the factory as-is.

    Unlike the original, the validation dataset/tokenizer are only loaded for
    passes that are actually enabled — previously they were loaded eagerly
    even when both passes were disabled.
    """
    validating_factory = None
    if hparams.valid_stopping_tokens_n != 0:
        validating_factory = LlmptValidating.make_factory(
            text_dataset=load_preset_text_dataset(hparams.text_dataset_name, 'valid'),
            text_tokenizer=load_preset_text_tokenizer(hparams.text_tokenizer_name),
            total_tokens_n=hparams.valid_stopping_tokens_n,
            batch_samples_n=hparams.batch_samples_n,
            chunk_tokens_n=hparams.chunk_tokens_n,
            device=device)

    validating_bpb_factory = None
    if hparams.valid_bpb_stopping_docs_n != 0:
        # Each factory keeps its own dataset/tokenizer instances, matching the
        # original behavior (no sharing between the two passes).
        validating_bpb_factory = LlmptValidatingBpb.make_factory(
            text_dataset=load_preset_text_dataset(hparams.text_dataset_name, 'valid'),
            text_tokenizer=load_preset_text_tokenizer(hparams.text_tokenizer_name),
            total_docs_n=hparams.valid_bpb_stopping_docs_n,
            batch_samples_n=hparams.batch_samples_n,
            chunk_tokens_n=hparams.chunk_tokens_n,
            device=device)

    def validating_func(engine: LlmptTrainingEngine):
        """Run all enabled passes on a validating copy of *engine*; merge metrics."""
        assert isinstance(engine, SimpleLlmptTrainingEngine)
        engine = engine.make_validating_engine()

        result = {}
        if validating_factory is not None:
            validating = validating_factory(engine)
            validating.install(TqdmPlugin('Validating ce/acc'))
            result.update(validating.run())
        if validating_bpb_factory is not None:
            validating_bpb = validating_bpb_factory(engine)
            validating_bpb.install(TqdmPlugin('Validating bpb'))
            result.update(validating_bpb.run())
        return result

    return validating_func


def train(
    hparams_file_path: str = os.path.join(os.path.dirname(__file__), "hparams.json"),
    trainings_dir_path: str = os.path.join(project_dir_path, "trainings"),
    training_dir_path: str | None = None,
):
    """Run a Haodar pretraining job described by a JSON hparams file.

    Args:
        hparams_file_path: Path to the JSON file parsed as HaodarTrainingHparams.
        trainings_dir_path: Directory under which auto-named run dirs are created.
        training_dir_path: Explicit run directory; when None, a timestamped
            directory is created under trainings_dir_path.

    Raises:
        FileExistsError: If the resolved training directory already exists —
            runs are never overwritten.
    """
    print("\nLoading hparams...")

    hparams = load_and_parse_json(hparams_file_path, HaodarTrainingHparams)
    print("hparams =", json.dumps(dump_json_value(hparams), indent=2))

    print("\nConfiguring device...")

    # CUDA path: lower-precision fp32 matmuls, bf16 weights, compiled engine.
    # CPU fallback: plain fp32, eager execution.
    if torch.cuda.is_available():
        torch.set_float32_matmul_precision('medium')
        device = torch.device("cuda")
        dtype = torch.bfloat16
        compile_model = True  # was `compile`; renamed to stop shadowing the builtin
    else:
        device = torch.device("cpu")
        dtype = torch.float32
        compile_model = False

    print("\nPreparing dataset...")

    train_dataset = load_preset_llmpt_dataset(
        split_name='train',
        text_dataset_name=hparams.text_dataset_name,
        text_tokenizer_name=hparams.text_tokenizer_name,
        batch_samples_n=hparams.batch_samples_n,
        chunk_tokens_n=hparams.chunk_tokens_n,
        device=device)

    # The embedding table size is dictated by the tokenizer, not the hparams file.
    hparams.model.embs_n = train_dataset.tokenizer.vocab_tokens_n

    print("\nPreparing training...")

    if training_dir_path is None:
        training_name = make_training_name(hparams)
        training_dir_path = os.path.join(trainings_dir_path, training_name)
    # exist_ok=False: refuse to clobber a previous run's directory.
    os.makedirs(training_dir_path, exist_ok=False)
    print("training_dir_path =", training_dir_path)

    # Persist the (tokenizer-patched) hparams alongside the run artifacts.
    with open(os.path.join(training_dir_path, "hparams.json"), "wt", encoding="utf-8") as fp:
        fp.write(json.dumps(dump_json_value(hparams), indent=2))

    model = HaodarTrainingModel(
        hparams=HaodarTrainingModelHparams(
            model=hparams.model,
            memory_tokens_n=hparams.memory_tokens_n,
            random_offset=hparams.random_offset),
        dtype=dtype,
        device=device)

    engine = SimpleLlmptTrainingEngine(
        model=model,
        learning_rate=hparams.learning_rate,
        weight_decay=hparams.weight_decay,
        gradient_clip_norm=hparams.gradient_clip_norm,
        accumulating_steps_n=hparams.accumulating_steps_n,
        compile=compile_model)

    training = LlmptTraining(
        engine=engine,
        iterator=iter(train_dataset),
        total_tokens_n=train_dataset.total_tokens_n,
        training_dir_path=training_dir_path)

    validating_func = make_validating_func(
        hparams=hparams,
        device=device)

    # NOTE(review): plugin install order kept as-is — it may be significant
    # to the zkl_llmpt_training framework; confirm before reordering.
    training.install(MetricsPlugin())
    training.install(TqdmPlugin("Training"))
    training.install(LearningRatePlugin(
        warmup_tokens_n=hparams.warmup_tokens_n,
        decay_tokens_n=hparams.decay_tokens_n,
        decay_scale=hparams.decay_scale))
    training.install(SummaryPlugin(
        interval_tokens_n=hparams.summary_interval_tokens_n,
        validating_func=validating_func))
    training.install(CheckpointPlugin(
        interval_tokens_n=hparams.checkpoint_interval_tokens_n))
    training.install(StoppingPlugin(
        stopping_tokens_n=hparams.train_stopping_tokens_n))

    print("\nLaunching training...")

    training.run()


# CLI entry point: python-fire exposes train()'s parameters as command-line flags.
if __name__ == '__main__':
    fire.Fire(train)
