import argparse
import math

import paddle.fluid as fluid

import helm
from helm.datasets import MNIST, train_test_split
import helm.static.models.cifar
import helm.static.lr_scheduler
from helm.static.engine.callback import Events, ModelCheckpoint, resume
from helm.static.engine.cls import create_supervised_trainer, create_supervised_evaluator
from helm.static.engine.metrics import Loss
from helm.static.engine.cls.metrics import Accuracy
from helm.nn.loss import CrossEntropyLoss
from helm.static import set_device, set_seed

from helm.config.config import get_config, override
from helm.static.helper import get_data_loader, get_optimizer, get_lr_scheduler, get_model, get_transform, get_mix

if __name__ == '__main__':

    # CLI: a single YAML-style config file drives all settings; --resume
    # takes a checkpoint epoch number (converted with int() further down).
    parser = argparse.ArgumentParser(description='Train MNIST.')
    parser.add_argument('-c', '--config', help='config file')
    parser.add_argument('-r', '--resume', help='resume from checkpoints')
    args = parser.parse_args()

    cfg = get_config(args.config)

    # This script is MNIST-specific; fail fast on a mismatched config.
    assert cfg.Dataset.type == "MNIST"
    num_classes = 10

    # Hyper-parameter optimization via NNI: fetch this trial's parameter
    # set and override the matching dotted keys in the config
    # (e.g. a key "Optimizer.lr" overrides cfg.Optimizer.lr).
    if cfg.get("hpo"):
        import nni
        RCV_CONFIG = nni.get_next_parameter()
        for k, v in RCV_CONFIG.items():
            ks = k.split(".")
            override(cfg, ks, v)


    set_seed(cfg.seed)
    place = set_device(cfg.device)

    data_home = cfg.Dataset.data_home

    # Per-split preprocessing pipelines, each built from its own config node.
    train_transform = get_transform(cfg.Dataset.Train.transforms)
    val_transform = get_transform(cfg.Dataset.Val.transforms)
    test_transform = get_transform(cfg.Dataset.Test.transforms)

    # Carve a validation split out of the official MNIST train set.
    # Transforms are attached by train_test_split so the train and val
    # halves get different pipelines.
    ds_train = MNIST(data_home, mode='train')
    ds_train, ds_val = train_test_split(
        ds_train, test_ratio=cfg.Dataset.Split.test_ratio, random=cfg.Dataset.Split.get("random"),
        transform=train_transform, test_transform=val_transform)
    ds_test = MNIST(data_home, mode='test', transform=test_transform)

    # Debug mode: shrink every split for quick smoke runs.
    # NOTE(review): index [1] presumably selects the second ("test"-ratio)
    # half returned by train_test_split — confirm against helm.datasets.
    if cfg.get("Debug"):
        subset = cfg.Debug.get("subset")
        if subset:
            ds_train = train_test_split(ds_train, subset)[1]
            ds_val = train_test_split(ds_val, subset)[1]
            ds_test = train_test_split(ds_test, subset)[1]

    # Optional label-mixing augmentation wrapping the training dataset
    # (presumably mixup/cutmix-style — see get_mix). When active, targets
    # become soft labels, which is why Accuracy is dropped from the
    # training metrics and soft_label=use_mix is passed to the trainer.
    use_mix = cfg.get("Mix") is not None
    if use_mix:
        cfg.Mix.num_classes = num_classes
        ds_train = get_mix(cfg.Mix, ds_train)

    train_loader = get_data_loader(cfg.Dataset.Train, ds_train, place)
    val_loader = get_data_loader(cfg.Dataset.Val, ds_val, place)
    test_loader = get_data_loader(cfg.Dataset.Test, ds_test, place)

    def model_fn(is_train):
        # Build the network from the config. The cifar model zoo is reused
        # for MNIST here; cfg.input_shape must match what those models expect.
        cfg.Model.num_classes = num_classes
        model = get_model(cfg.Model, helm.static.models.cifar, is_train)
        return model

    criterion = CrossEntropyLoss(cfg.get("label_smooth"))

    # Steps per epoch are computed up front so the LR scheduler can be
    # built with the full schedule length (static-graph mode constructs
    # the schedule before training starts).
    batch_size = cfg.Dataset.Train.batch_size
    step_per_epoch = math.ceil(len(ds_train) / batch_size)
    epochs = cfg.epochs

    def optimizer_fn():
        # Factory returning (lr_scheduler, optimizer); deferred so the
        # trainer can build them inside its own program/scope.
        learning_rate = get_lr_scheduler(cfg.LRScheduler, step_per_epoch, epochs)
        optimizer = get_optimizer(cfg.Optimizer, learning_rate)
        return learning_rate, optimizer

    input_shape = cfg.input_shape

    # Training metrics: accuracy is meaningless against mixed/soft labels,
    # so it is only tracked when no Mix augmentation is active.
    metrics = [
        Loss(),
    ]
    if not use_mix:
        metrics.append(Accuracy())

    # Evaluation always runs on clean (hard) labels, so both metrics apply.
    test_metrics = [
        Loss(),
        Accuracy(),
    ]

    log_freq = cfg.log_freq
    eval_freq = cfg.get("eval_freq", 1)  # evaluate every N epochs (default 1)
    save_freq = cfg.get("save_freq")  # falsy/absent disables checkpointing
    save_path = cfg.get("save_path")
    n_saved = cfg.get("n_saved", 1)  # how many checkpoints to retain

    exe = fluid.Executor(place)

    # Both static-graph engines share one executor; soft_label is forwarded
    # so the loss accepts probability targets when Mix is enabled.
    train_engine = create_supervised_trainer(
        exe, model_fn, criterion, optimizer_fn, metrics, input_shape, log_freq, soft_label=use_mix)
    eval_engine = create_supervised_evaluator(
        exe, model_fn, criterion, test_metrics, input_shape, log_freq)
    # Initialize parameters only after both programs have been constructed.
    exe.run(fluid.default_startup_program())

    # --resume is expected to be an integer epoch number; checkpoints are
    # looked up under save_path. A non-integer value raises ValueError here.
    if args.resume:
        resume(train_engine, save_path, int(args.resume))

    if save_freq:
        train_engine.call(
            ModelCheckpoint(save_freq, save_path, n_saved=n_saved))

    # Run validation at the end of every eval_freq-th epoch.
    train_engine.call_on(
        Events.EPOCH_END, lambda _: eval_engine.run(val_loader, 1), eval_freq)

    # During HPO, report per-epoch validation accuracy to NNI. The callback
    # reads eval_engine.state after the validation callback above has run.
    # (The re-import of nni is harmless — Python caches modules — and it
    # also puts `nni` in scope for the final report below.)
    if cfg.get("hpo"):
        import nni
        train_engine.call_on(
            Events.EPOCH_END, lambda _: nni.report_intermediate_result(float(eval_engine.state.metrics['acc'])), eval_freq)

    train_engine.run(train_loader, epochs)

    # Final evaluation on the held-out test set; in HPO mode its accuracy
    # is the trial's final objective.
    eval_engine.run(test_loader, 1)

    if cfg.get("hpo"):
        nni.report_final_result(float(eval_engine.state.metrics['acc']))