import math
import argparse

import paddle.fluid as fluid

from helm.datasets import CIFAR10, CIFAR100
from helm.datasets import train_test_split

import helm.dynamic.models.cifar

from helm.dynamic.engine.callback import Events, ModelCheckpoint, resume
from helm.dynamic.engine.cls import create_supervised_trainer, create_supervised_evaluator
from helm.dynamic import set_device, set_seed
from helm.nn.loss import CrossEntropyLoss
from helm.dynamic.engine.metrics import Loss
from helm.dynamic.engine.cls.metrics import Accuracy
from helm.config.config import get_config
from helm.dynamic.helper import get_data_loader, get_optimizer, get_lr_scheduler, get_model, get_transform, get_mix
from helm.static.models.layers import DEFAULTS, update_defaults

def main():
    """Train a CIFAR10/CIFAR100 classifier with the helm dynamic-graph engine.

    Reads the experiment configuration given by ``--config``, builds the
    dataset, model, optimizer and train/eval engines, then runs training
    with periodic evaluation and checkpointing. Pass ``--resume N`` to
    restore engine state from checkpoint epoch ``N`` under ``save_path``.
    """
    parser = argparse.ArgumentParser(description='Train CIFAR10/CIFAR100.')
    parser.add_argument('-c', '--config', help='config file')
    parser.add_argument('-r', '--resume', help='resume from checkpoints')
    args = parser.parse_args()

    cfg = get_config(args.config)

    dataset = cfg.Dataset.type
    # Validate with an explicit exception: ``assert`` is stripped under -O.
    if dataset not in ("CIFAR10", "CIFAR100"):
        raise ValueError(
            "Dataset.type must be 'CIFAR10' or 'CIFAR100', got %r" % dataset)

    # Layer-level defaults may be overridden from the optional Global section.
    update_defaults(cfg.get("Global", {}), DEFAULTS)

    set_seed(cfg.seed)
    place = set_device(cfg.device)
    fluid.enable_dygraph(place)

    data_home = cfg.Dataset.data_home

    train_transform = get_transform(cfg.Dataset.Train.transforms)
    test_transform = get_transform(cfg.Dataset.Test.transforms)

    if dataset == "CIFAR10":
        num_classes, CIFAR = 10, CIFAR10
    else:
        num_classes, CIFAR = 100, CIFAR100

    ds_train = CIFAR(data_home, train=True, transform=train_transform)
    ds_test = CIFAR(data_home, train=False, transform=test_transform)

    # Optional debugging aid: shrink both splits to a fraction of the data.
    # train_test_split returns a pair; index [1] selects the ``ratio`` part.
    if cfg.get("Debug") and cfg.Debug.get("subset"):
        ratio = cfg.Debug.subset
        ds_train = train_test_split(ds_train, ratio)[1]
        ds_test = train_test_split(ds_test, ratio)[1]

    # Mix augmentation (e.g. mixup-style — see get_mix) wraps the train set.
    use_mix = cfg.get("Mix") is not None
    if use_mix:
        cfg.Mix.num_classes = num_classes
        ds_train = get_mix(cfg.Mix, ds_train)

    cfg.Model.num_classes = num_classes
    model = get_model(cfg.Model, helm.dynamic.models.cifar)
    criterion = CrossEntropyLoss(cfg.get("label_smooth"))

    batch_size = cfg.Dataset.Train.batch_size
    step_per_epoch = math.ceil(len(ds_train) / batch_size)
    epochs = cfg.epochs
    learning_rate = get_lr_scheduler(cfg.LRScheduler, step_per_epoch, epochs)
    optimizer = get_optimizer(cfg.Optimizer, learning_rate, model.parameters())

    # Accuracy is tracked on the train set only without Mix — presumably the
    # mixed labels make hard-label accuracy meaningless (confirm in get_mix).
    metrics = [Loss()]
    if not use_mix:
        metrics.append(Accuracy())

    test_metrics = [Loss(), Accuracy()]

    train_loader = get_data_loader(cfg.Dataset.Train, ds_train, place)
    test_loader = get_data_loader(cfg.Dataset.Test, ds_test, place)

    log_freq = cfg.log_freq
    eval_freq = cfg.get("eval_freq", 1)
    save_freq = cfg.get("save_freq")
    save_path = cfg.get("save_path")
    n_saved = cfg.get("n_saved", 1)

    train_engine = create_supervised_trainer(
        model, criterion, optimizer, metrics, log_freq,
        auxiliary_weight=cfg.get("auxiliary_weight", None))

    # ``--resume`` holds the checkpoint epoch number as a string.
    if args.resume:
        resume(train_engine, save_path, int(args.resume))

    eval_engine = create_supervised_evaluator(
        model, criterion, test_metrics, log_freq)

    train_engine.call(
        ModelCheckpoint(save_freq, save_path, n_saved=n_saved))

    # Run a full evaluation pass every ``eval_freq`` epochs.
    train_engine.call_on(
        Events.EPOCH_END, lambda _: eval_engine.run(test_loader, 1), eval_freq)

    train_engine.run(train_loader, epochs)


if __name__ == '__main__':
    main()
