import math

import paddle.fluid as fluid

from paddle.fluid.optimizer import Momentum
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.io import DataLoader

from helm.datasets import CIFAR100
from helm.datasets import train_test_split

from helm.nn.loss import CrossEntropyLoss

from helm.dynamic.engine.callback import Events, ModelCheckpoint, resume
from helm.dynamic import set_device, set_seed
from helm.dynamic.lr_scheduler import CosineLR
from helm.dynamic.engine.metrics import Loss
from helm.dynamic.models.cifar import ShuffleNetV2
from helm.dynamic.engine.cls.mix import CutMix
from helm.dynamic.engine.cls import create_supervised_trainer, create_supervised_evaluator
from helm.dynamic.engine.cls.metrics import Accuracy
from helm.transforms import Compose, RandomCrop, RandomHorizontalFlip, Permute, Normalize

# Reproducibility and device setup: fix the global RNG seed, pick the
# device, then switch Paddle into dygraph (imperative) mode on it.
# Order matters: enable_dygraph must see the resolved `place`.
set_seed(0)
device = 'gpu'
place = set_device(device)
fluid.enable_dygraph(place)

# NOTE(review): hard-coded local dataset path — parameterize (env var or
# CLI flag) before running anywhere else.
data_home = "/Users/hrvvi/Code/study/pytorch/datasets/CIFAR100"
# Train-time augmentation: pad-and-crop (pad 4, fill 128) + horizontal
# flip, then layout permute and per-channel normalization. The mean/std
# are the ImageNet constants expressed on the 0-255 pixel scale.
train_transform = Compose([
    RandomCrop(32, padding=4, fill=128),
    RandomHorizontalFlip(),
    Permute(),
    Normalize([123.675, 116.28, 103.53], [58.395, 57.120, 57.375]),
])
# Evaluation: same layout/normalization, no augmentation.
test_transform = Compose([
    Permute(),
    Normalize([123.675, 116.28, 103.53], [58.395, 57.120, 57.375]),
])


ds_train = CIFAR100(data_home, train=True, transform=train_transform)
ds_test = CIFAR100(data_home, train=False, transform=test_transform)

# Keep only one piece of a 1% split of each set — presumably the small
# slice, i.e. a smoke-test-sized run. TODO(review): confirm which index
# train_test_split returns, and remove these two lines for full training.
ds_train = train_test_split(ds_train, 0.01)[1]
ds_test = train_test_split(ds_test, 0.01)[1]

num_classes = 100
# Wrap the training set with CutMix: with prob=0.5 a sample is mixed with
# another (Beta(1.0) mixing), producing soft multi-class targets.
ds_train = CutMix(ds_train, beta=1.0, prob=0.5, num_classes=num_classes)

# ShuffleNetV2 variant with squeeze-and-excitation blocks, 100-way head.
model = ShuffleNetV2(8, [16, 24, 32], (2, 2, 2), 32, use_se=True, num_classes=num_classes)
# Label smoothing (0.1) complements the soft targets produced by CutMix.
criterion = CrossEntropyLoss(label_smooth=0.1)

# Linear-scaling scaffold: `mul` scales both batch size and peak LR.
mul = 1
base_lr = 0.05
batch_size = 16 * mul
step_per_epoch = math.ceil(len(ds_train) / batch_size)
epochs = 600
# Cosine schedule from base_lr * mul down to 1e-5 over `epochs`, with a
# 10-epoch warmup. NOTE(review): warmup_min_lr=base_lr starts warmup at
# the full base LR — a no-op whenever mul == 1. Confirm this is intended
# rather than a small warmup floor (e.g. base_lr / 100).
learning_rate = CosineLR(base_lr * mul, step_per_epoch, epochs, 1e-5,
                         warmup_min_lr=base_lr, warmup_epoch=10)

# SGD with Nesterov momentum 0.9 and L2 weight decay 1e-4.
optimizer = Momentum(learning_rate, 0.9, use_nesterov=True,
                     regularization=L2Decay(1e-4), parameter_list=model.parameters())


# Training tracks loss only (CutMix-mixed targets make plain accuracy
# uninformative during training).
metrics = [
    Loss(),
]

# Evaluation additionally reports top-1 accuracy on the clean test set.
test_metrics = [
    Loss(),
    Accuracy(),
]

# Training loader: reshuffle the (CutMix-wrapped) set every epoch.
train_loader = DataLoader(
    ds_train, batch_size=batch_size, shuffle=True, num_workers=2,
    places=place, return_list=True, use_shared_memory=False)
# Evaluation loader: shuffling is pointless for metric computation and
# shuffle=False keeps batch order (and per-batch logs) deterministic.
# Larger batch is fine here — no gradients are stored during eval.
test_loader = DataLoader(
    ds_test, batch_size=batch_size * 2, shuffle=False, num_workers=2,
    places=place, return_list=True, use_shared_memory=False)

log_freq = 20    # logging interval passed to both engines (presumably iterations — confirm)
eval_freq = 5    # run evaluation after every 5th epoch
save_freq = 100  # checkpoint interval for ModelCheckpoint
save_path = "./ShuffleNetV2-CIFAR100"

# Trainer drives forward/backward/optimizer steps and reports `metrics`.
train_engine = create_supervised_trainer(
    model, criterion, optimizer, metrics, log_freq)

# Uncomment to resume from the checkpoint saved at epoch 100.
# resume(train_engine, save_path, 100)

# Evaluator runs the model without training and reports `test_metrics`.
eval_engine = create_supervised_evaluator(
    model, criterion, test_metrics, log_freq)

# Save model/optimizer state every `save_freq` units under `save_path`.
train_engine.call(ModelCheckpoint(save_freq, save_path))

# At every `eval_freq`-th EPOCH_END, run one full pass over the test set.
train_engine.call_on(
    Events.EPOCH_END, lambda _: eval_engine.run(test_loader, 1), eval_freq)

# Blocking call: trains for `epochs` epochs.
train_engine.run(train_loader, epochs)
