import math
import os

import paddle.fluid as fluid
from paddle.fluid.clip import GradientClipByGlobalNorm
from paddle.fluid.io import DataLoader
from paddle.fluid.optimizer import Momentum, Adam
from paddle.fluid.regularizer import L2Decay

from helm.datasets import CIFAR10, CombineDataset
from helm.datasets import train_test_split
from helm.dynamic import set_device, set_seed
from helm.dynamic.engine.callback import Events, ModelCheckpoint
from helm.dynamic.engine.cls.metrics import Accuracy
from helm.dynamic.engine.darts import create_trainer, create_evaluator
from helm.dynamic.engine.metrics import Loss
from helm.dynamic.lr_scheduler import CosineLR
from helm.dynamic.models.nas.darts.model_search import Network
from helm.nn.loss import CrossEntropyLoss
from helm.static.models.layers import DEFAULTS
from helm.transforms import Compose, RandomCrop, RandomHorizontalFlip, Permute, Normalize

# Reproducibility + device: fix the global RNG seed, then put
# paddle.fluid into dygraph (imperative) mode on the selected device.
set_seed(0)
place = set_device('gpu')
fluid.enable_dygraph(place)

# CIFAR-10 root directory. Generalized: the hard-coded personal path is
# kept as the default, but CIFAR10_HOME in the environment overrides it
# so the script is not tied to one machine's filesystem layout.
data_home = os.environ.get(
    "CIFAR10_HOME", "/Users/hrvvi/Code/study/pytorch/datasets/CIFAR10")

# Per-channel mean/std on the 0-255 pixel scale, shared by both
# pipelines so train/test normalization cannot drift apart.
NORM_MEAN = [123.675, 116.28, 103.53]
NORM_STD = [58.395, 57.120, 57.375]

# Training pipeline: pad-and-crop + horizontal-flip augmentation,
# then layout conversion (Permute) and normalization.
train_transform = Compose([
    RandomCrop(32, padding=4, fill=128),
    RandomHorizontalFlip(),
    Permute(),
    Normalize(NORM_MEAN, NORM_STD),
])
# Evaluation pipeline: no augmentation, same conversion/normalization.
test_transform = Compose([
    Permute(),
    Normalize(NORM_MEAN, NORM_STD),
])

# Split the CIFAR-10 training set 50/50: one half trains the network
# weights, the other serves as the validation half; CombineDataset pairs
# the two halves so each search batch carries samples from both.
cifar_train = CIFAR10(data_home, train=True, transform=train_transform)
ds_train, ds_val = train_test_split(cifar_train, test_ratio=0.5, random=True)
ds_search = CombineDataset(ds_train, ds_val)
# NOTE(review): ds_test is built but never referenced later in this
# script (evaluation below runs on ds_val) -- confirm it is needed.
ds_test = CIFAR10(data_home, train=False, transform=test_transform)

# Turn off learnable affine (scale/shift) parameters on BatchNorm layers
# created via the project's layer defaults -- presumably so BN does not
# absorb the architecture weighting during search; verify against
# helm.static.models.layers.
DEFAULTS['bn']['affine'] = False
# DARTS search network. Arguments are positional and their meaning is
# not visible from here -- TODO confirm against Network's signature in
# helm.dynamic.models.nas.darts.model_search.
# NOTE(review): if one of these is the class count, 5 would mismatch
# CIFAR-10's 10 classes -- verify.
model = Network(8, 5, 4, 4, 1)
criterion = CrossEntropyLoss()

# Optimization hyper-parameters for the two-optimizer DARTS setup.
base_lr = 0.025
batch_size = 8
# Steps per epoch over the weight-training half of the data.
step_per_epoch = math.ceil(len(ds_train) / batch_size)
epochs = 600
# Cosine schedule from base_lr, presumably decaying toward 1e-3.
# NOTE(review): warmup_min_lr equals base_lr, so the 10-epoch warmup
# apparently starts at the full learning rate (effectively no warmup) --
# confirm this is intended.
learning_rate = CosineLR(base_lr, step_per_epoch, epochs, 1e-3,
                         warmup_min_lr=base_lr, warmup_epoch=10)
# Adam updates only the architecture parameters (model.arch_parameters()).
optimizer_arch = Adam(3e-4, 0.5, 0.999, regularization=L2Decay(1e-3), parameter_list=model.arch_parameters())
# Nesterov momentum with global-norm gradient clipping updates only the
# regular network weights (model.model_parameters()).
optimizer_model = Momentum(learning_rate, 0.9, use_nesterov=True, grad_clip=GradientClipByGlobalNorm(5),
                           regularization=L2Decay(3e-4), parameter_list=model.model_parameters())

# Separate metric instances for the training and evaluation engines so
# each engine accumulates its own state.
metrics = [Loss(), Accuracy()]
test_metrics = [Loss(), Accuracy()]

# Search loader: batches from the combined weight/validation dataset.
train_loader = DataLoader(
    ds_search, batch_size=batch_size, shuffle=True, num_workers=2,
    places=place, return_list=True, use_shared_memory=False)
# Evaluation loader over the validation half (not ds_test).
# NOTE(review): shuffle=True on an evaluation loader is unnecessary if
# the metrics are order-independent -- consider shuffle=False.
test_loader = DataLoader(
    ds_val, batch_size=batch_size * 2, shuffle=True, num_workers=2,
    places=place, return_list=True, use_shared_memory=False)

log_freq = 1     # logging frequency passed to both engines
eval_freq = 5    # run evaluation every `eval_freq` epochs (see call_on)
save_freq = 100  # checkpoint frequency passed to ModelCheckpoint
# Checkpoint directory. Fixed: the previous "./ShuffleNetV2-CIFAR10" was
# a copy-paste leftover from another script -- this file searches a
# DARTS Network, not ShuffleNetV2.
save_path = "./DARTS-CIFAR10"

# Trainer drives both optimizers (architecture + weights) as implemented
# by the project's DARTS engine.
train_engine = create_trainer(
    model, criterion, optimizer_arch, optimizer_model, metrics, log_freq)

# To resume from a saved epoch: resume(train_engine, save_path, 100)

eval_engine = create_evaluator(
    model, criterion, test_metrics, log_freq)

# Periodic checkpointing into save_path.
train_engine.call(ModelCheckpoint(save_freq, save_path))

# Run one evaluation pass over test_loader every `eval_freq` epochs.
train_engine.call_on(
    Events.EPOCH_END, lambda _: eval_engine.run(test_loader, 1), eval_freq)

train_engine.run(train_loader, epochs)
