import math
import paddle.fluid as fluid
from paddle.fluid.io import DataLoader
from paddle.fluid.optimizer import Momentum, Adam
from paddle.fluid.regularizer import L2Decay

from helm.datasets import CIFAR10, CombineDataset, MNIST
from helm.datasets import train_test_split

from helm.nn.loss import CrossEntropyLoss

from helm.static.lr_scheduler import CosineLR
from helm.static import set_device, set_seed
from helm.static.engine.callback import Events, resume, ModelCheckpoint
from helm.static.engine.nas import create_trainer, create_evaluator
from helm.static.engine.cls.metrics import Accuracy
from helm.static.engine.metrics import Loss

from helm.static.models.layers import DEFAULTS
from helm.static.models.nas.nasbench201.model_search import Network
from helm.transforms import Compose, RandomCrop, RandomHorizontalFlip, Permute, Normalize, Pad

# Global half-precision switch: when enabled, layer construction defaults
# (shared DEFAULTS dict) are flipped to fp16 before any model is built.
fp16 = False
if fp16:
    DEFAULTS['fp16'] = True

# Fix RNG seeds for reproducibility, then pick the execution device.
set_seed(0)
place = set_device('cpu')

# Local root directory holding the MNIST dataset files.
data_home = "/Users/hrvvi/Code/study/pytorch/datasets/MNIST"

# Preprocessing: `x.repeat(3, 2)` presumably tiles the grayscale channel
# 3x along axis 2 to produce pseudo-RGB (TODO confirm transform semantics),
# Pad(2) grows 28x28 -> 32x32 (matches input_shape below), Permute moves
# channels first, Normalize applies per-channel mean/std.
# NOTE(review): train and test pipelines are identical — no train-time
# augmentation (RandomCrop / RandomHorizontalFlip are imported but unused).
train_transform = Compose([
    lambda x: x.repeat(3, 2),
    Pad(2),
    Permute(),
    Normalize([33.3285, 33.3285, 33.3285], [78.5655, 78.5655, 78.5655]),
])
test_transform = Compose([
    lambda x: x.repeat(3, 2),
    Pad(2),
    Permute(),
    Normalize([33.3285, 33.3285, 33.3285], [78.5655, 78.5655, 78.5655]),
])

# DARTS-style 50/50 split of the training set: one half trains the network
# weights (ds_train), the other the architecture parameters (ds_val);
# CombineDataset pairs the two halves for the bi-level search loop.
ds = MNIST(data_home, mode='train', transform=train_transform)
# ds = train_test_split(ds, test_ratio=0.1, random=True)[1]
ds_train, ds_val = train_test_split(ds, test_ratio=0.5, random=True)
ds_search = CombineDataset(ds_train, ds_val)
# NOTE(review): ds_test is built but never fed to a loader below — the
# evaluator runs on ds_val instead. Confirm this is intentional.
ds_test = MNIST(data_home, mode='test', transform=test_transform)


def model_fn():
    """Build a fresh NAS-Bench-201 search network.

    Fixed configuration: 3 initial channels, 2 stacked cells, 4 nodes
    per cell.
    """
    return Network(C=3, num_stacked=2, nodes=4)

# Classification loss shared by trainer and evaluator.
criterion = CrossEntropyLoss()

# Search hyper-parameters. step_per_epoch is derived from the weight half
# of the split (ds_train), which matches the search loader's batch count
# since CombineDataset pairs one ds_train sample with one ds_val sample.
base_lr = 0.1
batch_size = 8
step_per_epoch = math.ceil(len(ds_train) / batch_size)
epochs = 600


def optimizer_fn():
    """Create the LR schedule and the two optimizers for bi-level search.

    Returns a ``(lr_scheduler, arch_optimizer, weight_optimizer)`` tuple:
    Adam updates the architecture parameters, Nesterov-Momentum updates
    the network weights.
    """
    # NOTE(review): warmup_min_lr == base_lr makes the warmup segment a
    # flat line rather than a ramp from a small LR — confirm intended.
    lr = CosineLR(base_lr, step_per_epoch, epochs, 1e-5,
                  warmup_min_lr=base_lr, warmup_epoch=10)
    arch_opt = Adam(3e-4, 0.5, 0.999, regularization=L2Decay(1e-3))
    weight_opt = Momentum(lr, momentum=0.9, use_nesterov=True,
                          regularization=L2Decay(1e-4))
    return lr, arch_opt, weight_opt


# Search loader: yields paired (weight-half, arch-half) batches from the
# combined dataset, one pair per optimization step.
train_loader = DataLoader(
    ds_search, batch_size=batch_size, shuffle=True, num_workers=2,
    places=place, return_list=True, use_shared_memory=False)
# NOTE(review): despite its name this evaluates on the validation half
# (ds_val), not ds_test, and shuffles the eval data — shuffle has no
# effect on aggregate metrics but confirm ds_val vs ds_test is intended.
test_loader = DataLoader(
    ds_val, batch_size=batch_size * 2, shuffle=True, num_workers=2,
    places=place, return_list=True, use_shared_memory=False)


# Metrics tracked during the search (training) phase.
metrics = [
    Loss(),
    Accuracy(),
]

# Metrics tracked during evaluation runs.
test_metrics = [
    Loss(),
    Accuracy(),
]

# Logging / evaluation / checkpoint cadence (units per the engine's
# convention — presumably iterations for log_freq and epochs for the
# others; TODO confirm) and the checkpoint output directory.
log_freq = 10
eval_freq = 2
save_freq = 5
save_path = "./bench201"

# Static-graph executor bound to the selected device.
exe = fluid.Executor(place)

# Network input after preprocessing: 3 channels, 32x32 (padded MNIST).
input_shape = (3, 32, 32)

# Build the NAS trainer and evaluator programs, then initialize all
# parameters by running the startup program once.
train_engine = create_trainer(
    exe, model_fn, criterion, optimizer_fn, metrics, input_shape, log_freq)
eval_engine = create_evaluator(
    exe, model_fn, criterion, test_metrics, input_shape, log_freq)
exe.run(fluid.default_startup_program())

# NOTE(review): dead code — `args` is not defined in this script, so this
# line would raise NameError if re-enabled as-is.
# resume(train_engine, save_path, int(args.resume))

# Checkpoint the model every `save_freq` periods into save_path.
train_engine.call(ModelCheckpoint(save_freq, save_path))

# Run one evaluation pass over test_loader every `eval_freq` epochs.
train_engine.call_on(
    Events.EPOCH_END, lambda _: eval_engine.run(test_loader, 1), eval_freq)

# Kick off the architecture search.
train_engine.run(train_loader, epochs)
