import numpy as np

import mindspore
import mindspore.nn as nn
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as C
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor

from src.model import ResNet18


def train_net(num_epochs=10,
              data_root="D:\\DATASETS\\cifar-10-batches-bin",
              batch_size=32,
              num_workers=2,
              save_dir="./outputs"):
    """Train a ResNet18 classifier on the CIFAR-10 binary dataset.

    Args:
        num_epochs (int): number of training epochs.
        data_root (str): directory containing the CIFAR-10 binary files.
        batch_size (int): mini-batch size (remainder batch is dropped).
        num_workers (int): parallel workers for dataset loading and mapping.
        save_dir (str): directory where checkpoints are written.
    """
    # --- context initialization -------------------------------------------
    # Graph mode on CPU; use mindspore.PYNATIVE_MODE instead for
    # eager-style debugging.
    mindspore.set_context(mode=mindspore.GRAPH_MODE, device_target="CPU")

    # --- dataset ----------------------------------------------------------
    num_classes = 10  # CIFAR-10
    dataset = ds.Cifar10Dataset(data_root, num_parallel_workers=num_workers, shuffle=True)
    # Augmentation: pad-and-crop plus horizontal flip for regularization,
    # resize to the 224x224 input the ResNet18 here is built for, then
    # rescale/normalize and move channels first (HWC -> CHW).
    img_trans = [
        C.RandomCrop((32, 32), (4, 4, 4, 4)),
        C.RandomHorizontalFlip(prob=0.5),
        C.Resize((224, 224)),
        C.Rescale(1.0 / 255.0, 0.0),
        C.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
        C.HWC2CHW()
    ]
    # Labels must be int32 for SoftmaxCrossEntropyWithLogits(sparse=True).
    lab_trans = mindspore.dataset.transforms.c_transforms.TypeCast(mindspore.int32)
    dataset = dataset.map(operations=img_trans, input_columns="image",
                          num_parallel_workers=num_workers)
    dataset = dataset.map(operations=lab_trans, input_columns="label",
                          num_parallel_workers=num_workers)
    dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
    num_steps_per_epoch = dataset.get_dataset_size()

    # --- network and loss -------------------------------------------------
    net = ResNet18(num_classes=num_classes)
    net.set_train(True)
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")

    # --- learning rate and optimizer --------------------------------------
    # Linear decay from 1e-2 to 1e-5 across the whole run, one value per step.
    lrs = mindspore.Tensor(
        np.linspace(1e-2, 1e-5, num_epochs * num_steps_per_epoch, dtype=np.float32))
    optim = nn.SGD(net.trainable_params(), learning_rate=lrs)

    # --- compose a model --------------------------------------------------
    model = mindspore.Model(network=net, loss_fn=loss_fn, optimizer=optim, metrics={"acc"})

    # --- callbacks --------------------------------------------------------
    keep_checkpoint_max = 5
    # TimeMonitor expects the number of steps per epoch so its per-step
    # timing is meaningful (data_size=1 misreports it).
    time_cb = TimeMonitor(data_size=num_steps_per_epoch)
    loss_cb = LossMonitor(per_print_times=1)
    # Save one checkpoint per epoch. The original saved one per *step*,
    # which floods the disk and rotates through keep_checkpoint_max within
    # the first few steps of training.
    config_ck = CheckpointConfig(save_checkpoint_steps=num_steps_per_epoch,
                                 keep_checkpoint_max=keep_checkpoint_max)
    ckpt_cb = ModelCheckpoint(prefix="resnet", directory=save_dir, config=config_ck)

    # --- start training ---------------------------------------------------
    model.train(num_epochs, dataset, callbacks=[time_cb, loss_cb, ckpt_cb])


if __name__ == "__main__":
    train_net()
