import os
import stat
import argparse
import mindspore
from mindspore.communication import init
from mindspore import context, nn, Tensor
from dataset.cifar100 import create_dataset
from mindspore.context import ParallelMode
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from model.vgg import vgg8, vgg13, vgg16
from utils.metric import evaluate_accuracy_gpu
from mindspore.communication import get_rank
from mindspore import save_checkpoint
from utils.lr_generator import get_lr
from mindspore.nn.optim import Momentum


# Registry mapping the --model CLI choice to its network constructor.
model_dict = {
    'vgg8': vgg8,
    'vgg13': vgg13,
    'vgg16': vgg16,
}


def parse_option():
    """Parse command-line arguments and attach the fixed CIFAR-100 settings.

    Returns:
        argparse.Namespace with CLI options (batch_size, epoch_size, model)
        plus derived/constant training configuration attributes.
    """
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
    parser.add_argument('--epoch_size', type=int, default=90, help='number of training epochs')
    parser.add_argument('--model', type=str, default='vgg8', choices=['vgg8', 'vgg13', 'vgg16'])

    opts = parser.parse_args()

    # Checkpoints for each teacher model go into their own subdirectory.
    opts.ckpt_path = os.path.join('./save/teacher/', opts.model)

    # Fixed configuration for cifar-100 training (not exposed on the CLI).
    fixed_config = {
        'num_classes': 100,
        'loss_scale': 1024,
        'batch_norm': True,
        'initialize_mode': "XavierUniform",
        'padding': 0,
        'pad_mode': 'same',
        'has_bias': True,
        'has_dropout': True,
    }
    for key, value in fixed_config.items():
        setattr(opts, key, value)

    return opts

def apply_eval(eval_param):
    """Evaluate a model on a dataset and return the requested metric.

    Args:
        eval_param: dict with keys "model" (object exposing ``eval``),
            "dataset", and "metrics_name" (key into the eval result).

    Returns:
        The value of the named metric from the model's eval result.
    """
    result = eval_param["model"].eval(eval_param["dataset"])
    return result[eval_param["metrics_name"]]

def train_net(train_set, test_set, options):
    """Train the selected VGG model with data-parallel SGD and keep the best checkpoint.

    Args:
        train_set: training dataset (MindSpore dataset; provides
            ``get_dataset_size`` and ``create_tuple_iterator``).
        test_set: evaluation dataset passed to ``evaluate_accuracy_gpu``.
        options: parsed options from ``parse_option`` (model, epoch_size,
            num_classes, ckpt_path, ...).

    Side effects:
        Creates the checkpoint directory and overwrites ``best.ckpt``
        whenever rank 0 observes a new best test accuracy.
    """
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)

    loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    net = model_dict[options.model](options.num_classes, options)
    net_with_loss = nn.WithLossCell(net, loss)

    step_size = train_set.get_dataset_size()
    lr = Tensor(get_lr(lr_init=0.01, lr_end=0.0001, lr_max=0.1, warmup_epochs=5, total_epochs=options.epoch_size, steps_per_epoch=step_size, lr_decay_mode='poly'))
    opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, 0.9)

    train = nn.TrainOneStepCell(net_with_loss, opt)

    best_acc = 0
    # Reuse the path computed in parse_option instead of re-hard-coding it.
    ckpt_directory = options.ckpt_path
    best_ckpt_path = os.path.join(ckpt_directory, 'best.ckpt')

    # exist_ok avoids a crash when several ranks create the directory at once.
    os.makedirs(ckpt_directory, exist_ok=True)

    if get_rank() == 0:
        print("best ckpt path: ", best_ckpt_path)

    for epoch in range(options.epoch_size):
        net.set_train()
        for idx, (X, y) in enumerate(train_set.create_tuple_iterator()):
            # TrainOneStepCell returns the loss of the step it just ran, so
            # there is no need for a second (redundant) forward pass.
            loss_val = train(X, y)
            if (idx + 1) % 100 == 0:
                print("Epoch {}, idx {}, loss {}".format(epoch, idx + 1, loss_val))

        test_acc = evaluate_accuracy_gpu(net, test_set)
        print("Epoch {}, test_acc {}".format(epoch, test_acc))

        # Only rank 0 writes checkpoints to avoid concurrent file access.
        if get_rank() == 0 and test_acc > best_acc:
            best_acc = test_acc
            print("save best!, acc: ", best_acc)
            if os.path.exists(best_ckpt_path):
                # Clear read-only flag before replacing the old checkpoint.
                os.chmod(best_ckpt_path, stat.S_IWRITE)
                os.remove(best_ckpt_path)
            save_checkpoint(net, best_ckpt_path)


def main():
    """Entry point: configure MindSpore, build the CIFAR-100 datasets, train."""
    options = parse_option()

    mindspore.common.set_seed(233)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    init("nccl")

    data_dir = './data/cifar-100-python'
    train_set, train_num = create_dataset(data_dir, train=True, batch_size=options.batch_size)
    test_set, test_num = create_dataset(data_dir, train=False, batch_size=options.batch_size)
    print("train num {}, test num {}".format(train_num, test_num))

    train_net(train_set, test_set, options)


# Standard script entry guard: run training only when executed directly.
if __name__ == '__main__':
    main()