import os
import stat
import argparse
import numpy as np
import mindspore as ms
from mindspore.communication import init
from mindspore.nn.optim import Momentum
from mindspore import context, nn, ops, Tensor
from mindspore.context import ParallelMode
from dataset.cifar100 import create_dataset, create_crd_dataset
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.train.model import Model
from model.vgg import vgg8, vgg13, vgg16
from mindspore import save_checkpoint
from utils.lr_generator import get_lr
from mindspore.communication import get_rank
from utils.metric import evaluate_accuracy_gpu, accuracy
from mindspore.common.initializer import One

from kd_zoo import KLLossCell, ATLossCell, SPLossCell, CRDLossCell


# Map the CLI model-name strings (see --teacher_model / --student_model)
# to their constructor functions from model.vgg.
model_dict = {
    'vgg8' : vgg8,
    'vgg13' : vgg13,
    'vgg16' : vgg16
}


# NOTE(review): teacher_fm_mem / teacher_logits_mem are never referenced in
# this file -- presumably leftovers from a feature/logit caching scheme;
# confirm before removing.
teacher_fm_mem = None
teacher_logits_mem = None
# Total number of training samples; passed to CRDLossCell as the dataset size.
all_data_num = 50000


class KDTrainStep(nn.TrainOneStepCell):
    """One optimization step for a KD loss cell.

    Wraps a loss cell (e.g. KLLossCell) and its optimizer; each call runs the
    forward pass, computes parameter gradients, reduces them across devices,
    and applies the optimizer update. Unlike the stock
    ``TrainOneStepCell.construct``, this returns both the loss and the
    optimizer's update result.
    """

    def __init__(self, network, optimizer):
        super(KDTrainStep, self).__init__(network, optimizer)
        # Differentiate w.r.t. the parameter list (self.weights) gathered by
        # the TrainOneStepCell base class.
        self.grad = ops.GradOperation(get_by_list=True)

    def construct(self, *inputs):
        weights = self.weights
        # NOTE(review): the network is evaluated twice per step -- once for
        # the loss value and once inside the gradient computation. Confirm
        # this is intended (graph mode may or may not fuse the two).
        loss = self.network(*inputs)
        grads = self.grad(self.network, weights)(*inputs)
        # In DATA_PARALLEL mode grad_reducer all-reduces/means gradients
        # across devices; it is the identity in standalone mode.
        grads = self.grad_reducer(grads)
        return loss, self.optimizer(grads)


def parse_option():
    """Parse command-line arguments and attach the fixed model/data settings.

    Returns an argparse.Namespace carrying both the CLI options and a set of
    hard-coded configuration attributes shared by the VGG builders.
    """
    parser = argparse.ArgumentParser('argument for training')
    parser.add_argument('--batch_size', type=int, default=64, help='batch_size')
    parser.add_argument('--epoch_size', type=int, default=90, help='number of training epochs')
    parser.add_argument('--teacher_model', type=str, default='vgg8', choices=['vgg8', 'vgg13', 'vgg16'])
    parser.add_argument('--student_model', type=str, default='vgg8', choices=['vgg8', 'vgg13', 'vgg16'])
    parser.add_argument('--kd', type=str, default='KL', choices=['KL', 'AT', 'SP', 'CRD'])
    parser.add_argument('--pre_trained', type=str, help='pre trained model path')
    parser.add_argument('--k', type=int, default=16384, help='crd k')
    options = parser.parse_args()

    # Fixed (non-CLI) configuration consumed by model_dict constructors and
    # the CRD loss cell.
    fixed_settings = {
        'num_classes': 100,
        'batch_norm': True,
        'initialize_mode': "XavierUniform",
        'padding': 0,
        'pad_mode': 'same',
        'has_bias': True,
        'has_dropout': True,
        'feat_dim': 128,
    }
    for attr, value in fixed_settings.items():
        setattr(options, attr, value)

    return options


def test_teacher(net, options):
    """Evaluate the pre-trained teacher on the CIFAR-100 test split and print accuracy."""
    eval_set, sample_count = create_dataset('./data/cifar-100-python', train=False, batch_size=options.batch_size)
    print("test size: ", sample_count)
    criterion = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    evaluator = Model(net, loss_fn=criterion, metrics={'acc'})
    outcome = evaluator.eval(eval_set)
    print("teacher testset result: ", outcome)


def train_student(teacher_net, train_set, options):
    """Distill knowledge from `teacher_net` into a freshly built student model.

    Runs data-parallel training with the KD loss selected by ``options.kd``,
    evaluates the student on the CIFAR-100 test split after every epoch, and
    (on rank 0 only) keeps the best-accuracy checkpoint on disk.

    Args:
        teacher_net: frozen, pre-trained teacher network (already in eval mode).
        train_set: training dataset; CRD datasets yield (ind, X, y, neg)
            tuples, all others yield (X, y).
        options: namespace produced by parse_option().
    """
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
    student_net = model_dict[options.student_model](options.num_classes, options)

    # Probe both networks with a dummy batch to discover the feature-map
    # channel counts (s_dim / t_dim) the KD loss cells need.
    # NOTE(review): the probe shape (32, 3, 224, 224) looks ImageNet-sized
    # while the data is CIFAR-100 -- confirm the VGG models expect this.
    tmp_X = Tensor(shape=(32, 3, 224, 224), dtype=ms.float32, init=One())
    tea_fm, _ = teacher_net(tmp_X, need_feature_map=True)
    student_net.set_train(False)
    stu_fm, _ = student_net(tmp_X, need_feature_map=True)
    student_net.set_train(True)

    options.s_dim = stu_fm.shape[1]
    options.t_dim = tea_fm.shape[1]

    print(options.s_dim, options.t_dim)

    # Wrap the student with the requested distillation loss cell.
    if options.kd == 'KL':
        net_with_KD = KLLossCell(student_net, 4)
    elif options.kd == 'AT':
        net_with_KD = ATLossCell(student_net, 10000)
    elif options.kd == 'SP':
        net_with_KD = SPLossCell(student_net, 1)
    elif options.kd == 'CRD':
        net_with_KD = CRDLossCell(student_net, options, all_data_num, 0.5)
    else:
        # argparse `choices` should make this unreachable; fail loudly rather
        # than hitting an UnboundLocalError on net_with_KD below.
        raise ValueError('unknown kd method: {}'.format(options.kd))

    step_size = train_set.get_dataset_size()
    lr = Tensor(get_lr(lr_init=0.01, lr_end=0.0001, lr_max=0.1, warmup_epochs=5, total_epochs=options.epoch_size, steps_per_epoch=step_size, lr_decay_mode='poly'))
    opt = Momentum(filter(lambda x: x.requires_grad, student_net.get_parameters()), lr, 0.9)

    train_net = KDTrainStep(net_with_KD, opt)
    test_set, _ = create_dataset('./data/cifar-100-python', train=False, batch_size=options.batch_size)

    best_acc = 0
    ckpt_directory = './save/student-1/{}/{}/'.format(options.student_model, options.kd)
    best_ckpt_path = ckpt_directory + 'best.ckpt'

    if not os.path.isdir(ckpt_directory):
        os.makedirs(ckpt_directory)

    if get_rank() == 0:
        print("best ckpt path: ", best_ckpt_path)

    for epoch in range(options.epoch_size):
        student_net.set_train()

        if options.kd == 'CRD':
            # CRD batches additionally carry sample indices and negatives.
            for idx, (ind, X, y, neg) in enumerate(train_set.create_tuple_iterator()):
                fm, logits = teacher_net(X, need_feature_map=True)
                train_net(ind, neg, fm, logits, X, y)
                if (idx + 1) % 100 == 0:
                    loss_val = net_with_KD(ind, neg, fm, logits, X, y, False)
                    print("Epoch {}, idx {}, loss {}, teacher_acc {}".format(epoch, idx + 1, loss_val, accuracy(logits, y) / X.shape[0]))
        else:
            for idx, (X, y) in enumerate(train_set.create_tuple_iterator()):
                fm, logits = teacher_net(X, need_feature_map=True)
                train_net(fm, logits, X, y)
                if (idx + 1) % 100 == 0:
                    loss_val = net_with_KD(fm, logits, X, y)
                    print("Epoch {}, idx {}, loss {}, teacher_acc {}".format(epoch, idx + 1, loss_val, accuracy(logits, y) / X.shape[0]))

        test_acc = evaluate_accuracy_gpu(student_net, test_set)
        print("Epoch {}, test_acc {}".format(epoch, test_acc))

        # Only rank 0 writes checkpoints, to avoid concurrent file access.
        if get_rank() == 0:
            if test_acc > best_acc:
                best_acc = test_acc
                print("save best!, acc: ", best_acc)  # fixed typo: "sava" -> "save"
                if os.path.exists(best_ckpt_path):
                    # Clear any read-only flag before replacing the old checkpoint.
                    os.chmod(best_ckpt_path, stat.S_IWRITE)
                    os.remove(best_ckpt_path)
                save_checkpoint(student_net, best_ckpt_path)


def main():
    """Entry point: set up the distributed GPU context, load data and the
    pre-trained teacher, sanity-check the teacher, then distill the student."""
    options = parse_option()
    ms.common.set_seed(233)
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    init("nccl")

    # CRD needs a dataset that also yields sample indices and negatives.
    if options.kd == 'CRD':
        train_set, data_num = create_crd_dataset('./data/cifar-100-python', options, train=True, batch_size=options.batch_size)
    else:
        train_set, data_num = create_dataset('./data/cifar-100-python', train=True, batch_size=options.batch_size)

    # Build the teacher, restore its pre-trained weights, and freeze it.
    teacher_net = model_dict[options.teacher_model](options.num_classes, options)
    checkpoint_params = load_checkpoint(options.pre_trained)
    load_param_into_net(teacher_net, checkpoint_params)
    teacher_net.set_train(False)

    print("train size: ", data_num)
    test_teacher(teacher_net, options)
    train_student(teacher_net, train_set, options)


# Script entry point.
if __name__ == '__main__':
    main()