"""Main entrance for train/eval with/without KD on CIFAR-10"""
import tool as tl
import argparse
import logging
import os
import time
import math
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from torch.autograd import Variable
from tqdm import tqdm
import random
import create_gauss_hot as gh
import utils
import model.net_10 as net10
import model.net as net
import model.net_2 as net2
import model.net_4 as net4
import model.net_6 as net6
import model.net_8 as net8
import model.LeNet as lenet
import model.data_loader as data_loader
import model.resnet as resnet
import model.resnet_cifar as resnetcf
from evaluate import evaluate, evaluate_kd
import csv
import ls_loss
from getSigmaDelta import Fake_Teacher
from getSigmaDelta import GetGaussTeacher
# print(torch.__version__)
# print(torch.cuda.is_available())
# Command-line interface: experiment directory, optional checkpoint restore,
# and flags selecting the label-smoothing / fake-teacher training variants.
parser = argparse.ArgumentParser()
parser.add_argument('--model_dir', default='experiments/base_model',
                    help="Directory containing params.json")

parser.add_argument('--restore_file', default=None,
                    help="Optional, name of the file in --model_dir \
                    containing weights to reload before training")  # 'best' or 'train'

# label smoothing
parser.add_argument('--label_smoothing', action='store_true',default=False, help='flag for lsr')

# fake teacher (used in the KD training mode)
parser.add_argument('--fake_noise_teacher', action='store_true',default=False, help='flag for Fake Noise Teacher')

# one_hot & Gauss hot exp (used in the baseline training mode)
parser.add_argument('--gause_hot_exp', action='store_true',default=False, help='flag for Fake Gause Hot')

# gauss_LSR
parser.add_argument('--gauss_LSR', action='store_true',default=False, help='flag for teacher label change to gauss label, and get gauss smoothing')

def resize_tensor(tensor):
    """Flatten an array-like (e.g. a weight tensor) into a 1-D numpy array."""
    # np.ravel replaces the manual shape/prod/reshape dance (and the dead
    # `length = 1` initialisation) and also handles 0-d inputs gracefully.
    return np.asarray(tensor).ravel()


def get_model_variable(para_list, name_list):
    """Concatenate the named entries of `para_list` into one flat 1-D vector.

    Args:
        para_list: (dict) name -> array-like parameter values
        name_list: (list) names to extract, in order

    Returns:
        1-D float64 numpy array with all values laid end to end.
    """
    # Seed with an empty float array so the result dtype matches the original
    # incremental np.r_ accumulation (float64), and so an empty name_list
    # yields an empty vector instead of raising.
    parts = [np.array([])]
    parts.extend(np.asarray(para_list[name]).ravel() for name in name_list)
    # One concatenate is O(total) instead of O(total^2) repeated np.r_ copies.
    return np.concatenate(parts)

def get_model_variable_resnet(para_list, name_list):
    """Flatten the named entries of `para_list` into one 1-D vector,
    skipping BatchNorm bookkeeping entries.

    Entries whose last dotted component ends in 'tracked'
    (e.g. "bn1.num_batches_tracked") are integer counters, not weights,
    and are excluded from the result.

    Args:
        para_list: (dict) name -> array-like parameter values
        name_list: (list) names to consider, in order

    Returns:
        1-D float64 numpy array of the kept values laid end to end.
    """
    parts = [np.array([])]  # float seed keeps the original np.r_ dtype
    for name in name_list:
        if name.split('.')[-1].split('_')[-1] == 'tracked':
            continue  # skip num_batches_tracked counters
        parts.append(np.asarray(para_list[name]).ravel())
    # Single concatenate avoids the quadratic repeated-np.r_ copying.
    return np.concatenate(parts)

def get_model_params(model, params):
    """Return all of `model`'s state_dict tensors flattened into one 1-D vector.

    BatchNorm `num_batches_tracked` counters are filtered out by
    get_model_variable_resnet.

    Args:
        model: (torch.nn.Module) the network whose weights to flatten
        params: unused; kept for backward compatibility with existing callers

    Returns:
        1-D numpy array of every remaining state_dict entry, concatenated.
    """
    # Snapshot the state_dict once; the original rebuilt it for every key.
    state = model.state_dict()
    name_list = list(state.keys())
    params_list = {name: state[name].cpu().numpy() for name in name_list}
    return get_model_variable_resnet(params_list, name_list)


def train(trainning_perteacher, model, optimizer, loss_fn, dataloader, metrics, params):
    """Train the model for one full pass over `dataloader`.

    Args:
        trainning_perteacher: (int) index of the current outer training circle
            (unused here; kept for interface compatibility with callers)
        model: (torch.nn.Module) the neural network, updated in place
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: callable (outputs, labels) -> loss tensor
        dataloader: (torch.utils.data.DataLoader) training batches
        metrics: (dict) name -> function(outputs, labels) on numpy arrays
        params: (Params) hyperparameters; must expose `cuda` and
            `save_summary_steps`
    """
    # set model to training mode
    model.train()

    # summary for current training loop and a running average object for loss
    summ = []
    loss_avg = utils.RunningAverage()

    # Use tqdm for progress bar
    with tqdm(total=len(dataloader)) as t:
        for i, (train_batch, labels_batch) in enumerate(dataloader):
            # move to GPU if available; `non_blocking` replaces the removed
            # `async` keyword argument (`async` is a SyntaxError on Py >= 3.7)
            if params.cuda:
                train_batch = train_batch.cuda(non_blocking=True)
                labels_batch = labels_batch.cuda(non_blocking=True)
            # NOTE: the deprecated Variable() wrappers were dropped; they are
            # identity operations on torch >= 0.4

            # compute model output and loss
            output_batch = model(train_batch)
            loss = loss_fn(output_batch, labels_batch)

            # clear previous gradients, compute gradients of all variables wrt loss
            optimizer.zero_grad()
            loss.backward()

            # performs updates using calculated gradients
            optimizer.step()

            # Evaluate summaries only once in a while
            if i % params.save_summary_steps == 0:
                # detach from graph, move to cpu, convert to numpy arrays
                output_batch = output_batch.data.cpu().numpy()
                labels_batch = labels_batch.data.cpu().numpy()

                # compute all metrics on this batch
                summary_batch = {metric: metrics[metric](output_batch, labels_batch)
                                 for metric in metrics}
                summary_batch['loss'] = loss.item()
                summ.append(summary_batch)

            # update the average loss
            loss_avg.update(loss.item())

            t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
            t.update()

    # compute mean of all metrics in summary
    metrics_mean = {metric: np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    logging.info("- Train metrics: " + metrics_string)


def train_and_evaluate(trainning_perteacher, model, train_dataloader, val_dataloader, optimizer,
                       loss_fn, metrics, params, model_dir, circle, restore_file=None, simple_root_file=None):
    """Train the model, evaluate every epoch, and record the weight trajectory.

    Args:
        trainning_perteacher: (int) index used to name per-run checkpoint dirs
        model: (torch.nn.Module) the neural network, trained in place
        train_dataloader / val_dataloader: training and validation batches
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn: callable (outputs, labels) -> loss tensor
        metrics: (dict) name -> metric function on numpy arrays
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        circle: (int) index of the current repetition, used in output paths
        restore_file: (string) optional checkpoint name (without .pth.tar)
        simple_root_file: (string) directory for the csv weight/acc/loss logs

    Returns:
        (float) L2 distance between the initial and final flattened weights.
    """
    # reload weights from restore_file if specified (uses module-level `args`)
    if restore_file is not None:
        restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        utils.load_checkpoint(restore_path, model, optimizer)

    best_val_acc = 0.0

    # Every supported baseline ("cnn", "2layercnn", "LeNet") used the identical
    # schedule; one unconditional assignment also fixes the NameError the
    # original hit for any other model_version (scheduler was left unbound).
    scheduler = StepLR(optimizer, step_size=100, gamma=0.2)

    acc_list = []
    los_list = []
    weight_matrix = []

    spnet_file = simple_root_file + '/' + params.model_version + str(circle) + '.csv'
    accspnet_file = simple_root_file + '/acc' + params.model_version + str(circle) + '.csv'
    lossspnet_file = simple_root_file + '/loss' + params.model_version + str(circle) + '.csv'

    weight_begin = get_model_params(model, params)
    weight_matrix.append(weight_begin)
    print(weight_begin)

    # metrics of the untrained model as the epoch-0 reference point
    val_metrics_ = evaluate(model, loss_fn, val_dataloader, metrics, params)
    acc_list.append(val_metrics_['accuracy'])
    los_list.append(val_metrics_['loss'])

    # ---- save the original model (epoch == 0) ----
    model_save_path = 'model_param_{}_KD/'.format(params.model_version)
    os.makedirs(model_save_path, exist_ok=True)
    model_dir2 = model_save_path + '/' + params.model_version + 'lsr/'
    os.makedirs(model_dir2, exist_ok=True)
    model_dir3 = model_dir2 + str(circle) + '/'
    os.makedirs(model_dir3, exist_ok=True)
    # created for directory-layout parity; the epoch-0 checkpoint itself goes
    # to model_dir2_begin below
    model_dir4 = model_dir2 + str(circle) + '/' + str(-1)
    os.makedirs(model_dir4, exist_ok=True)

    model_dir2_begin = model_dir + '/' + params.model_version + '/' + str(trainning_perteacher) + '/' + 'begin/'
    os.makedirs(model_dir2_begin, exist_ok=True)
    utils.save_checkpoint({'epoch': 0,
                           'state_dict': model.state_dict(),
                           'optim_dict': optimizer.state_dict()},
                          is_best=True,
                          checkpoint=model_dir2_begin)

    for epoch in range(params.num_epochs):
        # NOTE: pre-1.1.0 torch ordering (scheduler steps before the epoch);
        # kept to preserve the original learning-rate timetable
        scheduler.step()

        # Run one epoch
        logging.info("Circle {}".format(circle + 1))
        logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))

        # one full pass over the training set
        train(trainning_perteacher, model, optimizer, loss_fn, train_dataloader, metrics, params)

        # Evaluate for one epoch on validation set
        val_metrics = evaluate(model, loss_fn, val_dataloader, metrics, params)

        val_acc = val_metrics['accuracy']
        is_best = val_acc >= best_val_acc

        acc_list.append(val_metrics['accuracy'])
        los_list.append(val_metrics['loss'])

        # save the rolling last/best checkpoint
        model_dir2 = model_dir + '/' + params.model_version + '/' + str(trainning_perteacher) + '/end/'
        os.makedirs(model_dir2, exist_ok=True)
        utils.save_checkpoint({'epoch': epoch + 1,
                               'state_dict': model.state_dict(),
                               'optim_dict': optimizer.state_dict()},
                              is_best=is_best,
                              checkpoint=model_dir2)

        # save a per-epoch snapshot under model_save_path
        model_dir2 = model_save_path + '/' + params.model_version + '/'
        os.makedirs(model_dir2, exist_ok=True)
        model_dir3 = model_dir2 + str(circle) + '/'
        os.makedirs(model_dir3, exist_ok=True)
        model_dir4 = model_dir2 + str(circle) + '/' + str(epoch)
        os.makedirs(model_dir4, exist_ok=True)
        utils.save_checkpoint({'epoch': epoch + 1,
                               'state_dict': model.state_dict(),
                               'optim_dict': optimizer.state_dict()},
                              is_best=True,
                              checkpoint=model_dir4)

        # If best_eval, record best metrics json
        if is_best:
            logging.info("- Found new best accuracy")
            best_val_acc = val_acc

            # Save best val metrics in a json file in the model directory
            best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
            utils.save_dict_to_json(val_metrics, best_json_path)

        # Save latest val metrics in a json file in the model directory
        last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
        utils.save_dict_to_json(val_metrics, last_json_path)

        weight_temp = get_model_params(model, params)
        weight_matrix.append(weight_temp)
        print(weight_temp)

    weight_last = get_model_params(model, params)
    mid_each_dis, mid_dis = tl.distance(weight_matrix)

    # write the weight-distance trajectory, then the acc/loss histories;
    # the `with` blocks close the handles that the original left leaking
    # via csv.writer(open(...))
    with open(spnet_file, 'w+', newline='') as f:
        writer_csv = csv.writer(f)
        writer_csv.writerow(mid_dis)
        writer_csv.writerow(mid_each_dis)
    with open(accspnet_file, 'w+', newline='') as f:
        csv.writer(f).writerow(acc_list)
    with open(lossspnet_file, 'w+', newline='') as f:
        csv.writer(f).writerow(los_list)

    dis = np.linalg.norm(weight_last - weight_begin)
    print('the distance is {}'.format(dis))
    return dis


# Defining train_kd & train_and_evaluate_kd functions
def train_kd(model, teacher_model, optimizer, loss_fn_kd, dataloader, metrics, params):
    """Train the student `model` for one epoch with knowledge distillation.

    Args:
        model: (torch.nn.Module) the student network, updated in place
        teacher_model: (torch.nn.Module) frozen teacher providing soft targets
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn_kd: callable (student_out, labels, teacher_out, params) -> loss
        dataloader: (torch.utils.data.DataLoader) training batches
        metrics: (dict) name -> function(outputs, labels) on numpy arrays
        params: (Params) hyperparameters; must expose `cuda` and
            `save_summary_steps`

    Depends on the module-level `args` for the fake_noise_teacher / gauss_LSR
    training variants.
    """
    # set student to training mode and freeze the teacher
    model.train()
    teacher_model.eval()

    # summary for current training loop and a running average object for loss
    summ = []
    loss_avg = utils.RunningAverage()

    # Use tqdm for progress bar
    with tqdm(total=len(dataloader)) as t:
        for i, (train_batch, labels_batch) in enumerate(dataloader):
            # move to GPU if available; `non_blocking` replaces the removed
            # `async` keyword argument (`async` is a SyntaxError on Py >= 3.7)
            if params.cuda:
                train_batch = train_batch.cuda(non_blocking=True)
                labels_batch = labels_batch.cuda(non_blocking=True)

            # compute student output
            output_batch = model(train_batch)

            # teacher forward pass without building an autograd graph
            with torch.no_grad():
                output_teacher_batch = teacher_model(train_batch)
            if params.cuda:
                output_teacher_batch = output_teacher_batch.cuda(non_blocking=True)

            # optionally replace the teacher output with a synthetic
            # ("fake noise") teacher derived from it
            if args.fake_noise_teacher:
                fake_teacher = Fake_Teacher(output_teacher_batch)
                if params.cuda:
                    fake_teacher = fake_teacher.cuda(non_blocking=True)
                output_teacher_batch = fake_teacher

            if args.gauss_LSR:
                # smooth the teacher labels with a gaussian before distilling
                output_teacher_gauss_smooth = GetGaussTeacher(output_teacher_batch, labels_batch)
                if params.cuda:
                    output_teacher_gauss_smooth = output_teacher_gauss_smooth.cuda(non_blocking=True)
                loss = loss_fn_kd(output_batch, labels_batch, output_teacher_gauss_smooth, params)
            else:
                loss = loss_fn_kd(output_batch, labels_batch, output_teacher_batch, params)

            # clear previous gradients, compute gradients of all variables wrt loss
            optimizer.zero_grad()
            loss.backward()

            # performs updates using calculated gradients
            optimizer.step()

            # Evaluate summaries only once in a while
            if i % params.save_summary_steps == 0:
                # detach from graph, move to cpu, convert to numpy arrays
                output_batch = output_batch.data.cpu().numpy()
                labels_batch = labels_batch.data.cpu().numpy()

                # compute all metrics on this batch
                summary_batch = {metric: metrics[metric](output_batch, labels_batch)
                                 for metric in metrics}
                summary_batch['loss'] = loss.item()
                summ.append(summary_batch)

            # update the average loss
            loss_avg.update(loss.item())

            t.set_postfix(loss='{:05.3f}'.format(loss_avg()))
            t.update()

    # compute mean of all metrics in summary
    metrics_mean = {metric: np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    logging.info("- Train metrics: " + metrics_string)
    # (the original also averaged an always-empty `losslist`, which produced a
    # NaN under a RuntimeWarning and was never used — removed)



def train_and_evaluate_kd(model, teacher_model, train_dataloader, val_dataloader, optimizer,
                       loss_fn_kd, metrics, params, model_dir, circle, restore_file=None, KD_root_file=None):
    """Train the student with KD, evaluate every epoch, record weight trajectory.

    Args:
        model: (torch.nn.Module) the student network, trained in place
        teacher_model: (torch.nn.Module) frozen pre-trained teacher
        train_dataloader / val_dataloader: training and validation batches
        optimizer: (torch.optim) optimizer for parameters of model
        loss_fn_kd: callable (student_out, labels, teacher_out, params) -> loss
        metrics: (dict) name -> metric function on numpy arrays
        params: (Params) hyperparameters
        model_dir: (string) directory containing config, weights and log
        circle: (int) index of the current repetition, used in output paths
        restore_file: (string) optional checkpoint (without .pth.tar) to restore
        KD_root_file: (string) directory for the csv weight/acc/loss logs

    Returns:
        (float) L2 distance between the initial and final flattened weights.
    """
    # reload weights from restore_file if specified (uses module-level `args`)
    if restore_file is not None:
        restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_path))
        utils.load_checkpoint(restore_path, model, optimizer)

    best_val_acc = 0.0

    # learning-rate schedule: the resnet student decays later and harder;
    # every other listed version used the identical StepLR(100, 0.2), so
    # collapsing the chain also fixes the unbound-`scheduler` NameError
    # for any unlisted model_version
    if params.model_version == "resnet18_distill":
        scheduler = StepLR(optimizer, step_size=150, gamma=0.1)
    else:
        scheduler = StepLR(optimizer, step_size=100, gamma=0.2)

    KDnet_file = KD_root_file + '/' + params.teacher + 'KD' + params.model_version + str(circle) + '.csv'
    accKDnet_file = KD_root_file + '/acc' + params.model_version + str(circle) + '.csv'
    lossKDnet_file = KD_root_file + '/loss' + params.model_version + str(circle) + '.csv'

    acc_list = []
    los_list = []
    weight_matrix = []

    weight_begin = get_model_params(model, params)
    weight_matrix.append(weight_begin)
    print(weight_begin)

    # metrics of the untrained student as the epoch-0 reference point
    # (plain cross-entropy loss, not the KD loss)
    val_metrics_ = evaluate(model, net.loss_fn, val_dataloader, metrics, params)
    acc_list.append(val_metrics_['accuracy'])
    los_list.append(val_metrics_['loss'])

    # suffix naming the label-smoothing variant; defaulting to '' fixes the
    # NameError the original raised when neither flag was set
    global mode
    if args.label_smoothing:
        mode = '_lsr'
    elif args.gauss_LSR:
        mode = '_gausslsr'
    else:
        mode = ''

    # save the untrained student (epoch index -1)
    model_dir2 = model_dir + '/' + params.teacher + '_' + mode
    os.makedirs(model_dir2, exist_ok=True)
    model_dir3 = model_dir2 + str(circle) + '/'
    os.makedirs(model_dir3, exist_ok=True)
    model_dir4 = model_dir2 + str(circle) + '/' + str(-1)
    os.makedirs(model_dir4, exist_ok=True)
    utils.save_checkpoint({'epoch': 0,
                           'state_dict': model.state_dict(),
                           'optim_dict': optimizer.state_dict()},
                          is_best=True,
                          checkpoint=model_dir4)

    for epoch in range(params.num_epochs):
        # NOTE: pre-1.1.0 torch ordering (scheduler steps before the epoch);
        # kept to preserve the original learning-rate timetable
        scheduler.step()

        # Run one epoch
        logging.info("Circle {}".format(circle + 1))
        logging.info("Epoch {}/{}".format(epoch + 1, params.num_epochs))

        # one full pass over the training set
        train_kd(model, teacher_model, optimizer, loss_fn_kd, train_dataloader,
                 metrics, params)

        # KD-loss metrics decide "best"; plain CE metrics feed the csv logs
        val_metrics = evaluate_kd(model, val_dataloader, metrics, params, teacher_model, loss_fn_kd, args)
        val_metrics_ = evaluate(model, net.loss_fn, val_dataloader, metrics, params)

        los_list.append(val_metrics_['loss'])

        val_acc = val_metrics['accuracy']
        is_best = val_acc >= best_val_acc

        acc_list.append(val_metrics_['accuracy'])

        # Save the rolling last/best checkpoint into model_dir
        utils.save_checkpoint({'epoch': epoch + 1,
                               'state_dict': model.state_dict(),
                               'optim_dict': optimizer.state_dict()},
                              is_best=is_best,
                              checkpoint=model_dir)

        # Save a per-epoch snapshot under the variant directory
        model_dir2 = model_dir + '/' + params.teacher + '_' + mode
        os.makedirs(model_dir2, exist_ok=True)
        model_dir3 = model_dir2 + str(circle) + '/'
        os.makedirs(model_dir3, exist_ok=True)
        model_dir4 = model_dir2 + str(circle) + '/' + str(epoch)
        os.makedirs(model_dir4, exist_ok=True)
        utils.save_checkpoint({'epoch': epoch + 1,
                               'state_dict': model.state_dict(),
                               'optim_dict': optimizer.state_dict()},
                              is_best=True,
                              checkpoint=model_dir4)

        # If best_eval, record best metrics json
        if is_best:
            logging.info("- Found new best accuracy")
            best_val_acc = val_acc

            # Save best val metrics in a json file in the model directory
            best_json_path = os.path.join(model_dir, "metrics_val_best_weights.json")
            utils.save_dict_to_json(val_metrics, best_json_path)

        # Save latest val metrics in a json file in the model directory
        last_json_path = os.path.join(model_dir, "metrics_val_last_weights.json")
        utils.save_dict_to_json(val_metrics, last_json_path)

        weight_temp = get_model_params(model, params)
        weight_matrix.append(weight_temp)
        print(weight_temp)

    weight_last = get_model_params(model, params)
    mid_each_dis, mid_dis = tl.distance(weight_matrix)

    # write the weight-distance trajectory, then the acc/loss histories;
    # the `with` blocks close the handles that the original left leaking
    # via csv.writer(open(...))
    with open(KDnet_file, 'w+', newline='') as f:
        writer_csv = csv.writer(f)
        writer_csv.writerow(mid_dis)
        writer_csv.writerow(mid_each_dis)
    with open(accKDnet_file, 'w+', newline='') as f:
        csv.writer(f).writerow(acc_list)
    with open(lossKDnet_file, 'w+', newline='') as f:
        csv.writer(f).writerow(los_list)

    dis = np.linalg.norm(weight_last - weight_begin)
    print('the distance is {}'.format(dis))

    return dis


if __name__ == '__main__':
    # number of independent repetitions ("circles") of the whole experiment
    trainning_perteacher = 3

    distances = []
    for circle in range(trainning_perteacher):

        # Load the parameters from json file
        args = parser.parse_args()
        json_path = os.path.join(args.model_dir, 'params.json')
        assert os.path.isfile(json_path), "No json configuration file found at {}".format(json_path)
        params = utils.Params(json_path)

        # use GPU if available
        params.cuda = torch.cuda.is_available()
        print('the gpu is {}'.format(params.cuda))
        if params.cuda:
            # current_device() raises when CUDA is unavailable, so only query
            # it on GPU machines (the original crashed here on CPU-only boxes)
            print('the current gpu is {}'.format(torch.cuda.current_device()))

        # Set the logger
        utils.set_logger(os.path.join(args.model_dir, 'train.log'))

        # Create the input data pipeline
        logging.info("Loading the datasets...")

        # fetch dataloaders, considering full-set vs. sub-set scenarios
        if params.subset_percent < 1.0:
            train_dl = data_loader.fetch_subset_dataloader('train', params)
        else:
            train_dl = data_loader.fetch_dataloader('train', params)

        dev_dl = data_loader.fetch_dataloader('dev', params)

        logging.info("- done.")

        """Based on the model_version, determine model/optimizer and KD training mode"""
        if "distill" in params.model_version:
            # ---- student model selection ----
            # train a 5-layer CNN with KD
            if params.model_version == "cnn_distill":
                model = net.Net(params).cuda() if params.cuda else net.Net(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn_kd = net.loss_fn_kd
                metrics = net.metrics
            # train a 2-layer CNN with KD
            if params.model_version == "cnn_distill2":
                model = net2.Net(params).cuda() if params.cuda else net2.Net(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn_kd = net2.loss_fn_kd
                metrics = net2.metrics
            # the three LeNet student variants share an identical setup
            if params.model_version in ('LeNet_distill', 'LeNet_TA_distill', 'LeNet_res110_distill'):
                model = lenet.LeNet(params).cuda() if params.cuda else lenet.LeNet(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn_kd = lenet.loss_fn_kd
                metrics = lenet.metrics

            # ---- pre-trained teacher selection ----
            if params.teacher == "cnn":
                teacher_params = utils.Params('experiments/base_cnn/params.json')
                teacher_model = net.Net(teacher_params)
                teacher_checkpoint = 'experiments/base_cnn/best.pth.tar'

            elif params.teacher == '4layer_cnn':
                teacher_params = utils.Params('experiments/base_cnn_4layer/params.json')
                teacher_model = net4.Net(teacher_params)
                teacher_checkpoint = 'experiments/base_cnn_4layer/best.pth.tar'

            elif params.teacher == '6layer_cnn':
                teacher_params = utils.Params('experiments/base_cnn_6layer/params.json')
                teacher_model = net6.Net(teacher_params)
                teacher_checkpoint = 'experiments/base_cnn_6layer/best.pth.tar'

            elif params.teacher == '8layer_cnn':
                teacher_params = utils.Params('experiments/base_cnn_8layer/params.json')
                teacher_model = net8.Net(teacher_params)
                teacher_checkpoint = 'experiments/base_cnn_8layer/best.pth.tar'

            elif params.teacher == '10layer_cnn':
                teacher_params = utils.Params('experiments/base_cnn_10layer/params.json')
                teacher_model = net10.Net(teacher_params)
                teacher_checkpoint = 'experiments/base_cnn_10layer/best.pth.tar'

            elif params.teacher == 'resnet18':
                teacher_params = utils.Params('experiments/base_resnet18/params.json')
                teacher_model = resnet.ResNet18()
                teacher_checkpoint = 'experiments/base_resnet18/best.pth.tar'

            elif params.teacher == 'resnet20':
                teacher_params = utils.Params('experiments/base_resnet20/params.json')
                teacher_model = resnetcf.resnet_book.get('20')(num_classes=10)
                teacher_checkpoint = 'experiments/base_resnet20/best.pth.tar'

            elif params.teacher == 'resnet110':
                teacher_params = utils.Params('experiments/base_resnet110/params.json')
                teacher_model = resnetcf.resnet_book.get('110')(num_classes=10)
                teacher_checkpoint = 'experiments/base_resnet110/best.pth.tar'

            # move the teacher to GPU once, instead of once per branch
            teacher_model = teacher_model.cuda() if params.cuda else teacher_model

            # fake teacher: train_kd replaces the real teacher output with a
            # synthetic one built from its mean/variance statistics
            if args.fake_noise_teacher:
                print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>fake_noise_teacher<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

            if args.gauss_LSR:
                print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Gauss LSR<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')

            utils.load_checkpoint(teacher_checkpoint, teacher_model)

            # Train the model with KD
            logging.info("Experiment - model version: {}".format(params.model_version))
            logging.info("Starting training for {} epoch(s)".format(params.num_epochs))
            logging.info("First, loading the teacher model and computing its outputs...")

            print('Teacher model is {}, and the Student model is {}'.format(params.teacher, params.model_version))

            # directory for the csv weight/acc/loss logs
            csv_root_file = 'weights_params/'
            if args.gauss_LSR:
                KD_root_file = csv_root_file + 'Gauss_LSR_KD'
            elif args.label_smoothing:
                KD_root_file = csv_root_file + 'LSR_KD'
            else:
                KD_root_file = csv_root_file + 'KD'
            if not os.path.exists(KD_root_file):
                os.makedirs(KD_root_file)

            dis = train_and_evaluate_kd(model, teacher_model, train_dl, dev_dl, optimizer, loss_fn_kd,
                                        metrics, params, args.model_dir, circle, args.restore_file, KD_root_file)

            distances.append(dis)
            print('show results of all circles train: {}'.format(distances))

        # non-KD mode: regular training of the baseline CNN or LeNet
        else:
            if params.model_version == "cnn":
                model = net.Net(params).cuda() if params.cuda else net.Net(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn = net.loss_fn
                metrics = net.metrics
            if params.model_version == "2layercnn":
                model = net2.Net(params).cuda() if params.cuda else net2.Net(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn = net2.loss_fn
                metrics = net2.metrics
            if params.model_version == "LeNet":
                # BUG FIX: the CPU fallback used to build net.Net instead of LeNet
                model = lenet.LeNet(params).cuda() if params.cuda else lenet.LeNet(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn = lenet.loss_fn
                metrics = lenet.metrics

            if args.label_smoothing:
                print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>Label Smoothing<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                loss_fn = ls_loss.loss_label_smoothing

            if args.gause_hot_exp:
                # gauss-hot baseline overrides model/loss with the 10-layer cnn
                print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>gause_hot_exp<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')
                model = net10.Net(params).cuda() if params.cuda else net10.Net(params)
                optimizer = optim.Adam(model.parameters(), lr=params.learning_rate)
                loss_fn = net10.loss_fn
                metrics = net10.metrics

            # Train the model
            logging.info("Starting training for {} epoch(s)".format(params.num_epochs))

            # directory for the csv weight/acc/loss logs
            csv_root_file = 'weights_params/'
            simple_root_file = csv_root_file + 'simple'
            if not os.path.exists(simple_root_file):
                os.makedirs(simple_root_file)

            print('training model is {}'.format(params.model_version))

            # NOTE: `circle` is passed for both the trainning_perteacher and
            # circle parameters, matching the original call site
            dis = train_and_evaluate(circle, model, train_dl, dev_dl, optimizer, loss_fn, metrics, params,
                                     args.model_dir, circle, args.restore_file, simple_root_file)

            distances.append(dis)
            print('show results of all circles train: {}'.format(distances))