# train.py
#!/usr/bin/env python3

""" train network using pytorch

author ZNB
"""
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from conf import settings
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import argparse
import time
import os
from torch.nn import CrossEntropyLoss
from torch.utils.data import TensorDataset,DataLoader,SequentialSampler
from utils import get_network, get_training_dataloader, get_test_dataloader, WarmUpLR, \
    most_recent_folder, most_recent_weights, last_epoch, best_acc_weights

T = 0.8   # distillation temperature (softens teacher/student distributions)
alpha = 0.5  # weight of the soft (teacher-distillation) loss term
def train(epoch):
    """Run one epoch of knowledge-distillation training of the student.

    The student is optimized against a weighted sum of
      * the hard cross-entropy loss w.r.t. the ground-truth labels, and
      * the soft KL-divergence loss between the temperature-scaled
        student and teacher output distributions.

    Relies on module-level globals: student_net, teacher_net,
    stone_training_loader, optimizer, criterion, loss_function,
    warmup_scheduler, writer, args, T, alpha.

    :param epoch: 1-based epoch index (used for logging / warm-up).
    """
    start = time.time()
    student_net.train()
    teacher_net.eval()

    for batch_index, (images, labels) in enumerate(stone_training_loader):

        if args.gpu:
            labels = labels.cuda()
            images = images.cuda()

        optimizer.zero_grad()
        outputs = student_net(images)
        # The teacher only provides soft targets; no gradient is needed
        # through it, so run it under no_grad to save memory/compute.
        with torch.no_grad():
            output_teacher = teacher_net(images)
        # BUGFIX: F.Log_softmax / F.Softmax do not exist in
        # torch.nn.functional (AttributeError at runtime); the correct
        # names are lower-case, and dim=1 makes the softmax per-sample.
        # KLDivLoss expects log-probabilities as input and probabilities
        # as target, which is exactly what is passed here.
        loss_soft = criterion(F.log_softmax(outputs / T, dim=1),
                              F.softmax(output_teacher / T, dim=1))
        student_loss = loss_function(outputs, labels)
        loss = (1 - alpha) * student_loss + alpha * loss_soft

        loss.backward()
        optimizer.step()

        # global iteration counter across epochs (1-based)
        n_iter = (epoch - 1) * len(stone_training_loader) + batch_index + 1

        # log the gradient norms of the student's last layer
        last_layer = list(student_net.children())[-1]
        for name, para in last_layer.named_parameters():
            if 'weight' in name:
                writer.add_scalar('LastLayerGradients/grad_norm2_weights', para.grad.norm(), n_iter)
            if 'bias' in name:
                writer.add_scalar('LastLayerGradients/grad_norm2_bias', para.grad.norm(), n_iter)

        print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tsum_Loss: {:0.4f}\tstudent_Loss: {:0.4f}\tteacher_Loss: {:0.4f}\tLR: {:0.6f}'.format(
            loss.item(),
            student_loss.item(),
            loss_soft.item(),
            optimizer.param_groups[0]['lr'],
            epoch=epoch,
            trained_samples=batch_index * args.b + len(images),
            total_samples=len(stone_training_loader.dataset)
        ))

        # update training loss for each iteration
        writer.add_scalar('Train/sum_loss', loss.item(), n_iter)
        writer.add_scalar('Train/student_loss', student_loss.item(), n_iter)
        writer.add_scalar('Train/teacher_loss', loss_soft.item(), n_iter)

        # per-iteration LR warm-up during the first args.warm epochs
        if epoch <= args.warm:
            warmup_scheduler.step()

    # per-epoch parameter histograms, grouped as "<layer>/<weight|bias>"
    # (splitext splits the parameter name on its last dot)
    for name, param in student_net.named_parameters():
        layer, attr = os.path.splitext(name)
        attr = attr[1:]
        writer.add_histogram("{}/{}".format(layer, attr), param, epoch)

    finish = time.time()

    print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))



@torch.no_grad()
def eval_training(epoch=0, tb=True):
    """Evaluate the student network on the test set.

    Computes the summed cross-entropy loss and the number of correct
    top-1 predictions over the whole test loader, prints a summary,
    and (when ``tb`` is True) logs average loss and accuracy to
    tensorboard.

    :param epoch: epoch index used for printing / tensorboard x-axis.
    :param tb: whether to write the metrics to the tensorboard writer.
    :return: test-set accuracy as a 0-dim float tensor.
    """
    t0 = time.time()
    student_net.eval()

    total_loss = 0.0  # summed cross-entropy over all batches
    num_correct = 0.0  # running count of correct top-1 predictions

    for images, labels in stone_test_loader:
        if args.gpu:
            images, labels = images.cuda(), labels.cuda()

        outputs = student_net(images)
        total_loss += loss_function(outputs, labels).item()
        _, preds = outputs.max(1)
        num_correct += preds.eq(labels).sum()

    elapsed = time.time() - t0
    dataset_size = len(stone_test_loader.dataset)
    avg_loss = total_loss / dataset_size
    accuracy = num_correct.float() / dataset_size

    if args.gpu:
        print('GPU INFO.....')
        print(torch.cuda.memory_summary(), end='')
    print('Evaluating Network.....')
    print('Test set: Epoch: {}, Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        epoch,
        avg_loss,
        accuracy,
        elapsed
    ))
    print()

    # add informations to tensorboard
    if tb:
        writer.add_scalar('Test/Average loss', avg_loss, epoch)
        writer.add_scalar('Test/Accuracy', accuracy, epoch)

    return accuracy




if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-net', type=str, required=False, help='net type')
    parser.add_argument('-gpu', action='store_true', default=False, help='use gpu or not')
    parser.add_argument('-b', type=int, default=128, help='batch size for dataloader')
    parser.add_argument('-warm', type=int, default=1, help='warm up training phase')
    parser.add_argument('-lr', type=float, default=0.1, help='initial learning rate')
    parser.add_argument('-resume', action='store_true', default=False, help='resume training')
    args = parser.parse_args()

    # teacher network
    # NOTE(review): the -net and -gpu command-line values are overridden
    # right below, and the teacher checkpoint path is hard-coded to a local
    # Windows path — consider making these configurable.
    weights_path = 'D:/workspace/torch_cifar100/checkpoint/resnet18/Tuesday_17_August_2021_07h_55m_14s/resnet18-154-best.pth'
    args.net = 'resnet18'
    args.gpu = True
    teacher_net = get_network(args)
    teacher_net.load_state_dict(torch.load(weights_path))
    print('teacher network structure: ')
    print(teacher_net)
    # student network: smaller model to be distilled from the teacher
    args.net = 'lenet5'
    student_net = get_network(args)

    stone_training_loader = get_training_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    # NOTE(review): the test loader reuses the *training* mean/std and is
    # shuffled — presumably intentional for this project; confirm.
    stone_test_loader = get_test_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    # hard-label loss for the student
    loss_function = nn.CrossEntropyLoss()
    # KL divergence between student and teacher output distributions
    criterion = nn.KLDivLoss()

    optimizer = optim.SGD(student_net.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=settings.MILESTONES,
                                                     gamma=0.2)  # learning rate decay
    iter_per_epoch = len(stone_training_loader)
    # per-iteration LR warm-up spanning the first args.warm epochs
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)

    if args.resume:
        # resume into the most recent checkpoint folder for this network
        # NOTE(review): only the folder is located here — no student weights
        # are actually reloaded; confirm whether resume should load them.
        recent_folder = most_recent_folder(os.path.join(settings.CHECKPOINT_PATH, args.net), fmt=settings.DATE_FORMAT)
        if not recent_folder:
            raise Exception('no recent folder were found')

        checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder)

    else:
        checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW)

    # use tensorboard
    if not os.path.exists(settings.LOG_DIR):
        os.mkdir(settings.LOG_DIR)

    # since tensorboard can't overwrite old values
    # so the only way is to create a new tensorboard log
    writer = SummaryWriter(log_dir=os.path.join(
        settings.LOG_DIR, args.net, settings.TIME_NOW))
    # dummy CIFAR-sized input used only to trace the graph for tensorboard
    input_tensor = torch.Tensor(1, 3, 32, 32)
    if args.gpu:
        input_tensor = input_tensor.cuda()
    writer.add_graph(student_net, input_tensor)

    # create checkpoint folder to save model
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    # template path filled in per save: <net>-<epoch>-<best|regular>.pth
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')
    best_acc = 0.0

    for epoch in range(1, settings.EPOCH + 1):
        if epoch > args.warm:
            # NOTE(review): passing epoch to step() is deprecated in newer
            # torch versions; plain train_scheduler.step() would suffice.
            train_scheduler.step(epoch)


        train(epoch)
        acc = eval_training(epoch)

        #    start to save best performance model after learning rate decay to 0.01
        if epoch > settings.MILESTONES[1] and best_acc < acc:
            weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='best')
            print('saving weights file to {}'.format(weights_path))
            torch.save(student_net.state_dict(), weights_path)
            best_acc = acc
            # NOTE(review): this continue also skips the periodic 'regular'
            # save below on best-accuracy epochs — confirm that is intended.
            continue

        if not epoch % settings.SAVE_EPOCH:
            weights_path = checkpoint_path.format(net=args.net, epoch=epoch, type='regular')
            print('saving weights file to {}'.format(weights_path))
            torch.save(student_net.state_dict(), weights_path)

    writer.close()



'''
Reference snippet: a generic knowledge-distillation training loop, kept for
documentation only (never executed).

for step, batch in enumerate(dataloader):
    inputs = batch[0]
    labels = batch[1]

    # forward pass through both the student and the teacher model
    output_student = model_student(inputs)
    output_teacher = model_teacher(inputs)

    # hard loss: cross-entropy between student output and the true labels
    loss_hard = loss_fun(output_student, labels)

    # soft loss: KL divergence between student and teacher predictions
    loss_soft = criterion(output_student, output_teacher)
    loss_soft = criterion(F.log_softmax(output_student/T), F.softmax(output_teacher/T))

    # F.log_softmax(outputs/T),
    # F.softmax(teacher_outputs/T)    functional (F) ops carry no parameters stored in the network
    loss = 0.9 * loss_soft + 0.1 * loss_hard
    print(loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
'''