# Imports
import math
import os

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.utils

from data_load import get_dataset, get_dataloader
from teacher_ import AverageMeter
# Student network: one conv layer + one fully-connected layer, no BN -- simpler than the teacher network.
class StudentNet(nn.Layer):
    def __init__(self):
        super(StudentNet, self).__init__()
        self.conv1 = nn.Conv2D(in_channels=3, out_channels=32, kernel_size=5, stride=1)
        self.relu = nn.ReLU()
        self.fc1 = nn.Linear(18432, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = paddle.flatten(x, 1)
        x = self.fc1(x)
        output = x
        return output
# 打印网络结构
student_Net = StudentNet()
paddle.summary(student_Net,(1, 3, 28, 28))

# !!! This is the key part of distillation:
#   loss = gamma * soft_loss + (1 - gamma) * hard_loss
# `temp` is the temperature T that softens the distributions;
# `alpha` plays the role of gamma.
# KLDivLoss computes the Kullback-Leibler divergence between Input and Label;
# note that Input must be log-probabilities and Label must be probabilities.
def distillation(y, labels, teacher_scores, temp, alpha):
    return nn.KLDivLoss()(F.log_softmax(y / temp, axis=1), F.softmax(teacher_scores / temp, axis=1)) * (
            temp * temp * 2.0 * alpha) + F.cross_entropy(y, labels) * (1. - alpha)

# Student training loop (taught by the teacher network)
def student_train_one_epoch(model, dataloader, criterion, optimizer, epoch, total_epoch, report_freq=20):
    from teacher_ import teacher_model
    print(f'----- Training Epoch [{epoch}/{total_epoch}]:')
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    model.train()
    for batch_idx, data in enumerate(dataloader):
        image = data[0]
        label = data[1]

        out = model(image)
        # 下面3行是主要区别
        teacher_output = teacher_model(image)
        teacher_output = teacher_output.detach()  # 切断老师网络的反向传播
        loss = distillation(out, label, teacher_output, temp=5.0, alpha=0.7) #0为不用教师训练 接近真实标签 1为直接模拟教师模型

        loss.backward()
        optimizer.step()
        optimizer.clear_grad()

        pred = nn.functional.softmax(out, axis=1)
        acc1 = paddle.metric.accuracy(pred, label)

        batch_size = image.shape[0]
        loss_meter.update(loss.cpu().numpy()[0], batch_size)
        acc_meter.update(acc1.cpu().numpy()[0], batch_size)

        if batch_idx > 0 and batch_idx % report_freq == 0:
            print(f'----- Batch[{batch_idx}/{len(dataloader)}], Loss: {loss_meter.avg:.5}, Acc@1: {acc_meter.avg:.4}')

    print(f'----- Epoch[{epoch}/{total_epoch}], Loss: {loss_meter.avg:.5}, Acc@1: {acc_meter.avg:.4}')
    return loss,acc1
# Student validation loop (for the teacher-taught student)
def student_validate(model, dataloader, criterion, report_freq=10):
    print('----- Validation')
    loss_meter = AverageMeter()
    acc_meter = AverageMeter()
    model.eval()
    for batch_idx, data in enumerate(dataloader):
        image = data[0]
        label = data[1]

        out = model(image)
        loss = criterion(out, label)

        pred = paddle.nn.functional.softmax(out, axis=1)
        acc1 = paddle.metric.accuracy(pred, label)
        batch_size = image.shape[0]
        loss_meter.update(loss.cpu().numpy()[0], batch_size)
        acc_meter.update(acc1.cpu().numpy()[0], batch_size)

        if batch_idx > 0 and batch_idx % report_freq == 0:
            print(f'----- Batch [{batch_idx}/{len(dataloader)}], Loss: {loss_meter.avg:.5}, Acc@1: {acc_meter.avg:.4}')

    print(f'----- Validation Loss: {loss_meter.avg:.5}, Acc@1: {acc_meter.avg:.4}')
    return loss,acc1

# Main entry for training the teacher-taught student network
def student_main():
    print('开始训练有老师教的学生模型')
    total_epoch = 20
    batch_size = 256

    model = StudentNet()
    train_dataset = get_dataset(mode='train')
    train_dataloader = get_dataloader(train_dataset, batch_size, mode='train')
    val_dataset = get_dataset(mode='test')
    val_dataloader = get_dataloader(val_dataset, batch_size, mode='test')
    criterion = nn.CrossEntropyLoss()
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(0.02, total_epoch)
    optimizer = paddle.optimizer.Momentum(learning_rate=scheduler,
                                          parameters=model.parameters(),
                                          momentum=0.9,
                                          weight_decay=5e-4)

    eval_mode = False
    if eval_mode:
        state_dict = paddle.load('./student_ep200.pdparams')
        model.set_state_dict(state_dict)
        student_validate(model, val_dataloader, criterion)
        return

    student_history_train = []
    student_history_vali = []
    save_freq = 5
    test_freq = 1
    for epoch in range(1, total_epoch + 1):
        loss_train, acc1_train = student_train_one_epoch(model, train_dataloader, criterion, optimizer, epoch,
                                                         total_epoch)
        scheduler.step()
        student_history_train.append((loss_train, acc1_train))

        if epoch % test_freq == 0 or epoch == total_epoch:
            loss_vali, acc1_vali = student_validate(model, val_dataloader, criterion)
            student_history_vali.append((loss_vali, acc1_vali))

        if epoch % save_freq == 0 or epoch == total_epoch:
            paddle.save(model.state_dict(), f'model_out/student_ep{epoch}.pdparams')
            paddle.save(optimizer.state_dict(), f'model_out/student_ep{epoch}.pdopts')

    return model, student_history_train, student_history_vali



# 学生网络训练（有老师教）返回值分别是网络模型、训练时的loss和acc、预测时的loss和acc
student_model,student_history_train,student_history_vali = student_main()
