"""
@Project : KonwledgeDistilling
@Author  : wxj233
@Time    : 2025/11/6 11:57
@Desc    : Train a student model on MNIST by knowledge distillation from a pre-trained teacher model.
"""
from sympy.physics.units import temperature
from torchvision import datasets, transforms
import os
import torch
from torch.utils.data import DataLoader
from Model import *
from torch import nn, optim
from tqdm import tqdm
import torch.nn.functional as F


# Point torchvision at a domestic mirror for the MNIST download.
os.environ['TORCHVISION_DATA_MIRROR'] = 'https://mirrors.bfsu.edu.cn/torchvision/'

# MNIST preprocessing pipeline: PIL image -> tensor, then normalize with the
# standard MNIST mean/std (0.1307 / 0.3081).
_mnist_steps = [
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
]
transform = transforms.Compose(_mnist_steps)

# Training split of MNIST, downloaded into the current directory if absent.
trainset = datasets.MNIST('./', train=True, download=True, transform=transform)
# Mini-batch loader; shuffling each epoch for SGD.
trainData = DataLoader(trainset, batch_size=128, shuffle=True)


# Select the compute device: GPU when available, otherwise CPU.
CUDA = torch.cuda.is_available()
device = torch.device('cuda' if CUDA else 'cpu')

# Student network to be trained by distillation.
# NOTE(review): the student is instantiated from the Teacher class -- confirm
# that a separate (smaller) Student architecture isn't intended here.
model = Teacher().to(device)

# Load the pre-trained teacher. map_location makes the load work on a
# CPU-only machine even if the checkpoint was saved from a GPU, and moving
# the teacher onto the same device as the student avoids a device mismatch
# when both receive the same inputs.
model_teacher = torch.load("model_teacher.pth", map_location=device).to(device)
model_teacher.eval()  # inference mode: fixes batch-norm stats, disables dropout
# Freeze the teacher: its weights must never be updated during distillation.
for p in model_teacher.parameters():
    p.requires_grad_(False)

criterion = nn.CrossEntropyLoss()  # hard-label loss (applies log-softmax internally)
criterion_KL = nn.KLDivLoss(reduction="batchmean")  # soft-label (distillation) loss
opt = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

ep = 10  # number of training epochs
# Distillation temperature T: softens both probability distributions.
# At evaluation time the student is used with T = 1.
temperature = 3

for epoch in range(ep):
    model.train()  # enable batch normalization updates and dropout
    # leave=False keeps the bar on a single line but clears it after the epoch.
    t_batch = tqdm(trainData, total=len(trainData), unit="batches", leave=False)
    t_batch.set_description(f"第{epoch + 1}/{ep}个周期, 当前周期进度")
    for inputs, labels in t_batch:
        if CUDA:
            inputs, labels = inputs.cuda(), labels.cuda()

        student_outs = model(inputs)
        # The teacher is inference-only: no_grad avoids building an autograd
        # graph through the teacher, saving memory/compute and preventing any
        # gradient from reaching its weights.
        with torch.no_grad():
            teacher_outs = model_teacher(inputs)

        # Total loss = hard-label CE + soft-label KL. log_softmax on the
        # student side is required by KLDivLoss (it expects log-probabilities)
        # and is numerically more stable than log(softmax(...)). The KL term
        # is scaled by T^2 so its gradient magnitude stays comparable to the
        # CE term as T grows (Hinton et al., 2015).
        loss = (criterion(student_outs, labels) +
                criterion_KL(F.log_softmax(student_outs / temperature, dim=1),
                             F.softmax(teacher_outs / temperature, dim=1)) * (temperature ** 2))

        t_batch.set_postfix(loss=loss.item())
        opt.zero_grad()
        loss.backward()
        opt.step()

# Persist the whole student module (architecture + weights).
torch.save(model, 'model_student.pth')




