from utils import evaluate
from models import StudentWithDistilling
from models import LeNet5 as TeacherModel
from utils import load_dataset

import torch


def train():
    """Train a distilled student model against a frozen LeNet-5 teacher.

    Loads pre-trained teacher weights from 'teacher.pt', then runs `epochs`
    passes over the training data, feeding the teacher's soft logits into
    the student's distillation loss. Prints batch accuracy/loss every 50
    batches and test accuracy after each epoch. Returns None.
    """
    epochs = 5
    lr = 0.001
    batch_size = 64
    temperature = 1.5  # softens the teacher logits inside the distillation loss
    alpha = 0.8        # mixing weight between soft (teacher) and hard (label) loss terms
    train_iter, test_iter = load_dataset(batch_size)
    student_model = StudentWithDistilling(temperature=temperature, alpha=alpha)
    teacher_model = TeacherModel()
    # map_location='cpu' lets the checkpoint load even if it was saved on a GPU.
    teacher_model_state_dict = torch.load('teacher.pt', map_location='cpu')
    teacher_model.load_state_dict(teacher_model_state_dict)
    teacher_model.eval()  # teacher is inference-only for the whole run
    print("teacher_model:",evaluate(test_iter, teacher_model))
    optimizer = torch.optim.Adam(student_model.parameters(), lr=lr)
    max_test_acc = 0

    for epoch in range(epochs):
        # NOTE(review): if utils.evaluate() puts the model into eval mode it is
        # never restored — consider calling student_model.train() here; confirm
        # against utils.evaluate before changing.
        for i, (x, y) in enumerate(train_iter):
            # The teacher only supplies targets; no gradients are needed.
            with torch.no_grad():
                soft_logits = teacher_model(x)
            # Student computes the combined distillation loss internally.
            loss, logits = student_model(x, soft_logits, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()  # apply the gradient step
            if i % 50 == 0:
                acc = (logits.argmax(1) == y).float().mean()

                print(f"Epochs[{epoch + 1}/{epochs}]--batch[{i}/{len(train_iter)}]"
                      f"--Acc: {round(acc.item(), 4)}--loss: {round(loss.item(), 4)}")

        test_acc = evaluate(test_iter, student_model)
        if test_acc > max_test_acc:
            max_test_acc = test_acc
        print(f"Epochs[{epoch + 1}/{epochs}]--Acc on test {test_acc}, max test acc: {max_test_acc}")


if __name__ == '__main__':
    train()
    # Epochs[4/5]--Acc on test 0.8636, max test acc: 0.8636
    # Epochs[5/5]--batch[0/938]--Acc: 0.9062--loss: 1.2934
    # Epochs[5/5]--batch[50/938]--Acc: 0.8281--loss: 1.4429
    # Epochs[5/5]--batch[100/938]--Acc: 0.8594--loss: 1.4121
    # Epochs[5/5]--batch[150/938]--Acc: 0.8594--loss: 1.3253
    # Epochs[5/5]--batch[200/938]--Acc: 0.8281--loss: 1.5815
    # Epochs[5/5]--batch[250/938]--Acc: 0.875--loss: 1.8088
    # Epochs[5/5]--batch[300/938]--Acc: 0.8906--loss: 1.141
    # Epochs[5/5]--batch[350/938]--Acc: 0.7969--loss: 1.5366
    # Epochs[5/5]--batch[400/938]--Acc: 0.8125--loss: 1.4435
    # Epochs[5/5]--batch[450/938]--Acc: 0.875--loss: 1.3812
    # Epochs[5/5]--batch[500/938]--Acc: 0.8594--loss: 1.4114
    # Epochs[5/5]--batch[550/938]--Acc: 0.8906--loss: 1.0966
    # Epochs[5/5]--batch[600/938]--Acc: 0.9062--loss: 1.3558
    # Epochs[5/5]--batch[650/938]--Acc: 0.7969--loss: 1.4307
    # Epochs[5/5]--batch[700/938]--Acc: 0.8906--loss: 1.1219
    # Epochs[5/5]--batch[750/938]--Acc: 0.9062--loss: 1.1452
    # Epochs[5/5]--batch[800/938]--Acc: 0.8281--loss: 1.145
    # Epochs[5/5]--batch[850/938]--Acc: 0.875--loss: 1.3676
    # Epochs[5/5]--batch[900/938]--Acc: 0.875--loss: 1.35
    # Epochs[5/5]--Acc on test 0.8653, max test acc: 0.8653




    # The following results were obtained with all parameters unchanged, only
    # switching the loss between soft_logits and hard_label in models.py from
    # cross-entropy to KL divergence:
    # Epochs[4/5]--batch[250/938]--Acc: 0.9375--loss: -2.243
    # Epochs[4/5]--batch[300/938]--Acc: 0.8438--loss: -2.1872
    # Epochs[4/5]--batch[350/938]--Acc: 0.8438--loss: -2.0665
    # Epochs[4/5]--batch[400/938]--Acc: 0.875--loss: -2.2114
    # Epochs[4/5]--batch[450/938]--Acc: 0.8281--loss: -2.311
    # Epochs[4/5]--batch[500/938]--Acc: 0.875--loss: -2.2173
    # Epochs[4/5]--batch[550/938]--Acc: 0.8906--loss: -2.2503
    # Epochs[4/5]--batch[600/938]--Acc: 0.8438--loss: -2.2452
    # Epochs[4/5]--batch[650/938]--Acc: 0.8594--loss: -2.0689
    # Epochs[4/5]--batch[700/938]--Acc: 0.875--loss: -2.2814
    # Epochs[4/5]--batch[750/938]--Acc: 0.9219--loss: -2.2349
    # Epochs[4/5]--batch[800/938]--Acc: 0.8125--loss: -2.1488
    # Epochs[4/5]--batch[850/938]--Acc: 0.8594--loss: -2.159
    # Epochs[4/5]--batch[900/938]--Acc: 0.8281--loss: -2.1879
    # Epochs[4/5]--Acc on test 0.8714, max test acc: 0.8714
    # Epochs[5/5]--batch[0/938]--Acc: 0.875--loss: -2.1403
    # Epochs[5/5]--batch[50/938]--Acc: 0.8281--loss: -2.2245
    # Epochs[5/5]--batch[100/938]--Acc: 0.9062--loss: -2.1196
    # Epochs[5/5]--batch[150/938]--Acc: 0.875--loss: -2.1724
    # Epochs[5/5]--batch[200/938]--Acc: 0.8125--loss: -2.3025
    # Epochs[5/5]--batch[250/938]--Acc: 0.875--loss: -2.1595
    # Epochs[5/5]--batch[300/938]--Acc: 0.875--loss: -2.2508
    # Epochs[5/5]--batch[350/938]--Acc: 0.8906--loss: -2.1269
    # Epochs[5/5]--batch[400/938]--Acc: 0.875--loss: -2.2308
    # Epochs[5/5]--batch[450/938]--Acc: 0.9688--loss: -2.2758
    # Epochs[5/5]--batch[500/938]--Acc: 0.7969--loss: -2.0829
    # Epochs[5/5]--batch[550/938]--Acc: 0.875--loss: -2.0093
    # Epochs[5/5]--batch[600/938]--Acc: 0.8438--loss: -2.1993
    # Epochs[5/5]--batch[650/938]--Acc: 0.8438--loss: -2.0575
    # Epochs[5/5]--batch[700/938]--Acc: 0.8438--loss: -2.233
    # Epochs[5/5]--batch[750/938]--Acc: 0.8594--loss: -2.1456
    # Epochs[5/5]--batch[800/938]--Acc: 0.8281--loss: -2.114
    # Epochs[5/5]--batch[850/938]--Acc: 0.9375--loss: -2.2305
    # Epochs[5/5]--batch[900/938]--Acc: 0.9844--loss: -2.1176
    # Epochs[5/5]--Acc on test 0.8681, max test acc: 0.8714