# -*- coding: utf-8 -*-
# time: 2025/4/21 11:38
# file: distill_teach_stu_model.py
# author: hanson
import torch
import torch.nn as nn
import torch.optim as optim

# ---------------------------
# (1) Define a larger "Teacher" model
# ---------------------------
class TeacherNet(nn.Module):
    """A deliberately over-sized MLP used as the distillation teacher.

    Maps flattened 28x28 images (784 features) through two hidden layers
    (512 -> 256) to 10 class logits.
    """

    def __init__(self):
        super().__init__()
        # Build the stack as a list first, then wrap it in Sequential.
        layers = [
            nn.Linear(784, 512),   # wide first hidden layer
            nn.ReLU(),
            nn.Linear(512, 256),   # second hidden layer adds depth
            nn.ReLU(),
            nn.Linear(256, 10),    # final logits, one per class
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input x of shape (batch, 784)."""
        return self.net(x)

# ---------------------------
# (2) Define a smaller "Student" model
# ---------------------------
class StudentNet(nn.Module):
    """A compact MLP that learns from the teacher via distillation.

    Same 784-in / 10-out interface as the teacher, but with a single
    small hidden layer (64 units), so far fewer parameters.
    """

    def __init__(self):
        super().__init__()
        # Shallower and narrower than the teacher on purpose.
        self.net = nn.Sequential(
            nn.Linear(784, 64),
            nn.ReLU(),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input x of shape (batch, 784)."""
        return self.net(x)

# ---------------------------
# (3) Knowledge-distillation loss
# ---------------------------
def distillation_loss(student_outputs, teacher_outputs, temperature=2.0, alpha=0.5,
                      labels=None):
    """Compute the knowledge-distillation loss (Hinton et al.).

    Args:
        student_outputs: student logits, shape (batch, num_classes).
        teacher_outputs: teacher logits, same shape as student_outputs.
        temperature:     softening temperature; larger values produce
                         smoother probability targets.
        alpha:           weight of the distillation term when `labels` is
                         given: alpha * distill + (1 - alpha) * cross-entropy.
                         (Previously this parameter was accepted but unused.)
        labels:          optional hard class labels, shape (batch,). When
                         None (the default, and the original behavior), only
                         the distillation term is returned.

    Returns:
        A scalar loss tensor.
    """
    # KL divergence between temperature-softened distributions.
    # The student side must be log-probabilities; divide logits by the
    # temperature *before* log_softmax.
    student_log_probs = nn.functional.log_softmax(student_outputs / temperature, dim=1)
    teacher_probs = nn.functional.softmax(teacher_outputs / temperature, dim=1)
    # Multiply by T^2 to keep gradient magnitudes comparable across
    # temperatures (standard practice from the original distillation paper).
    distill_loss = nn.KLDivLoss(reduction='batchmean')(
        student_log_probs, teacher_probs) * (temperature ** 2)

    if labels is None:
        # Backward-compatible path: distillation term only.
        return distill_loss

    # Blend the soft-target loss with the hard-label cross-entropy.
    ce_loss = nn.functional.cross_entropy(student_outputs, labels)
    return alpha * distill_loss + (1 - alpha) * ce_loss

def train(model, optimizer, data, teacher_outputs=None):
    """Run a single optimization step and return the loss value.

    Args:
        model:           the network being trained (teacher or student).
        optimizer:       optimizer holding `model`'s parameters.
        data:            (inputs, labels) tuple; in this demo the data is
                         randomly generated. Labels are ignored when
                         distilling.
        teacher_outputs: teacher logits for the same inputs; when provided,
                         the model is trained with the distillation loss
                         instead of cross-entropy.

    Returns:
        The scalar loss of this step as a Python float.
    """
    inputs, labels = data
    logits = model(inputs)

    # Choose the objective: distill from the teacher when its logits are
    # available, otherwise do ordinary supervised cross-entropy training.
    if teacher_outputs is None:
        loss = nn.CrossEntropyLoss()(logits, labels)
    else:
        loss = distillation_loss(logits, teacher_outputs)

    # Standard backprop + parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

def main():
    """Demo: train a teacher on random data, then distill it into a student.

    Each epoch performs one teacher step (plain cross-entropy) followed by
    one student step that matches the teacher's soft outputs.
    """
    EPOCHS = 100
    BATCH_SIZE = 32  # 32 "images" per step, each flattened to 28*28 = 784

    # Networks and one Adam optimizer apiece.
    teacher = TeacherNet()
    student = StudentNet()
    teacher_opt = optim.Adam(teacher.parameters(), lr=1e-3)
    student_opt = optim.Adam(student.parameters(), lr=1e-3)

    for epoch in range(EPOCHS):
        # --- teacher step: supervised training on random inputs/labels ---
        t_inputs = torch.randn(BATCH_SIZE, 784)
        t_labels = torch.randint(0, 10, (BATCH_SIZE,))  # random classes 0-9
        teacher_loss = train(teacher, teacher_opt, (t_inputs, t_labels))

        # --- student step: distill the teacher's logits on fresh data ---
        s_inputs = torch.randn(BATCH_SIZE, 784)
        with torch.no_grad():  # teacher provides targets only; no gradients
            soft_targets = teacher(s_inputs)
        s_labels = torch.randint(0, 10, (BATCH_SIZE,))  # unused when distilling
        student_loss = train(student, student_opt, (s_inputs, s_labels), soft_targets)

        # Report both losses for this epoch.
        print(f"Epoch {epoch+1}/{EPOCHS}, Teacher Loss: {teacher_loss:.4f}, Student Loss (Distill): {student_loss:.4f}")


if __name__ == "__main__":
    main()
