# -*- coding: utf-8 -*-
# time: 2025/4/21 09:06
# file: ch01.py
# author: hanson
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
"""
https://juejin.cn/post/7490406929111302184
https://www.cnblogs.com/InProsperity/p/18783205
"""

# 1. Load the teacher model (BERT)
teacher_model = BertModel.from_pretrained('bert-base-uncased')
teacher_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Number of target classes for the classification task.
# BUG FIX: this was `num_classes = []` (an empty list), which makes
# nn.Linear below raise — out_features must be a positive int.
num_classes = 10  # keep in sync with the student model's head below

# Assume the teacher has already been fine-tuned for this classification
# task (in practice, replace this freshly-initialized head with your
# fine-tuned classifier weights).
teacher_classifier = nn.Linear(teacher_model.config.hidden_size, num_classes)


# 2. Student model: a lightweight single-layer LSTM text classifier
class StudentLSTM(nn.Module):
    """LSTM classifier distilled from the BERT teacher.

    Embeds token ids, runs them through one LSTM layer, and predicts
    class logits from the hidden state at the final time step.
    """

    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        self.lstm = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        embedded = self.embedding(x)        # (batch, seq, embed_dim)
        sequence_out, _ = self.lstm(embedded)  # (batch, seq, hidden_dim)
        final_step = sequence_out[:, -1, :]    # keep only the last time step
        return self.fc(final_step)             # (batch, num_classes) logits


# Initialize the student model, reusing BERT's vocabulary so the same
# token ids feed both teacher and student.
vocab_size = len(teacher_tokenizer.get_vocab())  # BERT WordPiece vocab size
# NOTE(review): num_classes=10 here must match the output size of
# teacher_classifier defined above — verify the two agree.
student_model = StudentLSTM(vocab_size, embed_dim=128, hidden_dim=256, num_classes=10)

# 3. Distillation loss: KL divergence between temperature-softened distributions
def distillation_loss(student_outputs, teacher_outputs, T=2.0):
    """Soft-label distillation loss (Hinton et al., 2015).

    Args:
        student_outputs: raw student logits, shape (batch, num_classes).
        teacher_outputs: raw teacher logits, same shape.
        T: softmax temperature; higher T yields softer distributions.

    Returns:
        Scalar tensor: KL(teacher || student) * T^2. The T^2 factor keeps
        gradient magnitudes comparable across temperatures.
    """
    soft_teacher = nn.functional.softmax(teacher_outputs / T, dim=1)
    soft_student = nn.functional.log_softmax(student_outputs / T, dim=1)
    # BUG FIX: reduction must be 'batchmean' — the default 'mean' averages
    # over every element (batch * classes) and under-scales the true KL
    # divergence, silently weakening the distillation signal.
    return nn.KLDivLoss(reduction='batchmean')(soft_student, soft_teacher) * (T * T)

criterion = nn.CrossEntropyLoss()  # hard-label loss against ground-truth labels
# 4. Training-loop hyperparameters
alpha = 0.7  # weight of the hard-label loss; (1 - alpha) goes to the soft distillation loss
# NOTE(review): `epochs = 5` is never passed to train_distillation below —
# the call at the bottom of the file uses the function's default of 10.
# Confirm which value is intended.
epochs = 5  # short demo run; real training likely needs more
def train_distillation(student_model, teacher_model, dataloader, epochs=10):
    """Distill the BERT teacher into the LSTM student.

    Combines the hard cross-entropy loss against true labels with the soft
    distillation loss against the teacher's logits, weighted by the
    module-level `alpha`.

    Args:
        student_model: the StudentLSTM being trained.
        teacher_model: frozen BERT encoder providing soft targets.
        dataloader: yields (input_ids, labels) batches.
        epochs: number of full passes over the data.
    """
    optimizer = Adam(student_model.parameters(), lr=1e-3)
    # BUG FIX: put the teacher in eval mode so dropout / layer-norm behave
    # deterministically while generating soft targets.
    teacher_model.eval()
    student_model.train()

    for epoch in range(epochs):
        running_loss = 0.0
        num_batches = 0
        for inputs, labels in dataloader:
            # Teacher soft targets — no gradients flow into the teacher.
            with torch.no_grad():
                hidden_states = teacher_model(inputs)[0]  # last-layer hidden states
                # Classify from the [CLS] token representation.
                teacher_logits = teacher_classifier(hidden_states[:, 0, :])

            # Student prediction on the same token ids.
            student_logits = student_model(inputs)

            hard_loss = criterion(student_logits, labels)                   # vs ground truth
            soft_loss = distillation_loss(student_logits, teacher_logits)   # vs teacher
            total_loss = alpha * hard_loss + (1 - alpha) * soft_loss

            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

            running_loss += total_loss.item()
            num_batches += 1

        # BUG FIX: the original printed only the last batch's loss and raised
        # NameError on an empty dataloader; report the epoch's mean instead.
        avg_loss = running_loss / max(num_batches, 1)
        print(f"Epoch {epoch}, Loss: {avg_loss:.4f}")

# 5. Data preparation example
# BUG FIX: `texts` and `labels` were referenced without ever being defined,
# which raised NameError. Replace this toy dataset with your real corpus.
texts = ["a great movie", "a terrible movie", "quite enjoyable", "not worth watching"]
labels = [1, 0, 1, 0]  # class indices; must be < num_classes

# Tokenize with the teacher's tokenizer so teacher and student share token ids.
inputs = teacher_tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
labels = torch.tensor(labels)

dataset = TensorDataset(inputs['input_ids'], labels)
dataloader = DataLoader(dataset, batch_size=32)

# 6. Run distillation — pass the module-level `epochs` so the setting above
# is actually used (the original call silently fell back to the default).
train_distillation(student_model, teacher_model, dataloader, epochs=epochs)
