# -*- coding: utf-8 -*-
# time: 2025/4/21 09:19
# file: qwen_distill_myModel.py
# author: hanson
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
import numpy as np

"""
通过这种方式，你可以将Qwen2.5的强大生成能力迁移到轻量化模型中，显著提升推理效率！

"""
# Device configuration: prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# 2. Load the teacher model (Qwen2.5). NOTE: runs at import time and downloads weights.
teacher_model_name = "Qwen/Qwen2.5-0.5B"  # replace with the actual model name if needed
teacher_model = AutoModelForCausalLM.from_pretrained(teacher_model_name).to(device)
teacher_tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)
teacher_tokenizer.pad_token = teacher_tokenizer.eos_token  # Qwen defines no pad token; reuse EOS for padding



#3 定义学生模型（小型Transformer）
class StudentTransformer(nn.Module):
    """Small Transformer-encoder student model for distillation.

    Maps a batch of token-id sequences to next-token logits computed from
    the representation of the final time step.
    """

    def __init__(self, vocab_size, d_model=256, nhead=4, num_layers=2):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, d_model)
        # BUGFIX: batch_first=True so inputs are [batch, seq, d_model].
        # The previous default (batch_first=False) silently interpreted the
        # batch axis as the sequence axis, mixing information between
        # unrelated examples in a batch.
        self.transformer = nn.TransformerEncoder(
            nn.TransformerEncoderLayer(d_model, nhead, batch_first=True),
            num_layers
        )
        self.fc = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        """x: [batch, seq] token ids -> logits [batch, vocab_size]."""
        x = self.embedding(x)        # [batch, seq, d_model]
        x = self.transformer(x)      # [batch, seq, d_model]
        logits = self.fc(x[:, -1, :])  # predict from the last time step
        return logits


# Initialize the student with the teacher's vocabulary size so teacher and
# student logits live in the same token space.
vocab_size = len(teacher_tokenizer.get_vocab())
student_model = StudentTransformer(vocab_size).to(device)


#4 蒸馏损失函数
def distillation_loss(student_logits, teacher_logits, temperature=2.0):
    """Temperature-scaled KL divergence between teacher and student logits.

    Standard Hinton-style distillation loss: both distributions are softened
    by `temperature`, and the result is rescaled by T^2 so gradient
    magnitudes stay comparable across temperature choices.
    """
    t = temperature
    # Teacher's softened probability distribution (soft targets).
    teacher_probs = torch.softmax(teacher_logits / t, dim=-1)
    # Student's softened log-probabilities.
    student_log_probs = torch.log_softmax(student_logits / t, dim=-1)
    kl = nn.functional.kl_div(student_log_probs, teacher_probs, reduction='batchmean')
    return kl * t * t

# Total loss = alpha * hard (cross-entropy) loss + (1 - alpha) * distillation loss
alpha = 0.3  # weighting hyperparameter between hard and soft targets
# NOTE(review): pad_token_id equals eos_token_id here (set above), so CE also
# ignores genuine EOS targets — confirm this is intended.
criterion = nn.CrossEntropyLoss(ignore_index=teacher_tokenizer.pad_token_id)

#5 数据加载与处理
class TextDataset(Dataset):
    """Dataset of raw text strings tokenized to a fixed length.

    Each item is (input_ids, attention_mask), both 1-D tensors of length
    `max_length` (truncated / padded by the tokenizer).
    """

    def __init__(self, texts, tokenizer, max_length=64):
        self.tokenizer = tokenizer
        self.texts = texts
        self.max_length = max_length

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        inputs = self.tokenizer(
            text,
            max_length=self.max_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt"
        )
        # BUGFIX: squeeze(0) drops only the batch dim added by
        # return_tensors="pt"; a bare squeeze() would also collapse the
        # sequence dimension when max_length == 1, yielding 0-d tensors.
        return inputs["input_ids"].squeeze(0), inputs["attention_mask"].squeeze(0)


# Example data (replace with your real corpus in practice).
train_texts = ["今天天气很好，我想去公园散步。", "深度学习模型的蒸馏技术非常有趣。","学习英语的基本方法",
               "你是谁？","列出三种创造性的解决问题的技巧。"]
train_dataset = TextDataset(train_texts, teacher_tokenizer)
train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)

#6. 蒸馏训练循环

def train_distillation(student_model, teacher_model, dataloader, epochs=5):
    """Distill the teacher's next-token predictions into the student.

    For each padded batch, the hard target is each sequence's last *real*
    (non-pad) token, and the soft target is the teacher's logits at the
    position just before it — i.e. the teacher's prediction for that token.
    Relies on module-level globals: device, criterion, alpha,
    distillation_loss.

    Args:
        student_model: trainable module mapping [B, T] ids -> [B, V] logits.
        teacher_model: frozen causal LM whose output has .logits [B, T, V].
        dataloader: yields (input_ids, attention_mask) tensor pairs.
        epochs: number of passes over the dataloader.
    """
    optimizer = Adam(student_model.parameters(), lr=1e-4)
    teacher_model.eval()  # the teacher is never updated

    for epoch in range(epochs):
        total_loss = None  # guard: dataloader may be empty
        for input_ids, attention_mask in dataloader:
            input_ids = input_ids.to(device)
            attention_mask = attention_mask.to(device)

            batch_idx = torch.arange(input_ids.size(0), device=input_ids.device)
            # BUGFIX: with padding="max_length", index -1 is almost always a
            # pad position, so indexing [:, -1, :] distilled the teacher's
            # pad-position logits and used a pad token as the hard target.
            # Instead, locate each sequence's last real token.
            last_pos = attention_mask.sum(dim=1).long() - 1
            # Position whose logits predict the token at last_pos.
            ctx_pos = (last_pos - 1).clamp(min=0)

            # Teacher forward pass (no gradients through the teacher).
            with torch.no_grad():
                teacher_outputs = teacher_model(
                    input_ids=input_ids,
                    attention_mask=attention_mask
                ).logits  # [batch_size, seq_len, vocab_size]
                # Teacher's prediction for each sequence's final real token.
                teacher_logits = teacher_outputs[batch_idx, ctx_pos]

            # Student predicts from the context (final position dropped).
            # NOTE(review): for short sequences the slice still contains the
            # target token at its original position — acceptable for this toy
            # setup, but confirm before production use.
            student_logits = student_model(input_ids[:, :-1])

            # Hard loss: cross-entropy against the last real token.
            hard_loss = criterion(student_logits, input_ids[batch_idx, last_pos])
            # Soft loss: KL to the teacher's tempered distribution.
            soft_loss = distillation_loss(student_logits, teacher_logits)
            total_loss = alpha * hard_loss + (1 - alpha) * soft_loss

            # Backpropagation on the student only.
            optimizer.zero_grad()
            total_loss.backward()
            optimizer.step()

        if total_loss is not None:
            print(f"Epoch {epoch}, Loss: {total_loss.item():.4f}")


#7 Launch distillation training (runs at import time; this file is a script).

train_distillation(student_model, teacher_model, train_loader)

#8. Test generation.
def generate_text(model, tokenizer, prompt, max_length=20):
    """Generate text from `prompt` with greedy decoding.

    Works for both HuggingFace models (which expose .generate) and the plain
    StudentTransformer (which does not — the original code crashed with
    AttributeError when given the student). The device is derived from the
    model's own parameters instead of a global.

    Args:
        model: either an HF causal LM or a module mapping [B, T] -> [B, V] logits.
        tokenizer: provides encode/decode (shared teacher tokenizer here).
        prompt: input text to continue.
        max_length: total sequence length budget (prompt + generated tokens).

    Returns:
        The decoded string, special tokens stripped.
    """
    model_device = next(model.parameters()).device
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model_device)

    if hasattr(model, "generate"):
        # HF models: delegate to the built-in generation loop.
        output = model.generate(input_ids, max_length=max_length)
        return tokenizer.decode(output[0], skip_special_tokens=True)

    # Greedy fallback for plain modules that return [batch, vocab] logits.
    model.eval()
    eos_id = getattr(tokenizer, "eos_token_id", None)
    with torch.no_grad():
        while input_ids.size(1) < max_length:
            logits = model(input_ids)                      # [1, vocab_size]
            next_id = logits.argmax(dim=-1, keepdim=True)  # [1, 1]
            input_ids = torch.cat([input_ids, next_id], dim=1)
            if eos_id is not None and next_id.item() == eos_id:
                break
    return tokenizer.decode(input_ids[0], skip_special_tokens=True)

# Try the distilled student on a sample prompt.
prompt = "今天的天气"
print("学生模型生成:", generate_text(student_model, teacher_tokenizer, prompt))


# Save the student model.

# Method 1: raw weights + tokenizer + a plain JSON config.
torch.save(student_model.state_dict(), "student_model_weights.pth")

# Save the tokenizer (shared with the teacher — same vocabulary).
teacher_tokenizer.save_pretrained("student_model_tokenizer")

# Save the architecture hyper-parameters so the model can be rebuilt on load.
import json
model_config = {
    "vocab_size": vocab_size,
    "d_model": 256,
    "nhead": 4,
    "num_layers": 2
}
with open("student_model_config.json", "w") as f:
    json.dump(model_config, f)


# 方法2：保存为HuggingFace格式（推荐）
from transformers import PretrainedConfig, PreTrainedModel
# Define a HuggingFace-style config class for the student model.
class StudentConfig(PretrainedConfig):
    """Serializable hyper-parameter container for the student model.

    All __init__ arguments carry defaults: HuggingFace re-instantiates
    configs from serialized JSON via keyword arguments, so a required
    positional `vocab_size` would break `from_pretrained`.
    """

    def __init__(self, vocab_size=151936, d_model=256, nhead=4, num_layers=2, **kwargs):
        # Default vocab_size presumably matches Qwen2.5's vocabulary —
        # TODO(review): confirm against the tokenizer actually used.
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.nhead = nhead
        self.num_layers = num_layers

# Adapter giving the student model the HuggingFace PreTrainedModel API
# (save_pretrained / from_pretrained).
class HFStudentModel(PreTrainedModel):
    """PreTrainedModel wrapper around StudentTransformer."""

    config_class = StudentConfig

    def __init__(self, config):
        super().__init__(config)
        # Rebuild the plain PyTorch student from the serialized hyper-parameters.
        self.model = StudentTransformer(
            vocab_size=config.vocab_size,
            d_model=config.d_model,
            nhead=config.nhead,
            num_layers=config.num_layers,
        )

    def forward(self, input_ids, **kwargs):
        # Extra HF kwargs (attention_mask, labels, ...) are accepted but unused.
        logits = self.model(input_ids)
        return logits


# Build the HF wrapper, copy in the trained weights, and save.
config = StudentConfig(vocab_size=vocab_size)
hf_student = HFStudentModel(config)
hf_student.model.load_state_dict(student_model.state_dict())  # load the trained weights

# Save model and tokenizer into one directory (HF layout).
save_dir = "student_model_hf"
hf_student.save_pretrained(save_dir)
teacher_tokenizer.save_pretrained(save_dir)  # tokenizer shared with the teacher