# -*- coding: utf-8 -*-
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    get_linear_schedule_with_warmup,
    TrainingArguments,
    Trainer
)
import numpy as np

# ==============================
# 1. Data loading and processing
# ==============================
class MathDistillationDataset(Dataset):
    """Distillation dataset: math question -> (student inputs, teacher logits, labels).

    Each record holds a "question" string and a "teacher_output" dict with the
    teacher's answer "text" and raw "logits". __getitem__ tokenizes the question
    as the student input and the teacher's answer as the language-model labels,
    masking padding positions so they do not contribute to the loss.
    """

    # Label id ignored by the Hugging Face causal-LM cross-entropy loss.
    IGNORE_INDEX = -100

    def __init__(self, tokenizer, data_path, max_length=512):
        """
        Args:
            tokenizer: HF tokenizer; must have a pad token configured.
            data_path: path to the dataset (the placeholder loader below
                currently ignores it).
            max_length: fixed sequence length used for padding/truncation.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length

        # Data is assumed to be a JSON list of {question, teacher_output}.
        self.data = self._load_data(data_path)

    def _load_data(self, path):
        # Placeholder loader returning one example record.
        # TODO(review): `path` is unused — replace with real JSON loading.
        return [
            {
                "question": "Solve 3x + 5 = 20",
                "teacher_output": {
                    "text": "Subtract 5: 3x=15. Divide by 3: x=5",
                    "logits": np.random.randn(50257)  # assumes vocab_size=50257
                }
            },
            # more records ...
        ]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        item = self.data[idx]

        # Encode the input question (the student's prompt).
        inputs = self.tokenizer(
            item["question"],
            max_length=self.max_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt"
        )

        # Encode the teacher's answer text; its token ids become the labels.
        teacher_enc = self.tokenizer(
            item["teacher_output"]["text"],
            max_length=self.max_length,
            truncation=True,
            padding="max_length",
            return_tensors="pt"
        )

        # Fix: mask padding positions with -100 so the CE loss ignores them
        # (HF convention). The original passed raw padded ids, which trained
        # the model to predict pad tokens.
        labels = teacher_enc["input_ids"].squeeze(0).clone()
        labels[teacher_enc["attention_mask"].squeeze(0) == 0] = self.IGNORE_INDEX

        return {
            # squeeze(0), not squeeze(): a max_length of 1 must not collapse
            # the sequence dimension.
            "input_ids": inputs["input_ids"].squeeze(0),
            "attention_mask": inputs["attention_mask"].squeeze(0),
            # NOTE(review): this is one (vocab,) vector per example, while the
            # student produces (seq_len, vocab) logits per example — per-token
            # teacher logits are likely intended; confirm the data format.
            "teacher_logits": torch.FloatTensor(item["teacher_output"]["logits"]),
            "labels": labels
        }

# ==============================
# 2. Model and loss definition
# ==============================
class DistillationModel(nn.Module):
    """Wraps a student causal LM and blends hard CE loss with a soft KL loss.

    When teacher logits are supplied, the loss is
    ``alpha * KL(student || teacher) * T^2 + (1 - alpha) * CE``;
    otherwise it is the student's plain cross-entropy loss.
    """

    def __init__(self, student_name="Qwen/Qwen2.5-Math-1.5B", temperature=5.0,
                 alpha=0.7):
        """
        Args:
            student_name: HF model id of the student to fine-tune.
            temperature: softening temperature for the KL term.
            alpha: weight of the soft (KL) term; CE gets ``1 - alpha``.
                Default 0.7 reproduces the original hard-coded 0.7/0.3 mix.
        """
        super().__init__()
        self.student = AutoModelForCausalLM.from_pretrained(student_name)
        self.temperature = temperature
        self.alpha = alpha

    def forward(self, input_ids, attention_mask, labels=None, teacher_logits=None):
        """Run the student and return ``{"loss": ..., "logits": ...}``.

        Raises no exceptions of its own; the loss key is the blended loss if
        ``teacher_logits`` is given, else the student's CE loss.
        """
        outputs = self.student(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels
        )

        if teacher_logits is not None:
            student_logits = outputs.logits
            t = self.temperature

            # Soft-target KL divergence, scaled by T^2 so gradient magnitudes
            # stay comparable across temperatures (Hinton et al., 2015).
            # NOTE(review): student logits are (batch, seq, vocab) while the
            # dataset supplies a single (vocab,) vector per example — these
            # shapes must be reconciled upstream; confirm the teacher format.
            loss_kl = nn.functional.kl_div(
                nn.functional.log_softmax(student_logits / t, dim=-1),
                nn.functional.softmax(teacher_logits / t, dim=-1),
                reduction="batchmean"
            ) * (t ** 2)

            # Hard-label cross-entropy computed by the HF model from `labels`.
            loss_ce = outputs.loss

            # Blend: alpha on the soft term, the remainder on the hard term.
            total_loss = self.alpha * loss_kl + (1.0 - self.alpha) * loss_ce
        else:
            total_loss = outputs.loss

        return {
            "loss": total_loss,
            "logits": outputs.logits
        }

# ==============================
# 3. Training loop
# ==============================
def train():
    """Distill the teacher into the student model over 3 epochs and save it."""
    # Tokenizer setup: reuse EOS as the pad token (the model ships without one).
    model_name = "Qwen/Qwen2.5-Math-1.5B"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    tokenizer.pad_token = tokenizer.eos_token

    # Data pipeline.
    loader = DataLoader(
        MathDistillationDataset(tokenizer, "path/to/dataset.json"),
        batch_size=4,
        shuffle=True,
    )

    # Model on the best available device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = DistillationModel(student_name=model_name, temperature=5.0).to(device)

    # Optimizer plus a linear warmup/decay schedule over 3 epochs of steps.
    optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5, weight_decay=0.01)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=100,
        num_training_steps=len(loader) * 3,
    )

    model.train()
    for epoch in range(3):
        running_loss = 0.0
        for batch_idx, batch in enumerate(loader):
            # Move the entire batch to the target device in one pass.
            batch = {key: tensor.to(device) for key, tensor in batch.items()}

            # Forward pass; the model combines KL and CE losses internally.
            loss = model(
                input_ids=batch["input_ids"],
                attention_mask=batch["attention_mask"],
                labels=batch["labels"],
                teacher_logits=batch["teacher_logits"],
            )["loss"]

            # Backward pass with gradient clipping, then the parameter update.
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            # Progress logging every 50 batches.
            running_loss += loss.item()
            if batch_idx % 50 == 0:
                print(f"Epoch {epoch}, Batch {batch_idx}, Loss: {loss.item():.4f}")

        print(f"Epoch {epoch} Average Loss: {running_loss/len(loader):.4f}")

    # Persist the distilled student and its tokenizer.
    model.student.save_pretrained("DeepSeek-R1-Distill-Qwen-1.5B")
    tokenizer.save_pretrained("DeepSeek-R1-Distill-Qwen-1.5B")

if __name__ == "__main__":
    train()
    