# -*- coding: utf-8 -*-
# time: 2025/4/21 16:37
# file: ch02.py
# author: hanson

# -*- coding: utf-8 -*-
import torch
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer
)

# Configuration constants
TEACHER_MODEL = "bert-base-chinese"  # teacher checkpoint (full-size Chinese BERT)
STUDENT_MODEL = "distilbert-base-chinese"  # student checkpoint; NOTE(review): this repo id may not exist on the HF Hub — verify before running
MAX_LENGTH = 512  # hard truncation/padding length for tokenized sequences
BATCH_SIZE = 8  # per-device training batch size


# 1. Data preprocessing
def preprocess_function(examples):
    """Build tokenized model inputs from a batch of medical dialogue rows.

    Joins each diagnosis/advice pair into one structured string, then
    tokenizes with the teacher tokenizer, truncated and padded to MAX_LENGTH.

    Args:
        examples: batch dict containing "diagnosis" and "advice" string lists.

    Returns:
        Tokenizer output dict (input_ids, attention_mask, ...).
        NOTE(review): no "labels" field is produced here, yet the distillation
        loss below reads inputs["labels"] — confirm where labels come from.
    """
    texts = [
        f"诊断：{d}；建议：{a}"  # structured concatenation of the two fields
        for d, a in zip(examples["diagnosis"], examples["advice"])
    ]

    return _get_tokenizer()(
        texts,
        max_length=MAX_LENGTH,
        truncation=True,       # hard-truncate overlong dialogues
        padding="max_length",  # pad so every row has identical length
    )


def _get_tokenizer():
    """Load the teacher tokenizer once and cache it on the function object.

    The original re-loaded the tokenizer from pretrained on every map() batch,
    which is needlessly slow; caching does the expensive load exactly once
    while preserving behavior.
    """
    if not hasattr(_get_tokenizer, "_cached"):
        _get_tokenizer._cached = AutoTokenizer.from_pretrained(TEACHER_MODEL)
    return _get_tokenizer._cached


# Load the dialogue dataset and keep a 1000-sample slice for this demo run
dataset = load_dataset("ticoAg/Chinese-medical-dialogue", split="train").select(range(1000))
processed_dataset = (
    dataset.map(
        preprocess_function,
        batched=True,
        remove_columns=dataset.column_names,  # drop the raw text columns after tokenization
    )
    .with_format("torch")  # return torch tensors from __getitem__
)

# 2. Model initialization: load the teacher and student classifiers
teacher_model, student_model = (
    AutoModelForSequenceClassification.from_pretrained(checkpoint)
    for checkpoint in (TEACHER_MODEL, STUDENT_MODEL)
)


# 3. Distillation training setup
class DistillationTrainer(Trainer):
    """Trainer that mixes hard-label CE loss with a soft-label KD loss.

    The teacher's temperature-softened logits supervise the student in
    addition to the ground-truth labels (knowledge distillation).
    """

    # Hyper-parameters as class attributes so callers can override them
    # without changing the constructor signature.
    temperature = 3.0  # softening temperature applied to both logit sets
    alpha = 0.7        # weight of the hard CE term (soft term gets 1 - alpha)

    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        """Return the combined distillation loss for one batch.

        Args:
            model: the student model being trained.
            inputs: batch dict; must contain "labels" for the CE term.
                NOTE(review): the preprocessing above does not emit a
                "labels" column — confirm where labels are injected.
            return_outputs: when True, also return the student outputs
                (required by the Trainer API; the original ignored it).
            **kwargs: absorbs extra arguments newer transformers versions
                pass (e.g. num_items_in_batch).
        """
        # The Trainer moves `model` to the training device but never the
        # module-level teacher; .to() is a no-op when already in place.
        device = next(model.parameters()).device
        teacher = teacher_model.to(device)

        # Teacher forward pass without gradients (frozen reference model).
        with torch.no_grad():
            teacher_outputs = teacher(**inputs)

        student_outputs = model(**inputs)

        t = self.temperature
        # Hard loss: standard cross-entropy against ground-truth labels.
        loss_hard = torch.nn.functional.cross_entropy(
            student_outputs.logits, inputs["labels"]
        )
        # Soft loss: KL between temperature-softened distributions. The t**2
        # factor keeps soft-gradient magnitudes comparable across temperatures
        # (Hinton et al., 2015); the original omitted it.
        loss_soft = torch.nn.functional.kl_div(
            torch.log_softmax(student_outputs.logits / t, dim=-1),
            torch.softmax(teacher_outputs.logits / t, dim=-1),
            reduction="batchmean",
        ) * (t * t)

        loss = self.alpha * loss_hard + (1.0 - self.alpha) * loss_soft
        # Honor the Trainer contract for return_outputs.
        return (loss, student_outputs) if return_outputs else loss


# 4. Training setup and execution
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=3,
    per_device_train_batch_size=BATCH_SIZE,
    learning_rate=5e-5,
    # Mixed precision only when a GPU is present: fp16=True raises a
    # ValueError on CPU-only machines (the original hard-coded True).
    fp16=torch.cuda.is_available(),
)

trainer = DistillationTrainer(
    model=student_model,
    args=training_args,
    train_dataset=processed_dataset,
)
trainer.train()


# 5. Performance validation
def compute_metrics(pred):
    """Compute plain accuracy from an EvalPrediction-like object.

    Args:
        pred: object exposing `predictions` (logits array) and
            `label_ids` (gold label array).

    Returns:
        dict with a single "accuracy" entry (fraction of correct argmax).
    """
    predicted = pred.predictions.argmax(-1)
    correct = predicted == pred.label_ids
    return {"accuracy": correct.mean()}


# Attach the metric function before evaluating: the trainer above was built
# without `compute_metrics`, so "eval_accuracy" would never appear in the
# results dict and the print below would raise KeyError.
trainer.compute_metrics = compute_metrics
# NOTE(review): evaluates on a subset of the TRAINING data — use a held-out
# split for an honest measurement.
eval_results = trainer.evaluate(eval_dataset=processed_dataset.select(range(100)))
print(f"蒸馏后准确率: {eval_results['eval_accuracy']:.2%}")  # expected: reach 85%+ of teacher performance

