# -*- coding: utf-8 -*-
# time: 2025/4/21 09:19
# file: qwen_distill_myModel.py
# author: hanson
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, Trainer
from datasets import load_dataset
import torch
from torch.nn import KLDivLoss
import torch.nn.functional as F

# Load the Chinese medical dialogue dataset.
dataset = load_dataset("ticoAg/Chinese-medical-dialogue", split="train")

# 90/10 train/validation split, then subsample for a quick experiment.
dataset = dataset.train_test_split(test_size=0.1)
train_dataset = dataset["train"].select(range(1000))
eval_dataset = dataset["test"].select(range(100))

# Load the teacher model (Qwen2.5-0.5B-Instruct, from a local path).
teacher_model_name = r"E:\soft\model\qwen\Qwen\Qwen2___5-0___5B-Instruct"
teacher_tokenizer = AutoTokenizer.from_pretrained(teacher_model_name, trust_remote_code=True)
teacher_model = AutoModelForCausalLM.from_pretrained(teacher_model_name, trust_remote_code=True)#.to("cuda")

# Initialize the student model (a much smaller GPT-2 architecture).
from transformers import GPT2Config, GPT2LMHeadModel

# BUG FIX: size the student vocabulary from the teacher *model*'s config, not
# from tokenizer.vocab_size. For Qwen2.5 the embedding/logit dimension
# (config.vocab_size) is larger than the tokenizer's base vocab (which excludes
# added special tokens). With a smaller student vocab, (a) input ids for added
# tokens would index out of range in the student embedding, and (b) student and
# teacher logits would have different last dimensions, breaking the KL
# distillation loss.
student_config = GPT2Config(
    vocab_size=teacher_model.config.vocab_size,
    n_positions=512,
    n_embd=256,
    n_layer=6,
    n_head=8
)
student_model = GPT2LMHeadModel(student_config)#.to("cuda")


# 自定义知识蒸馏损失函数
# Trainer with a knowledge-distillation loss.
class DistillationTrainer(Trainer):
    """Trainer combining soft-target KL distillation with the standard LM loss.

    Total loss = ``alpha * KL(student || teacher) * T**2 + (1 - alpha) * CE``,
    where ``T`` is the softmax temperature (Hinton et al., "Distilling the
    Knowledge in a Neural Network").
    """

    def __init__(self, *args, teacher_model=None, temperature=2.0, alpha=0.7, **kwargs):
        """
        Args:
            teacher_model: frozen teacher whose soft targets are distilled.
            temperature: softmax temperature T for the soft targets
                (default 2.0, matching the previous hard-coded value).
            alpha: weight of the distillation term; the CE term gets
                ``1 - alpha`` (default 0.7, matching the previous constants).
        """
        super().__init__(*args, **kwargs)
        self.teacher_model = teacher_model
        # eval() disables dropout; the teacher's forward pass additionally
        # runs under torch.no_grad() in compute_loss, so it stays frozen.
        self.teacher_model.eval()
        self.temperature = temperature
        self.alpha = alpha
        self.kl_loss = KLDivLoss(reduction="batchmean")

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        """Compute the combined distillation + cross-entropy loss.

        Args:
            model: the student model.
            inputs: tokenized batch (input_ids, attention_mask, labels).
            return_outputs: HF Trainer contract — when True, return
                ``(loss, outputs)`` (used during evaluation/prediction).
            num_items_in_batch: passed by newer Trainer versions; accepted
                for signature compatibility but unused here.
        """
        # Defensively drop a key some collators may inject into the batch.
        inputs.pop("num_items_in_batch", None)

        # Student forward pass (HF models compute CE loss when labels given).
        student_outputs = model(**inputs)
        student_logits = student_outputs.logits

        # Frozen teacher forward pass — no gradients needed.
        with torch.no_grad():
            teacher_outputs = self.teacher_model(**inputs)
            teacher_logits = teacher_outputs.logits

        # BUG FIX: the teacher's logit dimension can differ from the
        # student's (e.g. Qwen2.5's embedding is padded beyond the tokenizer
        # base vocab); align the vocab dimensions before the KL divergence.
        vocab = min(student_logits.size(-1), teacher_logits.size(-1))
        s_logits = student_logits[..., :vocab]
        t_logits = teacher_logits[..., :vocab]

        # Soft-target KL loss at temperature T, scaled by T**2 to keep
        # gradient magnitudes comparable to the CE term.
        T = self.temperature
        student_log_probs = F.log_softmax(s_logits / T, dim=-1)
        teacher_probs = F.softmax(t_logits / T, dim=-1)
        distillation_loss = self.kl_loss(student_log_probs, teacher_probs) * (T ** 2)

        # Standard cross-entropy from the student (zero when no labels).
        labels = inputs.get("labels")
        ce_loss = (
            student_outputs.loss
            if labels is not None
            else torch.tensor(0.0, device=student_logits.device)
        )

        # Weighted combination of the two terms.
        loss = self.alpha * distillation_loss + (1.0 - self.alpha) * ce_loss

        # BUG FIX: the (loss, outputs) tuple must be gated on
        # `return_outputs` (the HF Trainer contract), NOT on
        # `num_items_in_batch` — newer Trainers pass num_items_in_batch
        # during training, which made the old code return a tuple where the
        # training loop expects a scalar loss.
        return (loss, student_outputs) if return_outputs else loss


# 数据处理函数
# Batch preprocessing function for datasets.map.
def process_data(examples):
    """Tokenize a batch of dialogue examples into causal-LM training inputs.

    Builds one "患者：.../医生：..." dialogue string per example, tokenizes it
    with the teacher's tokenizer (fixed length 512, truncated and padded), and
    sets ``labels`` to the input ids with padding positions masked to -100 so
    they are ignored by the cross-entropy loss.

    Args:
        examples: a batched mapping with "input" (patient turn, may be empty)
            and "output" (doctor reply) string lists.

    Returns:
        dict with input_ids, attention_mask, and labels.
    """
    # Build one dialogue text per (input, output) pair; examples with an
    # empty/whitespace patient turn keep only the doctor's reply.
    texts = []
    for inp, out in zip(examples["input"], examples["output"]):
        if inp.strip():
            text = f"患者：{inp}\n医生：{out}"
        else:
            text = f"医生：{out}"
        texts.append(text)

    # BUG FIX: removed the debug `print(texts)` — it dumped every full batch
    # of dialogues to stdout on each map call.

    # Tokenize with the teacher's tokenizer so student and teacher share the
    # same token ids.
    tokenized = teacher_tokenizer(
        texts,
        max_length=512,
        truncation=True,
        padding="max_length",
        return_tensors="pt"
    )

    # Causal-LM labels: a copy of input_ids with padding masked to -100 so
    # padding does not contribute to the loss.
    input_ids = tokenized["input_ids"]
    attention_mask = tokenized["attention_mask"]
    labels = input_ids.clone()
    labels[attention_mask == 0] = -100
    tokenized["labels"] = labels
    return tokenized


# Tokenize both splits; drop the raw text columns so only model inputs remain.
train_dataset = train_dataset.map(process_data, batched=True, remove_columns=train_dataset.column_names)
eval_dataset = eval_dataset.map(process_data, batched=True, remove_columns=eval_dataset.column_names)

# Training configuration.
training_args = TrainingArguments(
    output_dir="./distilled_medical_model",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=100,
    # NOTE(review): this argument was renamed to `eval_strategy` in recent
    # transformers releases — confirm the installed version still accepts
    # `evaluation_strategy`.
    evaluation_strategy="epoch",
    save_strategy="epoch",
    # Requires eval each epoch (set above); restores the best checkpoint
    # by eval loss at the end of training.
    load_best_model_at_end=True,
    # NOTE(review): fp16 needs CUDA, but the `.to("cuda")` calls above are
    # commented out — verify this runs on a GPU, or drop fp16 for CPU.
    fp16=True,
    gradient_accumulation_steps=2,
)

# Build the distillation trainer with the frozen teacher.
trainer = DistillationTrainer(
    model=student_model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    teacher_model=teacher_model,
)

# Run training.
trainer.train()

# Save the distilled student model plus the (teacher's) tokenizer so the
# student can be loaded standalone from the output directory.
student_model.save_pretrained("./distilled_medical_model")
teacher_tokenizer.save_pretrained("./distilled_medical_model")