def format_distillation_data(examples):
    """Format a batch of examples into Qwen3 chat text for distillation training.

    This function is called via ``Dataset.map(..., batched=True)``, so
    ``examples`` is a COLUMN-oriented mapping ``{column_name: list_of_values}``,
    not a list of row dicts.  (The previous version indexed ``examples[i]`` as
    if it were row-oriented, which raises ``KeyError`` under ``batched=True``.)

    Two row shapes are supported:
      * reasoning data:   'problem' + 'cot' (chain-of-thought) columns
      * instruction data: 'instruction' + 'output' columns
    Rows matching neither shape are dropped.  When datasets with different
    schemas are concatenated, missing columns are filled with ``None`` per row,
    hence the ``is not None`` checks below.

    Args:
        examples: column-oriented batch mapping from ``datasets``.

    Returns:
        dict: ``{"text": [formatted_string, ...]}`` with one entry per kept row.
    """
    formatted_texts = []

    # Number of rows = length of any column (all columns have equal length).
    num_rows = len(next(iter(examples.values()))) if examples else 0

    for i in range(num_rows):
        # Re-assemble the i-th row from the column-oriented batch.
        row = {column: values[i] for column, values in examples.items()}

        # Reasoning data (has a chain-of-thought field).
        if row.get('cot') is not None:
            instruction = row.get('problem') or ''
            teacher_response = row['cot']
        # Plain instruction-tuning data.
        elif row.get('instruction') is not None and row.get('output') is not None:
            instruction = row['instruction']
            teacher_response = row['output']
        else:
            # Row matches neither schema — skip it.
            continue

        # Build the two-turn conversation.
        conversation = [
            {"role": "user", "content": instruction},
            {"role": "assistant", "content": teacher_response},
        ]

        # Render in Qwen3 chat-template format.
        text = ""
        for turn in conversation:
            if turn["role"] == "user":
                text += f"<|im_start|>user\n{turn['content']}<|im_end|>\n<|im_start|>assistant\n"
            else:
                text += f"{turn['content']}<|im_end|>\n"

        formatted_texts.append(text)

    return {"text": formatted_texts}


# Apply the distillation formatter to both splits, dropping the raw columns.
def _format_split(split_name):
    """Map format_distillation_data over one split of the combined dataset."""
    split = combined_dataset[split_name]
    return split.map(
        format_distillation_data,
        batched=True,
        remove_columns=split.column_names,
    )

train_dataset = _format_split("train")
eval_dataset = _format_split("test")

# Training configuration (tuned for an RTX 4060's VRAM budget).
# NOTE: with load_best_model_at_end=True and step-based eval/save strategies,
# transformers requires save_steps to be a round multiple of eval_steps; the
# previous pairing (save_steps=500, eval_steps=200) raised a ValueError at
# TrainingArguments construction time, so save_steps is now 600.
training_args = TrainingArguments(
    output_dir="./qwen3-distilled-model",
    per_device_train_batch_size=2,  # small batch fits 8 GB VRAM
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=4,  # effective train batch = 2 * 4 = 8
    learning_rate=2e-5,  # distillation tolerates a slightly lower LR
    num_train_epochs=3,
    logging_steps=50,
    evaluation_strategy="steps",
    eval_steps=200,
    save_steps=600,  # must be a multiple of eval_steps (see NOTE above)
    fp16=not is_bfloat16_supported(),  # fp16 fallback on GPUs without bf16
    bf16=is_bfloat16_supported(),
    optim="adamw_8bit",  # 8-bit optimizer states save VRAM
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    report_to="none",
    load_best_model_at_end=True,  # restore the best (lowest eval loss) checkpoint
)

# Build the supervised fine-tuning trainer over the formatted text datasets.
trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    dataset_text_field="text",  # column produced by format_distillation_data
    max_seq_length=2048,
)

# Run the distillation fine-tune.
print("开始蒸馏训练...")
trainer.train()

# Persist the fine-tuned model and its tokenizer side by side.
trainer.save_model()
tokenizer.save_pretrained("./qwen3-distilled-model")