import trackio
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig

| print("π¦ Loading dataset...") |
| dataset = load_dataset("open-r1/codeforces-cots", "solutions_w_editorials", split="train") |
|
|
| print(f"β
Dataset loaded: {len(dataset)} examples") |
|
|
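# Optional sanity check (a sketch): peek at the first conversation. This assumes
# the "messages" column holds chat-format lists of {"role", "content"} dicts,
# which is the layout SFTTrainer expects for conversational data.
first_turn = dataset[0]["messages"][0]
print(f"First turn: role={first_turn['role']!r}, content preview: {str(first_turn['content'])[:80]!r}")
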
| print("π Creating train/eval split...") |
| dataset_split = dataset.train_test_split(test_size=0.05, seed=42) |
| train_dataset = dataset_split["train"].select_columns(["messages"]) |
| eval_dataset = dataset_split["test"].select_columns(["messages"]) |
|
|
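# Only the "messages" column is kept: SFTTrainer applies the tokenizer's chat
# template to conversational data itself, so the dataset's other columns are
# not needed for training.
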
config = SFTConfig(
    output_dir="qwen3-0.6b-codeforces-cots",
    push_to_hub=True,
    hub_model_id="gengxin-zhang/qwen3-0.6b-codeforces-cots",
    hub_strategy="every_save",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-5,
    logging_steps=10,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,
    eval_strategy="steps",
    eval_steps=100,
    warmup_steps=100,
    lr_scheduler_type="cosine",
    report_to="trackio",
    project="qwen3_codeforces",
    run_name="qwen3-0.6b-cots-sft",
    max_length=2048,
)

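# With per_device_train_batch_size=4 and gradient_accumulation_steps=4, the
# effective batch size is 16 sequences per device per optimizer step.
# hub_strategy="every_save" uploads each checkpoint (every 100 steps) to the
# Hub, while save_total_limit=2 keeps only the two most recent checkpoints
# on local disk.
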
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)

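# Rank-16 adapters on every attention projection (q/k/v/o) and MLP projection
# (gate/up/down). With lora_alpha=32, adapter updates are scaled by
# alpha / r = 2. Only the adapter weights are trained; the base model stays frozen.
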
| print("π― Initializing trainer...") |
| trainer = SFTTrainer( |
| model="Qwen/Qwen3-0.6B", |
| train_dataset=train_dataset, |
| eval_dataset=eval_dataset, |
| args=config, |
| peft_config=peft_config, |
| ) |
|
|
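# Optional sanity check (a sketch): SFTTrainer wraps the base model in a
# PeftModel when peft_config is given, so the PEFT helper below should report
# the small fraction of parameters LoRA actually trains.
trainer.model.print_trainable_parameters()
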
| print("π Starting training...") |
| trainer.train() |
|
|
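# Optional final pass over the held-out split (a sketch; "eval_loss" is the
# standard key in the metrics dict returned by Trainer.evaluate()).
metrics = trainer.evaluate()
print(f"📊 Final eval loss: {metrics['eval_loss']:.4f}")
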
| print("πΎ Pushing to Hub...") |
| trainer.push_to_hub() |
|
|
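# Since training used LoRA, the pushed repo should contain the adapter weights
# (plus tokenizer and config), not a merged model. A sketch for reloading later:
#   from peft import AutoPeftModelForCausalLM
#   model = AutoPeftModelForCausalLM.from_pretrained("gengxin-zhang/qwen3-0.6b-codeforces-cots")
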
trackio.finish()