Godsonntungi2 committed on
Commit 3de49a5 · verified · 1 Parent(s): 2521b06

Reduce to 100 examples for quick demo

Files changed (1):
  1. train_qwen3_codeforces.py +142 -0
train_qwen3_codeforces.py ADDED
@@ -0,0 +1,142 @@
+ # /// script
+ # dependencies = [
+ #     "trl>=0.12.0",
+ #     "peft>=0.7.0",
+ #     "transformers>=4.36.0",
+ #     "accelerate>=0.24.0",
+ #     "datasets>=2.14.0",
+ #     "trackio",
+ #     "torch",
+ #     "bitsandbytes",
+ # ]
+ # ///
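# Note: the block above is PEP 723 inline script metadata, so a PEP 723-aware
# runner can resolve the dependencies and launch the script directly,
# e.g. `uv run train_qwen3_codeforces.py`.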
+
+ import os
+ import trackio
+ from datasets import load_dataset
+ from peft import LoraConfig
+ from trl import SFTTrainer, SFTConfig
+ from transformers import AutoTokenizer
+ from huggingface_hub import login
+
+ # Login with HF token
+ hf_token = os.environ.get("HF_TOKEN")
+ if hf_token:
+     login(token=hf_token)
+     print("Logged in to Hugging Face Hub")
+ else:
+     print("Warning: HF_TOKEN not found in environment")
+
+ # Load dataset - using the solutions configuration with messages format
+ print("Loading open-r1/codeforces-cots dataset...")
+ dataset = load_dataset("open-r1/codeforces-cots", "solutions", split="train")
+ print(f"Full dataset loaded: {len(dataset)} examples")
+
+ # Take 100 examples for quick demo
+ dataset = dataset.select(range(min(100, len(dataset))))
+ print(f"Using {len(dataset)} examples for demo training")
+
+ # The dataset has both 'prompt' (string) and 'messages' (chat format) columns.
+ # TRL gets confused with both present. Keep only 'messages' for chat-based SFT.
+ print("Preparing dataset for chat-based SFT...")
+
+ # Filter for valid messages and keep only the messages column
+ def filter_valid_messages(example):
+     """Filter out samples with empty or invalid messages."""
+     messages = example.get("messages", [])
+     if not messages or len(messages) < 2:
+         return False
+     for msg in messages:
+         if not msg.get("content"):
+             return False
+     return True
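# For reference, the shape this filter accepts (contents invented for
# illustration, not taken from the dataset): a conversation with at least
# two turns, each with non-empty content.
#     example_ok = {"messages": [
#         {"role": "user", "content": "Problem: given n integers, ..."},
#         {"role": "assistant", "content": "Reasoning, then a solution."},
#     ]}
#     assert filter_valid_messages(example_ok)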
+
+ dataset = dataset.filter(filter_valid_messages)
+ print(f"After filtering: {len(dataset)} examples")
+
+ # Remove all columns except 'messages' to avoid confusion
+ columns_to_remove = [col for col in dataset.column_names if col != "messages"]
+ dataset = dataset.remove_columns(columns_to_remove)
+ print(f"Dataset columns: {dataset.column_names}")
+
+ # Create train/eval split
+ print("Creating train/eval split...")
+ dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
+ train_dataset = dataset_split["train"]
+ eval_dataset = dataset_split["test"]
+ print(f"  Train: {len(train_dataset)} examples")
+ print(f"  Eval: {len(eval_dataset)} examples")
+
+ # Load tokenizer for chat template
+ print("Loading tokenizer...")
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
+ if tokenizer.pad_token is None:
+     tokenizer.pad_token = tokenizer.eos_token
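# Optional sanity check (illustrative, not part of the committed script):
# render one conversation through the chat template; this is the string form
# SFTTrainer tokenizes for 'messages'-style datasets.
#     print(tokenizer.apply_chat_template(train_dataset[0]["messages"], tokenize=False)[:500])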
+
+ # Training configuration
+ config = SFTConfig(
+     # CRITICAL: Hub settings
+     output_dir="qwen3-0.6b-codeforces-sft",
+     push_to_hub=True,
+     hub_model_id="Godsonntungi2/qwen3-0.6b-codeforces-sft",
+     hub_strategy="every_save",
+     hub_token=hf_token,  # Explicitly pass token
+
+     # Training parameters
+     num_train_epochs=3,
+     per_device_train_batch_size=2,
+     per_device_eval_batch_size=1,  # Smaller eval batch to prevent OOM
+     gradient_accumulation_steps=8,
+     learning_rate=2e-5,
+     max_length=1024,  # Reduced from 2048 to save memory
+
+     # Logging & checkpointing
+     logging_steps=10,
+     save_strategy="steps",
+     save_steps=100,
+     save_total_limit=2,
+
+     # Evaluation - disabled to save memory and time
+     eval_strategy="no",
+
+     # Optimization
+     warmup_ratio=0.1,
+     lr_scheduler_type="cosine",
+     gradient_checkpointing=True,
+     bf16=True,
+
+     # Monitoring
+     report_to="trackio",
+     project="qwen3-codeforces-sft",
+     run_name="demo-1k-v2",
+ )
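# Note: per_device_train_batch_size=2 with gradient_accumulation_steps=8
# gives an effective batch size of 2 * 8 = 16 per device.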
+
+ # LoRA configuration for efficient training
+ peft_config = LoraConfig(
+     r=16,
+     lora_alpha=32,
+     lora_dropout=0.05,
+     bias="none",
+     task_type="CAUSAL_LM",
+     target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
+ )
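# Aside (inspection only, not part of the training run): wrapping the base
# model by hand shows how small the LoRA-trainable fraction is:
#     from transformers import AutoModelForCausalLM
#     from peft import get_peft_model
#     base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B")
#     get_peft_model(base, peft_config).print_trainable_parameters()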
+
+ # Initialize and train
+ print("Initializing trainer with Qwen/Qwen3-0.6B...")
+ trainer = SFTTrainer(
+     model="Qwen/Qwen3-0.6B",
+     train_dataset=train_dataset,
+     eval_dataset=eval_dataset,  # held out but unused while eval_strategy="no"
+     processing_class=tokenizer,
+     args=config,
+     peft_config=peft_config,
+ )
+
+ print("Starting training...")
+ trainer.train()
+
+ print("Pushing to Hub...")
+ trainer.push_to_hub()
+
+ print("Complete! Model at: https://huggingface.co/Godsonntungi2/qwen3-0.6b-codeforces-sft")
+ print("View metrics at: https://huggingface.co/spaces/Godsonntungi2/trackio")