davanstrien (HF Staff) and Claude Opus 4.5 committed
Commit 1cdb76a · 1 parent: 0f6404b

Add Latin LLM streaming training script


Train an LLM on Latin using streaming datasets from FineWeb-2.
Demonstrates continued pretraining without downloading data.
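
The whole trick is passing `streaming=True` to `load_dataset`; a minimal sketch of the idea (same dataset and text column as the script):

```python
from datasets import load_dataset

# Streaming returns an IterableDataset: shards are fetched lazily from the
# Hub as you iterate, and nothing is written to the local datasets cache.
ds = load_dataset(
    "HuggingFaceFW/fineweb-2", name="lat_Latn", split="train", streaming=True
)

# Inspect a few documents without downloading the ~1.7GB corpus.
for example in ds.take(3):
    print(example["text"][:80])
```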

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

Files changed (1)
  1. latin-llm-streaming.py +278 -0
latin-llm-streaming.py ADDED
@@ -0,0 +1,278 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "unsloth",
#     "datasets",
#     "trl",
#     "huggingface_hub",
#     "wandb",
# ]
# ///
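# The block above is PEP 723 inline script metadata: `uv run` (and therefore
# `hf jobs uv run`) resolves these dependencies into an ephemeral environment,
# so the script needs no requirements.txt or manual install step.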
"""
Train an LLM on Latin using streaming datasets.

Demonstrates continued pretraining with streaming - no disk space needed.
Uses FineWeb-2's Latin subset (1.47M texts, ~1.7GB).

Run locally (if you have a GPU):
    uv run latin-llm-streaming.py

Run on HF Jobs:
    hf jobs uv run latin-llm-streaming.py --flavor a100-large --secrets HF_TOKEN

With custom settings:
    hf jobs uv run latin-llm-streaming.py --flavor a100-large --secrets HF_TOKEN -- \
        --max-steps 1000 --output-repo your-username/qwen-latin
"""

import argparse
import os
import time


def parse_args():
    parser = argparse.ArgumentParser(
        description="Train an LLM on Latin using streaming datasets"
    )
    parser.add_argument(
        "--base-model",
        default="unsloth/Qwen3-0.6B-Base-unsloth-bnb-4bit",
        help="Base model to fine-tune",
    )
    parser.add_argument(
        "--output-repo",
        default=None,
        help="HF Hub repo to push model to (e.g., 'username/qwen-latin')",
    )
    parser.add_argument(
        "--max-steps",
        type=int,
        default=500,
        help="Number of training steps (default: 500)",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        default=4,
        help="Per-device batch size (default: 4)",
    )
    parser.add_argument(
        "--gradient-accumulation",
        type=int,
        default=4,
        help="Gradient accumulation steps (default: 4)",
    )
    parser.add_argument(
        "--learning-rate",
        type=float,
        default=2e-4,
        help="Learning rate (default: 2e-4)",
    )
    parser.add_argument(
        "--max-seq-length",
        type=int,
        default=2048,
        help="Maximum sequence length (default: 2048)",
    )
    parser.add_argument(
        "--lora-r",
        type=int,
        default=16,
        help="LoRA rank (default: 16)",
    )
    parser.add_argument(
        "--save-local",
        default="latin-llm-output",
        help="Local directory to save model (default: latin-llm-output)",
    )
    parser.add_argument(
        "--wandb-project",
        default="latin-llm-streaming",
        help="Wandb project name (default: latin-llm-streaming)",
    )
    parser.add_argument(
        "--wandb-run-name",
        default=None,
        help="Wandb run name (default: auto-generated)",
    )
    return parser.parse_args()


def main():
    args = parse_args()

    print("=" * 70)
    print("Latin LLM Training with Streaming Datasets")
    print("=" * 70)
    print("\nConfiguration:")
    print(f"  Base model: {args.base_model}")
    print(f"  Max steps: {args.max_steps}")
    print(
        f"  Batch size: {args.batch_size} x {args.gradient_accumulation} "
        f"= {args.batch_size * args.gradient_accumulation}"
    )
    print(f"  Learning rate: {args.learning_rate}")
    print(f"  LoRA rank: {args.lora_r}")
    print(f"  Output repo: {args.output_repo or '(local only)'}")
    print(f"  Wandb project: {args.wandb_project}")
    print()
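
    # Rough token budget (an estimate, not measured): each optimizer step sees
    # batch_size * gradient_accumulation sequences of up to max_seq_length
    # tokens, so the defaults (4 * 4 * 2048) give ~32k tokens per step, or on
    # the order of 16M tokens over 500 steps. With packing=False, short
    # documents are padded rather than concatenated, so the true count is lower.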

    # Heavy imports are deferred so the configuration banner prints immediately
    from unsloth import FastLanguageModel
    from datasets import load_dataset
    from trl import SFTTrainer, SFTConfig
    from huggingface_hub import login

    # Login if pushing to hub
    if args.output_repo:
        token = os.environ.get("HF_TOKEN")
        if token:
            login(token=token)
            print("✓ Logged in to Hugging Face Hub")
        else:
            print("⚠ HF_TOKEN not set - model will only be saved locally")
            args.output_repo = None

    # Initialize wandb
    import wandb

    wandb_key = os.environ.get("WANDB_API_KEY")
    if wandb_key:
        wandb.login(key=wandb_key)
    wandb.init(
        project=args.wandb_project,
        name=args.wandb_run_name or f"latin-{args.max_steps}steps",
        config={
            "base_model": args.base_model,
            "max_steps": args.max_steps,
            "batch_size": args.batch_size,
            "gradient_accumulation": args.gradient_accumulation,
            "effective_batch_size": args.batch_size * args.gradient_accumulation,
            "learning_rate": args.learning_rate,
            "lora_r": args.lora_r,
            "max_seq_length": args.max_seq_length,
            "dataset": "HuggingFaceFW/fineweb-2 (lat_Latn)",
        },
    )
    print(f"✓ Wandb initialized: {wandb.run.url}")
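
    # Memory note (approximate): load_in_4bit keeps the frozen base weights
    # quantized (the default checkpoint is pre-quantized with bitsandbytes),
    # and LoRA trains only small low-rank adapters on top, so this 0.6B model
    # fits comfortably on a single modest GPU.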

    # 1. Load model
    print("\n[1/5] Loading model...")
    start = time.time()

    model, tokenizer = FastLanguageModel.from_pretrained(
        args.base_model,
        max_seq_length=args.max_seq_length,
        load_in_4bit=True,
    )

    model = FastLanguageModel.get_peft_model(
        model,
        r=args.lora_r,
        lora_alpha=args.lora_r * 2,
        lora_dropout=0,
        target_modules=[
            "q_proj", "k_proj", "v_proj", "o_proj",
            "gate_proj", "up_proj", "down_proj",
        ],
        bias="none",
        use_gradient_checkpointing="unsloth",
        random_state=3407,
    )
    print(f"✓ Model loaded in {time.time() - start:.1f}s")
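
    # streaming=True returns an IterableDataset instead of downloading the
    # corpus: examples are read shard by shard from the Hub as training
    # iterates, so the ~1.7GB of Latin text never touches local disk.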

    # 2. Load streaming dataset
    print("\n[2/5] Loading streaming dataset (FineWeb-2 Latin)...")
    start = time.time()

    dataset = load_dataset(
        "HuggingFaceFW/fineweb-2",
        name="lat_Latn",
        split="train",
        streaming=True,
    )

    # Peek at the data
    sample = next(iter(dataset))
    print(f"✓ Dataset ready in {time.time() - start:.1f}s")
    print(f"  Sample: {sample['text'][:100]}...")

    # 3. Format dataset
    print("\n[3/5] Preparing dataset...")

    def format_text(example):
        return {"text": example["text"] + tokenizer.eos_token}

    formatted_dataset = dataset.map(format_text)
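
    # Note: .map() on an IterableDataset is lazy - format_text (which appends
    # the EOS token to mark document boundaries) runs on the fly as the trainer
    # pulls examples, never over the whole corpus up front.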

    # 4. Train
    print(f"\n[4/5] Training for {args.max_steps} steps...")
    start = time.time()

    trainer = SFTTrainer(
        model=model,
        tokenizer=tokenizer,
        train_dataset=formatted_dataset,
        args=SFTConfig(
            per_device_train_batch_size=args.batch_size,
            gradient_accumulation_steps=args.gradient_accumulation,
            warmup_steps=min(10, args.max_steps // 10),
            max_steps=args.max_steps,
            learning_rate=args.learning_rate,
            logging_steps=max(1, args.max_steps // 20),
            optim="adamw_8bit",
            weight_decay=0.01,
            lr_scheduler_type="linear",
            seed=3407,
            output_dir=args.save_local,
            report_to="wandb",
            run_name=args.wandb_run_name or f"latin-{args.max_steps}steps",
            dataset_text_field="text",
            max_seq_length=args.max_seq_length,
            packing=False,
        ),
    )

    trainer.train()
    train_time = time.time() - start

    print(f"\n✓ Training completed in {train_time / 60:.1f} minutes")
    print(f"  Speed: {args.max_steps / train_time:.2f} it/s")
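
    # The model is PEFT-wrapped, so save_pretrained/push_to_hub below write the
    # LoRA adapter weights (small), not the full base model; to reuse it, load
    # the same base model and apply the adapter on top.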

    # 5. Save and push
    print("\n[5/5] Saving model...")

    # Save locally
    model.save_pretrained(args.save_local)
    tokenizer.save_pretrained(args.save_local)
    print(f"✓ Saved locally to {args.save_local}/")

    # Push to hub if configured
    if args.output_repo:
        print(f"\nPushing to {args.output_repo}...")
        model.push_to_hub(args.output_repo, tokenizer=tokenizer)
        print(f"✓ Model available at: https://huggingface.co/{args.output_repo}")
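
    # FastLanguageModel.for_inference() below switches unsloth into its faster
    # inference mode; sampling with temperature 0.7 means the generated text
    # varies from run to run.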

    # Quick inference test
    print("\n" + "=" * 70)
    print("Quick inference test:")
    print("=" * 70)

    FastLanguageModel.for_inference(model)

    prompt = "Lingua Latina est"  # "The Latin language is ..."
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    outputs = model.generate(
        **inputs,
        max_new_tokens=64,
        temperature=0.7,
        do_sample=True,
    )
    generated = tokenizer.decode(outputs[0], skip_special_tokens=True)

    print(f"\nPrompt: {prompt}")
    print(f"Generated: {generated}")

    print("\n" + "=" * 70)
    print("Done!")
    print("=" * 70)


if __name__ == "__main__":
    main()