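# Smoke-test run: supervised fine-tuning (SFT) of Llama 3.1 8B Instruct with a
# LoRA adapter on a 100-example slice of the biomedical dataset. Metrics are
# reported to trackio and the resulting adapter is pushed to a private Hub repo.
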
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig

import trackio  # required at runtime by report_to="trackio"; importing it here fails fast if missing

print("=" * 80) |
|
|
print("TEST RUN: Biomedical Llama Fine-Tuning (100 examples)") |
|
|
print("=" * 80) |

print("\n[1/4] Loading dataset...")
dataset = load_dataset("panikos/biomedical-llama-training")

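# Keep the test small: 100 train / 20 eval examples are enough to verify the
# pipeline end to end before committing to the full production run.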
train_dataset = dataset["train"].select(range(100))
eval_dataset = dataset["validation"].select(range(20))

print(f" Train: {len(train_dataset)} examples")
print(f" Eval: {len(eval_dataset)} examples")

print("\n[2/4] Configuring LoRA...")
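
# Rank-16 adapter with alpha 32 (scaling factor alpha/r = 2), applied to all
# attention and MLP projections; only the adapter weights are trained.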
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
print(" LoRA rank: 16, alpha: 32")

print("\n[3/4] Initializing trainer...")
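
# Passing the model id as a string lets SFTTrainer load the base model itself;
# peft_config wraps it with the LoRA adapter before training starts.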
trainer = SFTTrainer(
    model="meta-llama/Llama-3.1-8B-Instruct",
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    peft_config=lora_config,
    args=SFTConfig(
        output_dir="llama-biomedical-test",
        num_train_epochs=1,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=8,  # effective batch size: 1 x 8 = 8
        learning_rate=2e-4,
        lr_scheduler_type="cosine",
        warmup_ratio=0.1,
        logging_steps=5,
        eval_strategy="steps",
        eval_steps=20,  # note: one epoch is only ~13 optimizer steps on a single device, so step-20 eval will not trigger in this test run
        save_strategy="epoch",
        push_to_hub=True,
        hub_model_id="panikos/llama-biomedical-test",
        hub_private_repo=True,
        bf16=True,
        gradient_checkpointing=False,
        report_to="trackio",
        project="biomedical-llama-training",
        run_name="test-run-100-examples-v3",
    ),
)

print("\n[4/4] Starting training...")
print(" Model: meta-llama/Llama-3.1-8B-Instruct")
print(" Method: SFT with LoRA")
print(" Epochs: 1")
print(" Batch size: 1 x 8 = 8 (effective) - optimized for memory")
print(" Gradient checkpointing: DISABLED")
print()

trainer.train()

print("\n" + "=" * 80)
print("Pushing model to Hub...")
print("=" * 80)
trainer.push_to_hub()

print("\n" + "=" * 80)
print("TEST COMPLETE!")
print("=" * 80)
print("\nModel: https://huggingface.co/panikos/llama-biomedical-test")
print("Dashboard: https://panikos-trackio.hf.space/")
print("Ready for full production training!")
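
# Optional follow-up (a minimal sketch, assuming the run above completed and
# the tokenizer was pushed alongside the adapter): load the fine-tuned adapter
# for a quick generation sanity check. Uncomment to run.
#
# from transformers import AutoTokenizer
# from peft import AutoPeftModelForCausalLM
#
# model = AutoPeftModelForCausalLM.from_pretrained(
#     "panikos/llama-biomedical-test", device_map="auto", torch_dtype="auto"
# )
# tokenizer = AutoTokenizer.from_pretrained("panikos/llama-biomedical-test")
# prompt = tokenizer.apply_chat_template(
#     [{"role": "user", "content": "What does hemoglobin do?"}],  # hypothetical test prompt
#     tokenize=False,
#     add_generation_prompt=True,
# )
# inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# print(tokenizer.decode(model.generate(**inputs, max_new_tokens=100)[0]))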