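"""Fine-tune a causal language model on a JSONL dataset and zip the result.

The dataset must provide either a `text` column or `prompt`/`completion`
columns. The fine-tuned model and tokenizer are saved to --output and the
folder is then archived at --zip_path.
"""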
import argparse, os
from pathlib import Path
from datasets import load_dataset
from transformers import (
    AutoTokenizer, AutoModelForCausalLM,
    DataCollatorForLanguageModeling, Trainer, TrainingArguments
)
import zipfile


def parse_args():
    ap = argparse.ArgumentParser()
    ap.add_argument("--dataset", required=True, help="Path to the .jsonl training file")
    ap.add_argument("--output", required=True, help="Output model folder")
    ap.add_argument("--zip_path", required=True, help="Path to write the .zip archive")
    ap.add_argument("--model_name", default="Salesforce/codegen-350M-multi")
    ap.add_argument("--epochs", type=float, default=1.0)
    ap.add_argument("--batch_size", type=int, default=2)
    ap.add_argument("--block_size", type=int, default=256)
    ap.add_argument("--learning_rate", type=float, default=5e-5)
    return ap.parse_args()
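

# Example invocation (paths below are illustrative, not taken from the repo):
#   python train.py --dataset data/train.jsonl --output ./codegen-finetuned \
#       --zip_path ./codegen-finetuned.zip --epochs 1 --batch_size 2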
def main():
    a = parse_args()
    out_dir = Path(a.output).resolve()
    zip_path = Path(a.zip_path).resolve()
    out_dir.parent.mkdir(parents=True, exist_ok=True)
    print(f"📦 Loading dataset from: {a.dataset}", flush=True)
    ds = load_dataset("json", data_files=a.dataset, split="train")
    cols = ds.column_names
    print("🧾 Columns:", cols, flush=True)
    tok = AutoTokenizer.from_pretrained(a.model_name, use_fast=True)
    # Causal LM tokenizers often ship without a pad token; reuse EOS for padding.
    if tok.pad_token is None and tok.eos_token is not None:
        tok.pad_token = tok.eos_token
    model = AutoModelForCausalLM.from_pretrained(a.model_name)

    def to_text(batch):
        if "text" in batch:
            return batch["text"]
        if "prompt" in batch and "completion" in batch:
            return [str(p).rstrip() + "\n" + str(c) for p, c in zip(batch["prompt"], batch["completion"])]
        raise ValueError("Dataset must have 'text' or 'prompt' + 'completion'.")

    def tokenize(batch):
        texts = to_text(batch)
        return tok(texts, padding="max_length", truncation=True, max_length=a.block_size)
print("🔁 Tokenizing…", flush=True)
tokenized = ds.map(tokenize, batched=True, remove_columns=cols)
collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)
    args = TrainingArguments(
        output_dir=str(out_dir),
        overwrite_output_dir=True,
        per_device_train_batch_size=a.batch_size,
        num_train_epochs=a.epochs,
        learning_rate=a.learning_rate,
        logging_steps=5,
        save_strategy="no",
        report_to=[],
        fp16=False,
    )
print("⚙ Trainer…", flush=True)
trainer = Trainer(model=model, args=args, train_dataset=tokenized,
tokenizer=tok, data_collator=collator)
print("🚀 Training…", flush=True)
trainer.train()
print(f"💾 Saving to {out_dir}", flush=True)
os.makedirs(out_dir, exist_ok=True)
trainer.save_model(out_dir)
tok.save_pretrained(out_dir)

    # Zip the folder
    if zip_path.exists():
        zip_path.unlink()
    print(f"📦 Zipping → {zip_path.name}", flush=True)
    with zipfile.ZipFile(zip_path, "w", compression=zipfile.ZIP_DEFLATED) as z:
        for p in out_dir.rglob("*"):
            z.write(p, arcname=p.relative_to(out_dir))
    print("✅ Done.", flush=True)


if __name__ == "__main__":
    main()
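
# After training, the archived folder can be loaded like any Hugging Face model
# (illustrative snippet; the path is whatever --output pointed to):
#   from transformers import AutoTokenizer, AutoModelForCausalLM
#   tok = AutoTokenizer.from_pretrained("./codegen-finetuned")
#   model = AutoModelForCausalLM.from_pretrained("./codegen-finetuned")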