| """ |
| Agent Zero SFT: zai-org/GLM-4.7-Flash (30B MoE) |
| QLoRA (4-bit) with CPU offloading for layers that don't fit in 24GB VRAM. |
| """ |

import os

import torch
import trackio
from datasets import load_dataset
from huggingface_hub import login
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from trl import SFTConfig, SFTTrainer

# Authenticate once up front; needed for Hub access (dataset pulls and
# the model pushes below).
login(token=os.environ["HF_TOKEN"])

# Each data_files load lands in a default "train" split, hence
# split="train" for the validation file as well.
print("Loading dataset...")
train_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/train.jsonl", split="train")
val_ds = load_dataset("wheattoast11/agent-zero-sft-v1", data_files="data/validation.jsonl", split="train")
print(f"Train: {len(train_ds)}, Val: {len(val_ds)}")

# QLoRA quantization: 4-bit NF4 weights with bf16 compute. Double
# quantization shaves a little extra memory, and fp32 CPU offload lets
# modules that don't fit on the GPU sit in system RAM.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
    bnb_4bit_use_double_quant=True,
    llm_int8_enable_fp32_cpu_offload=True,
)

# Disk staging area for anything that overflows both GPU and CPU budgets.
offload_dir = "/tmp/offload"
os.makedirs(offload_dir, exist_ok=True)

print("Loading model in 4-bit with CPU offload...")
model = AutoModelForCausalLM.from_pretrained(
    "zai-org/GLM-4.7-Flash",
    quantization_config=bnb_config,
    trust_remote_code=True,
    device_map="auto",
    # Cap GPU 0 at 20 GiB to leave headroom for activations on a 24 GB
    # card; let up to 30 GiB spill over to CPU RAM.
    max_memory={0: "20GiB", "cpu": "30GiB"},
    offload_folder=offload_dir,
)
tokenizer = AutoTokenizer.from_pretrained("zai-org/GLM-4.7-Flash", trust_remote_code=True)
print("Model loaded.")

# Report how accelerate split the model across devices.
if hasattr(model, "hf_device_map"):
    devices = set(model.hf_device_map.values())
    print(f"Devices used: {devices}")
    gpu_layers = sum(1 for v in model.hf_device_map.values() if v == 0)
    cpu_layers = sum(1 for v in model.hf_device_map.values() if v == "cpu")
    print(f"GPU layers: {gpu_layers}, CPU layers: {cpu_layers}")

config = SFTConfig(
    output_dir="agent-zero-glm-4.7-v1",
    # Push checkpoints to a private Hub repo on every save.
    push_to_hub=True,
    hub_model_id="wheattoast11/agent-zero-glm-4.7-v1",
    hub_strategy="every_save",
    hub_private_repo=True,
    num_train_epochs=2,
    # Effective batch size 16: 1 sample per step x 16 accumulation steps.
    per_device_train_batch_size=1,
    gradient_accumulation_steps=16,
    learning_rate=1e-4,
    bf16=True,
    gradient_checkpointing=True,
    logging_steps=10,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=2,
    eval_strategy="steps",
    eval_steps=50,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    # trackio logging; the `project` argument requires a transformers
    # release recent enough to ship the trackio integration.
    report_to="trackio",
    project="agent-zero-finetune",
    run_name="glm-4.7-flash-qlora-v1",
)

# LoRA adapters on the attention projections only; rank 16, alpha 32
# (effective scale 2x), light dropout for regularization.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
)

print("Initializing trainer...")
trainer = SFTTrainer(
    model=model,
    processing_class=tokenizer,  # named `tokenizer=` in older TRL releases
    train_dataset=train_ds,
    eval_dataset=val_ds,
    args=config,
    peft_config=peft_config,
)
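
# Optional sanity check: with LoRA applied, only the adapter weights
# should require gradients (a fraction of a percent of the full model).
trainable = sum(p.numel() for p in trainer.model.parameters() if p.requires_grad)
total = sum(p.numel() for p in trainer.model.parameters())
print(f"Trainable params: {trainable:,} / {total:,} ({100 * trainable / total:.2f}%)")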

print("Starting training...")
trainer.train()

print("Pushing to Hub...")
trainer.push_to_hub()
trackio.finish()
print("Done! Model at: https://huggingface.co/wheattoast11/agent-zero-glm-4.7-v1")