# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint=<your_checkpoint_dir> ...
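#
# Any value in this file can be overridden the same way; for example
# (hypothetical values, adjust to your setup):
# tune ... --override batch_size=4 lr=1e-5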
# Dataset and Dataloader
dataset: laurencer/yahma-alpaca-cleaned-adversarial
seed: 42
shuffle: True
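# Note: the dataset identifier is assumed to resolve against the Hugging Face
# Hub; the fixed seed makes the shuffling order reproducible across runs.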
# Checkpointing
# Removed for now given poor upload speeds for checkpoints
# hf_repo_id: laurencer/Llama7b-Alpaca-Tune-4epochs-WithColoring
checkpoint_every_n_steps: 6500 # ~6.5k steps per epoch, i.e. roughly one checkpoint per epoch
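# Rough arithmetic behind the 6500 (assuming the cleaned Alpaca set is
# ~52k examples): 52,000 examples / batch_size 8 ≈ 6,500 steps per epoch.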
# Model Arguments
# Assumes the script is run from within torchtune-colorful-llama/colorful
model_checkpoint: ../model/llama2_native.tune
tokenizer_checkpoint: ../model/tokenizer.model
color_layer_initialization: zeros
norm_before_color_layer: True
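# Assumption based on the option names: initializing the added "color"
# embedding layer to zeros makes it a no-op at step 0, so training starts
# from the base Llama2 behavior; norm_before_color_layer normalizes the
# hidden states before the color offsets are added.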
# Fine-tuning arguments
compile: True
batch_size: 8
lr: 2e-5
epochs: 1
optimizer: SGD
loss: CrossEntropyLoss
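# SGD is the lightest-weight optimizer choice here: unlike Adam it keeps no
# per-parameter moment buffers, which noticeably lowers optimizer-state
# memory for a 7B model.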
output_dir: output/alpaca-colorful-llama2-finetune
device: cuda
dtype: bf16
enable_fsdp: False
enable_activation_checkpointing: True
resume_from_checkpoint: False
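# Single-GPU run (matching --nproc_per_node 1 in the launch command above),
# so FSDP stays off. Activation checkpointing trades extra compute for lower
# peak memory, and bf16 halves memory relative to fp32.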
# Logging arguments
metric_logger_type: wandb
project: colorful-llama
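# Metrics are sent to the Weights & Biases project named above; this assumes
# you are already authenticated (e.g. via `wandb login`).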