# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint=<your_checkpoint_dir> ...
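#
# Any of the arguments below can be overridden the same way; e.g. (hypothetical
# values, shown only to illustrate the --override syntax used above):
# tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override batch_size=4 epochs=1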

# Dataset and Dataloader
dataset: laurencer/yahma-alpaca-cleaned-adversarial  # Hugging Face Hub dataset id
seed: 42
shuffle: True

# Checkpointing
# Disabled for now due to slow checkpoint upload speeds
# hf_repo_id: laurencer/Llama7b-Alpaca-Tune-4epochs-WithColoring
checkpoint_every_n_steps: 500  # ~6k steps per epoch at batch_size 8

# Model Arguments
model_checkpoint: model/llama2_native.tune
tokenizer_checkpoint: model/tokenizer.model
color_layer_initialization: zeros
norm_before_color_layer: True
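# Assumption (inferred from the experiment name, not verified against the
# recipe code): the "color" layer adds a learned per-token adjustment on top of
# the token embeddings, so a zeros initialization starts training from the
# unmodified base model.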

# Fine-tuning arguments
compile: False
batch_size: 8
lr: 2e-5
epochs: 4
optimizer: SGD  # resolved by name; presumably torch.optim.SGD
loss: CrossEntropyLoss  # presumably torch.nn.CrossEntropyLoss
output_dir: output/alpaca-colorful-llama2-finetune
device: cuda
dtype: bf16
enable_fsdp: False  # single-process run (nproc_per_node 1), so no sharding needed
enable_activation_checkpointing: True  # recompute activations in backward to save memory
resume_from_checkpoint: False
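# Set resume_from_checkpoint: True to continue training from a previously saved
# checkpoint instead of the initial model_checkpoint (assumed recipe behavior).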

# Logging arguments
metric_logger_type: wandb
project: torchtune