# Runs the full_finetune.py recipe
#
# To launch, run the following command from root:
#    tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune --override model_checkpoint= ...

# Dataset and Dataloader
dataset: yahma/alpaca-cleaned
seed: 42
shuffle: True

# Checkpointing
# Removed for now given poor upload speeds for checkpoints
# hf_repo_id: laurencer/Llama7b-Alpaca-Tune-4epochs-WithColoring
checkpoint_every_n_steps: 5000  # 25k steps per epoch

# Model Arguments
model_checkpoint: model/llama2_native.tune
tokenizer_checkpoint: model/tokenizer.model

# Fine-tuning arguments
batch_size: 2
lr: 2e-5
epochs: 4
optimizer: SGD
loss: CrossEntropyLoss
output_dir: output/alpaca-llama2-finetune
device: cuda
dtype: fp16
enable_fsdp: False
enable_activation_checkpointing: True
resume_from_checkpoint: False

# Logging arguments
metric_logger_type: wandb
project: torchtune
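
# Any key in this file can also be set from the launch command. A hypothetical
# example, assuming the key=value --override syntax shown in the header comment
# (paths and values here are placeholders; adjust to your setup):
#   tune --nnodes 1 --nproc_per_node 1 --config alpaca_llama2_full_finetune \
#     --override model_checkpoint=model/llama2_native.tune batch_size=4 lr=1e-5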