# Continue causal-LM training of GPT-2 with the Flax example script.
# Model weights, config, and tokenizer are all loaded from the current directory.
python run_clm_flax.py \
--output_dir="./" \
--model_type="gpt2" \
--model_name_or_path="." \
--config_name="./" \
--tokenizer_name="./" \
--train_file="/mnt/disks/flaxdisk/vgd/vgd_train.json" \
--validation_file="/mnt/disks/flaxdisk/vgd/vgd_eval.json" \
--cache_dir="/mnt/disks/flaxdisk/cache/" \
--do_train --do_eval \
--block_size="512" \
--per_device_train_batch_size="8" \
--per_device_eval_batch_size="8" \
--learning_rate="4e-5" \
--warmup_steps="1000" \
--adam_beta1="0.9" --adam_beta2="0.98" --weight_decay="0.01" \
--overwrite_output_dir \
--num_train_epochs="10" \
--logging_steps="500" \
--save_steps="1000" \
--eval_steps="1000" \
--push_to_hub
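
With --push_to_hub, the script uploads the checkpoints saved in --output_dir to the Hugging Face Hub as training progresses. Below is a minimal sketch of loading the pushed checkpoint and sampling from it afterwards; the repo id is a placeholder (the actual repository name is not given here) and the prompt is arbitrary:

from transformers import AutoTokenizer, FlaxGPT2LMHeadModel

repo_id = "username/model-name"  # placeholder: replace with the repo this run pushes to
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = FlaxGPT2LMHeadModel.from_pretrained(repo_id)

# Encode a prompt as numpy arrays (the input format Flax models expect) and sample.
inputs = tokenizer("Once upon a time", return_tensors="np")
outputs = model.generate(
    inputs.input_ids,
    max_length=50,
    do_sample=True,
    top_k=50,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token, so reuse EOS
)
print(tokenizer.decode(outputs.sequences[0], skip_special_tokens=True))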