#!/usr/bin/env bash
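
# Grid search over learning rate, per-device batch size, and gradient
# accumulation steps for seq2seq speech recognition on LibriSpeech
# (clean, train.100): 5 x 4 x 3 = 60 configurations, one epoch each.
# Note that the effective batch size of a run is
# per_device_train_batch_size * gradient_accumulation_steps.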

declare -a learning_rates=("1e-5" "3e-5" "1e-4" "3e-4" "1e-3")
declare -a batch_sizes=("8" "12" "14" "16")
declare -a gradient_accumulation_step_sizes=("2" "4" "8")

for learning_rate in "${learning_rates[@]}"; do
  for batch_size in "${batch_sizes[@]}"; do
    for gradient_accumulation_steps in "${gradient_accumulation_step_sizes[@]}"; do
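      # Rebuild the model in ./ before each run so every configuration starts
      # from the same initial checkpoint (create_model.py is assumed to write
      # a freshly initialized model and tokenizer to the working directory).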
      python create_model.py
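
      # Train and evaluate this combination on GPU 0; metrics are logged to
      # Weights & Biases and the resulting model is pushed to the Hub.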
      CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_seq2seq.py \
        --dataset_name="librispeech_asr" \
        --model_name_or_path="./" \
        --tokenizer_name="./" \
        --dataset_config_name="clean" \
        --train_split_name="train.100" \
        --eval_split_name="validation" \
        --output_dir="./" \
        --preprocessing_num_workers="1" \
        --length_column_name="input_length" \
        --overwrite_output_dir \
        --num_train_epochs="1" \
        --per_device_train_batch_size="${batch_size}" \
        --per_device_eval_batch_size="${batch_size}" \
        --gradient_accumulation_steps="${gradient_accumulation_steps}" \
        --generation_max_length="40" \
        --generation_num_beams="1" \
        --learning_rate="${learning_rate}" \
        --warmup_steps="500" \
        --evaluation_strategy="steps" \
        --text_column_name="text" \
        --save_steps="500" \
        --eval_steps="500" \
        --logging_steps="1" \
        --save_total_limit="1" \
        --freeze_feature_encoder \
        --gradient_checkpointing \
        --fp16 \
        --group_by_length \
        --predict_with_generate \
        --do_lower_case \
        --do_train \
        --do_eval \
        --report_to="wandb" \
        --push_to_hub \
        --use_auth_token
    done
  done
done