#!/usr/bin/env bash
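# Fine-tunes facebook/w2v-bert-2.0 for CTC speech recognition on the LibriSpeech
# "clean" config (train.100 split, evaluated on the test split), launched on 3 GPUs
# with torchrun. Assumes the transformers repository is checked out locally (the
# examples/ script path is relative to it) and that you are authenticated with the
# Hugging Face Hub, since --push_to_hub is set.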
torchrun --nproc_per_node 3 examples/pytorch/speech-recognition/run_speech_recognition_ctc.py \
--dataset_name="librispeech_asr" \
--model_name_or_path="facebook/w2v-bert-2.0" \
--dataset_config_name="clean" \
--eval_split_name="test" \
--train_split_name="train.100" \
--output_dir="./wav2vec2-bert-CV16-en-libri" \
--num_train_epochs="7" \
--per_device_train_batch_size="12" \
--gradient_accumulation_steps="2" \
--per_device_eval_batch_size="12" \
--learning_rate="3e-5" \
--warmup_steps="10000" \
--evaluation_strategy="steps" \
--text_column_name="text" \
--save_steps="500" \
--eval_steps="250" \
--save_total_limit="3" \
--gradient_checkpointing \
--chars_to_ignore , ? . ! - \; \: \" β€œ % β€˜ ” \
--fp16 --push_to_hub \
--do_train --do_eval \
--eval_metrics "wer" "cer" \
--freeze_feature_encoder false --logging_steps "5" \
--add_adapter true \
--preprocessing_num_workers "32" \
--mask_time_prob="0.0" --mask_feature_prob="0.0" \
--tokenizer_name_or_path "jonatasgrosman/wav2vec2-large-xlsr-53-english" \
--eval_accumulation_steps "2" --group_by_length --length_column_name="input_length" \
--layerdrop="0.0" \
--hidden_dropout="0.05" --activation_dropout="0.05" --feat_proj_dropout="0.05"