#!/usr/bin/env bash
## IMPORTANT: This run was stopped after 1.5 epochs (2400 steps) because the
## training loss was exploding; the best checkpoint (at step 2000) was kept
## instead.
## MAKE SURE TO DO HYPER-PARAMETER TUNING TO GET BETTER RESULTS.
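##
## A minimal, commented-out sketch of such a sweep. The learning-rate values
## and output-dir naming below are illustrative assumptions, not tuned
## settings; each run writes to its own output dir so checkpoints don't
## collide. Uncomment to run a quick grid over the learning rate:
# for lr in 1e-4 3e-4 1e-3; do
#     python -m torch.distributed.launch \
#         --nproc_per_node 2 run_speech_recognition_ctc.py \
#         --dataset_name="edinburghcstr/ami" \
#         --dataset_config_name="ihm" \
#         --model_name_or_path="facebook/wav2vec2-large-lv60" \
#         --output_dir="./sweep-lr-${lr}" \
#         --learning_rate="${lr}" \
#         --num_train_epochs="2" \
#         --per_device_train_batch_size="32" \
#         --text_column_name="text" \
#         --fp16 \
#         --do_train --do_eval
# done
##
## NOTE: torch.distributed.launch is deprecated in recent PyTorch releases;
## `torchrun --nproc_per_node 2 ...` is the recommended replacement.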
python -m torch.distributed.launch \
--nproc_per_node 2 run_speech_recognition_ctc.py \
--dataset_name="edinburghcstr/ami" \
--model_name_or_path="facebook/wav2vec2-large-lv60" \
--dataset_config_name="ihm" \
--train_split_name="train" \
--eval_split_name="validation" \
--output_dir="./" \
--preprocessing_num_workers="16" \
--overwrite_output_dir \
--num_train_epochs="2" \
--per_device_train_batch_size="32" \
--per_device_eval_batch_size="32" \
--gradient_accumulation_steps="1" \
--learning_rate="3e-4" \
--warmup_steps="500" \
--evaluation_strategy="steps" \
--text_column_name="text" \
--min_duration_in_seconds="0.25" \
--save_steps="400" \
--eval_steps="1000" \
--logging_steps="1" \
--layerdrop="0.0" \
--save_total_limit="3" \
--freeze_feature_encoder \
--gradient_checkpointing \
--chars_to_ignore , ? . ! - \; \: \" “ % ‘ ” \
--fp16 \
--group_by_length \
--push_to_hub \
--do_train \
--do_eval
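
## A hedged sketch of how the best checkpoint could be evaluated afterwards.
## It assumes the Trainer wrote ./checkpoint-2000 (per --save_steps above) and
## that the tokenizer/feature-extractor files in the output dir are picked up
## alongside it; the eval output dir name is illustrative:
# python run_speech_recognition_ctc.py \
#     --dataset_name="edinburghcstr/ami" \
#     --dataset_config_name="ihm" \
#     --model_name_or_path="./checkpoint-2000" \
#     --eval_split_name="validation" \
#     --output_dir="./eval-checkpoint-2000" \
#     --per_device_eval_batch_size="32" \
#     --text_column_name="text" \
#     --fp16 \
#     --do_eval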