# Launch continued CTC fine-tuning of NbAiLab/wav2vec2-1b-npsc-nst-tpu with the
# Flax wav2vec2 CTC example script, resuming past step 740000.
#
# See https://github.com/sanchit-gandhi/seq2seq-speech/issues/23#issuecomment-1122183173:
# do_lower_case should only be set to True for the tokenizer if the tokenizer
# has upper case letters in the vocab.
# Let's also not add extra remove_punctuation.
# And limit max duration to 25 seconds.
WANDB_ENTITY=NbAiLab WANDB_PROJECT=wav2vec2 python run_flax_speech_recognition_ctc.py \
  --model_name_or_path="./" \
  --hub_model_id="NbAiLab/wav2vec2-1b-npsc-nst-tpu" \
  --tokenizer_name="./" \
  --output_dir="./" \
  --overwrite_output_dir \
  --num_train_epochs="40" \
  --skip_steps="740000" \
  --per_device_train_batch_size="2" \
  --per_device_eval_batch_size="2" \
  --gradient_accumulation_steps="1" \
  --precision="full_mixed" \
  --matmul_precision="bfloat16" \
  --multisteps \
  --learning_rate="4.906677872895671e-07" \
  --warmup_steps="2000" \
  --length_column_name="input_length" \
  --evaluation_strategy="steps" \
  --text_column_name="text" \
  --save_steps="5000" \
  --eval_steps="5000" \
  --logging_steps="100" \
  --layerdrop="0.041" \
  --attention_dropout="0.094" \
  --activation_dropout="0.055" \
  --hidden_dropout="0.047" \
  --save_total_limit="5" \
  --freeze_feature_encoder \
  --feat_proj_dropout="0.04" \
  --mask_time_prob="0.082" \
  --mask_time_length="10" \
  --mask_feature_prob="0.25" \
  --mask_feature_length="64" \
  --gradient_checkpointing \
  --min_duration_in_seconds="0.5" \
  --max_duration_in_seconds="25.0" \
  --use_auth_token \
  --seed="42" \
  --group_by_length \
  --do_train --do_eval \
  --push_to_hub \
  --preprocessing_num_workers="32" \
  --ctc_zero_infinity \
  --wandb_project="wav2vec2" \
  --wandb_name="wav2vec2-1b-npsc-nst-tpu (cont.)"