#!/usr/bin/env bash
deepspeed run_speech_recognition_seq2seq_streaming.py \
  --deepspeed="ds_config.json" \
  --model_name_or_path="openai/whisper-large-v2" \
  --dataset_name="librispeech_asr" \
  --dataset_config_name="all" \
  --train_split_name="train.clean.100+train.clean.360+train.other.500" \
  --eval_split_name="validation.clean" \
  --output_dir="./" \
  --max_steps="25000" \
  --per_device_train_batch_size="32" \
  --per_device_eval_batch_size="8" \
  --logging_steps="25" \
  --learning_rate="3e-6" \
  --warmup_steps="50" \
  --evaluation_strategy="steps" \
  --eval_steps="5000" \
  --save_strategy="steps" \
  --save_steps="5000" \
  --generation_max_length="225" \
  --length_column_name="input_length" \
  --max_duration_in_seconds="30" \
  --freeze_feature_encoder="False" \
  --report_to="tensorboard" \
  --language="en" \
  --task="transcribe" \
  --do_lower_case \
  --gradient_checkpointing \
  --fp16 \
  --overwrite_output_dir \
  --do_train \
  --do_eval \
  --predict_with_generate \
  --use_auth_token \
  --push_to_hub
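
# The --deepspeed flag above expects a DeepSpeed config file at ds_config.json,
# created before the command is launched. The commented block below is a minimal
# sketch of what such a file might contain (an illustrative assumption, not
# necessarily the exact config used with this script): ZeRO stage 2 with CPU
# optimizer offload, fp16, and "auto" placeholders that the Hugging Face Trainer
# resolves from the command-line arguments (learning rate, batch size, etc.).
#
# cat > ds_config.json <<'EOF'
# {
#     "fp16": {
#         "enabled": "auto",
#         "loss_scale": 0,
#         "loss_scale_window": 1000,
#         "initial_scale_power": 16,
#         "hysteresis": 2,
#         "min_loss_scale": 1
#     },
#     "optimizer": {
#         "type": "AdamW",
#         "params": {
#             "lr": "auto",
#             "betas": "auto",
#             "eps": "auto",
#             "weight_decay": "auto"
#         }
#     },
#     "scheduler": {
#         "type": "WarmupDecayLR",
#         "params": {
#             "last_batch_iteration": -1,
#             "total_num_steps": "auto",
#             "warmup_min_lr": "auto",
#             "warmup_max_lr": "auto",
#             "warmup_num_steps": "auto"
#         }
#     },
#     "zero_optimization": {
#         "stage": 2,
#         "offload_optimizer": {
#             "device": "cpu",
#             "pin_memory": true
#         },
#         "overlap_comm": true,
#         "contiguous_gradients": true
#     },
#     "gradient_accumulation_steps": "auto",
#     "gradient_clipping": "auto",
#     "train_batch_size": "auto",
#     "train_micro_batch_size_per_gpu": "auto"
# }
# EOF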