# Fine-tune openai/whisper-small on Amharic (am_et) from google/fleurs.
# NOTE: the original line ended in a stray "| |", which is a bash syntax
# error (empty pipeline commands); it has been removed.
python run_speech_recognition_seq2seq_streaming.py \
  --model_name_or_path="openai/whisper-small" \
  --dataset_name="google/fleurs" \
  --dataset_config_name="am_et" \
  --language="amharic" \
  --train_split_name="train+validation" \
  --eval_split_name="test" \
  --model_index_name="Whisper Small Amharic FLEURS" \
  --max_steps="1000" \
  --output_dir="./whisper-small-amet" \
  --per_device_train_batch_size="64" \
  --per_device_eval_batch_size="32" \
  --gradient_accumulation_steps="1" \
  --logging_steps="25" \
  --learning_rate="1e-5" \
  --warmup_steps="500" \
  --evaluation_strategy="steps" \
  --eval_steps="10000" \
  --save_strategy="steps" \
  --save_steps="100" \
  --generation_max_length="225" \
  --length_column_name="input_length" \
  --max_duration_in_seconds="30" \
  --text_column_name="raw_transcription" \
  --freeze_feature_encoder="False" \
  --report_to="tensorboard" \
  --metric_for_best_model="wer" \
  --greater_is_better="False" \
  --load_best_model_at_end="False" \
  --gradient_checkpointing \
  --fp16 \
  --overwrite_output_dir \
  --do_train \
  --do_eval="False" \
  --predict_with_generate \
  --do_normalize_eval="False" \
  --use_auth_token \
  --no_streaming \
  --push_to_hub="True"