#!/usr/bin/env bash
# Resume fine-tuning of openai/whisper-large-v2 on a local "audiofolder"
# dataset (../data/dataset_wav) and push the result to
# NbAiLab/whisper-large-sme on the Hugging Face Hub.
#
# Single-node, single-GPU launch via torchrun (--standalone, 1 proc).
#
# NOTE(review): --language="finnish" while the model is named "Whisper Large
#   Sámi" — presumably Finnish is used as the closest Whisper-supported
#   language token for Sámi; confirm this is intentional.
# NOTE(review): --resume_from_checkpoint="checkpoint-60000" together with
#   --max_steps="60000" means no additional training steps will run —
#   looks like this invocation only evaluates / pushes; verify.
torchrun --standalone --nnodes=1 --nproc_per_node=1 \
  run_speech_recognition_seq2seq_streaming.py \
  --model_name_or_path="openai/whisper-large-v2" \
  --resume_from_checkpoint="checkpoint-60000" \
  --ignore_data_skip \
  --dataset_name="audiofolder" \
  --dataset_data_dir="../data/dataset_wav" \
  --language="finnish" \
  --train_split_name="train+validation" \
  --eval_split_name="test" \
  --model_index_name="Whisper Large Sámi" \
  --output_dir="./" \
  --max_steps="60000" \
  --per_device_train_batch_size="12" \
  --per_device_eval_batch_size="6" \
  --gradient_accumulation_steps="1" \
  --logging_steps="25" \
  --learning_rate="1e-5" \
  --warmup_steps="500" \
  --evaluation_strategy="steps" \
  --eval_steps="1000" \
  --save_strategy="steps" \
  --save_steps="1000" \
  --generation_max_length="225" \
  --length_column_name="input_length" \
  --max_duration_in_seconds="30" \
  --text_column_name="transcription" \
  --freeze_feature_encoder="False" \
  --report_to="tensorboard" \
  --metric_for_best_model="wer" \
  --greater_is_better="False" \
  --load_best_model_at_end \
  --gradient_checkpointing="true" \
  --fp16="true" \
  --do_train \
  --do_eval \
  --predict_with_generate \
  --do_normalize_eval \
  --use_auth_token \
  --overwrite_output_dir="false" \
  --push_to_hub_organization="NbAiLab" \
  --push_to_hub_model_id="whisper-large-sme" \
  --push_to_hub