|
python $1run_speech_recognition_seq2seq_streaming.py \
    --model_name_or_path="openai/whisper-tiny" \
    --dataset_train_name="mozilla-foundation/common_voice_11_0,mozilla-foundation/common_voice_11_0,mozilla-foundation/common_voice_11_0,babelbox/babelbox_voice,NbAiLab/NST,NbAiLab/NPSC,google/fleurs,google/fleurs,google/fleurs" \
    --dataset_train_config_name="sv-SE,da,nn-NO,nst,no-distant,16K_mp3_nynorsk,sv_se,da_dk,nb_no" \
    --language_train="sv,da,no,sv,no,no,sv,da,no" \
    --train_split_name="train+validation,train+validation,train+validation,train,train+test,train+validation,train+validation,train+validation,train+validation" \
    --dataset_eval_name="mozilla-foundation/common_voice_11_0" \
    --dataset_eval_config_name="sv-SE" \
    --language_eval="sv" \
    --eval_split_name="test" \
    --model_index_name="Whisper Tiny Nordic" \
    --max_train_samples="64" \
    --max_eval_samples="32" \
    --max_steps="1" \
    --output_dir="./" \
    --per_device_train_batch_size="1" \
    --per_device_eval_batch_size="1" \
    --logging_steps="25" \
    --learning_rate="1e-5" \
    --warmup_steps="500" \
    --evaluation_strategy="steps" \
    --eval_steps="1000" \
    --save_strategy="steps" \
    --save_steps="1000" \
    --generation_max_length="225" \
    --length_column_name="input_length" \
    --max_duration_in_seconds="30" \
    --text_column_name="sentence,text,raw_transcription" \
    --freeze_feature_encoder="False" \
    --report_to="wandb" \
    --metric_for_best_model="wer" \
    --greater_is_better="False" \
    --load_best_model_at_end \
    --gradient_checkpointing \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --fp16 \
    --predict_with_generate \
    --do_normalize_eval \
    --streaming \
    --use_auth_token
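
The comma-separated `--dataset_train_name`, `--dataset_train_config_name`, `--train_split_name`, and `--text_column_name` lists describe one streaming source per position. The snippet below is a minimal sketch (not the training script itself) of how such sources can be loaded and combined with 🤗 Datasets in streaming mode; it uses only the two `google/fleurs` sources from the command so no access token is needed, and it assumes a `datasets` version that still loads these Hub datasets. How the script itself interleaves the sources may differ.

```python
# Minimal sketch: stream and combine two of the training sources listed above.
from datasets import load_dataset, interleave_datasets

# Swedish and Danish FLEURS, streamed rather than downloaded in full.
# (The Common Voice 11 sources would additionally need an authenticated
# Hugging Face token, which is what --use_auth_token passes to the script.)
fleurs_sv = load_dataset("google/fleurs", "sv_se", split="train", streaming=True)
fleurs_da = load_dataset("google/fleurs", "da_dk", split="train", streaming=True)

# One iterable training stream that draws from both sources, in the spirit
# of the multi-dataset arguments in the command above.
train_stream = interleave_datasets([fleurs_sv, fleurs_da])

# Peek at a few examples; `raw_transcription` is the FLEURS text column
# named in --text_column_name.
for example in train_stream.take(4):
    print(example["raw_transcription"])
```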