python run_speech_recognition_seq2seq_streaming.py \
    --model_name_or_path="openai/whisper-tiny" \
    --dataset_name="NbAiLab/NCC_S3" \
    --language="Norwegian" \
    --train_split_name="train" \
    --eval_split_name="validation" \
    --model_index_name="Whisper Tiny GPU test" \
    --max_steps="2000" \
    --output_dir="./" \
    --per_device_train_batch_size="128" \
    --per_device_eval_batch_size="32" \
    --logging_steps="200" \
    --learning_rate="3e-6" \
    --lr_scheduler_type="constant_with_warmup" \
    --warmup_steps="200" \
    --evaluation_strategy="steps" \
    --eval_steps="200" \
    --save_strategy="steps" \
    --save_steps="200" \
    --generation_max_length="225" \
    --length_column_name="duration" \
    --max_duration_in_seconds="30" \
    --text_column_name="text" \
    --freeze_feature_encoder="False" \
    --report_to="tensorboard" \
    --metric_for_best_model="wer" \
    --greater_is_better="False" \
    --load_best_model_at_end \
    --gradient_checkpointing \
    --fp16 \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --predict_with_generate \
    --do_normalize_eval \
    --use_auth_token \
    --push_to_hub \
    --hub_model_id="NbAiLab/whisper-tiny-gputest"
|
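If you prefer to configure the same run from Python instead of the command line, most of the trainer-level flags above map directly onto fields of `transformers.Seq2SeqTrainingArguments`. The sketch below is only an illustration of that mapping and assumes a `transformers` release contemporary with the example script; the data- and model-loading flags (`--dataset_name`, `--language`, `--text_column_name`, `--max_duration_in_seconds`, `--freeze_feature_encoder`, `--do_normalize_eval`, etc.) are parsed by the script itself and are not trainer arguments.

```python
# Illustrative sketch: the trainer hyperparameters from the command above
# expressed as Seq2SeqTrainingArguments (script-specific data flags omitted).
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./",
    overwrite_output_dir=True,
    max_steps=2000,
    per_device_train_batch_size=128,
    per_device_eval_batch_size=32,
    learning_rate=3e-6,
    lr_scheduler_type="constant_with_warmup",
    warmup_steps=200,
    logging_steps=200,
    evaluation_strategy="steps",
    eval_steps=200,
    save_strategy="steps",
    save_steps=200,
    generation_max_length=225,     # cap on generated token length during eval
    predict_with_generate=True,    # compute WER from generated transcriptions
    length_column_name="duration",
    metric_for_best_model="wer",
    greater_is_better=False,       # lower WER is better
    load_best_model_at_end=True,
    gradient_checkpointing=True,
    fp16=True,
    report_to=["tensorboard"],
    push_to_hub=True,
    hub_model_id="NbAiLab/whisper-tiny-gputest",
)
```

These arguments would then be passed to a `Seq2SeqTrainer` together with the model, datasets, and data collator, which is what the example script does internally.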