#!/usr/bin/env bash

python run_finetuning.py \
  --model_name_or_path "distil-whisper/large-32-2" \
  --dataset_name "distil-whisper/librispeech_asr" \
  --dataset_config_name "all" \
  --train_split_name "train.clean.100+train.clean.360+train.other.500" \
  --eval_split_name "validation.clean" \
  --text_column_name "whisper_transcript" \
  --cache_dir "/home/sanchitgandhi/cache" \
  --dataset_cache_dir "/home/sanchitgandhi/cache" \
  --output_dir "./" \
  --wandb_name "large-32-2-pl-librispeech" \
  --wandb_dir "/home/sanchitgandhi/.cache" \
  --wandb_project "distil-whisper-librispeech" \
  --per_device_train_batch_size 32 \
  --per_device_eval_batch_size 16 \
  --dtype "bfloat16" \
  --learning_rate 1e-4 \
  --warmup_steps 500 \
  --do_train \
  --do_eval \
  --num_train_epochs 10 \
  --preprocessing_num_workers 16 \
  --dataloader_num_workers 8 \
  --logging_steps 25 \
  --use_scan \
  --gradient_checkpointing \
  --overwrite_output_dir \
  --predict_with_generate \
  --push_to_hub
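
# Notes (added commentary, not part of the original launch command; hedged
# reading of the flags above):
# - The "+" syntax in --train_split_name concatenates dataset splits, so the
#   three LibriSpeech training subsets (~960 h of audio in total) are combined
#   into a single training set.
# - --text_column_name "whisper_transcript" selects the Whisper-generated
#   pseudo-label column of the dataset as the training target rather than the
#   original ground-truth transcriptions (likely the "pl" in the wandb name).
# - --dtype "bfloat16" and --use_scan suggest a JAX/Flax training script;
#   scan-over-layers and --gradient_checkpointing both trade extra recompute
#   for lower memory use, which is what allows the batch size of 32 per device.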