#!/usr/bin/env bash
# Distils the distil-whisper/large-v3-32-2 student from the
# openai/whisper-large-v3 teacher. The "+"-separated eval arguments are
# matched positionally: the n-th eval dataset pairs with the n-th config,
# split, and text column (13 evaluation sets in total).
# TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD sets the allocation size (in bytes)
# above which tcmalloc logs "large alloc" warnings; 10 GB effectively
# silences them for large model and optimizer buffers.
TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD=10000000000 python3 run_distillation.py \
  --model_name_or_path "distil-whisper/large-v3-32-2" \
  --teacher_model_name_or_path "openai/whisper-large-v3" \
  --train_dataset_config_name "train" \
  --train_dataset_name "sanchit-gandhi/concatenated-train-set-label-length-256-unshuffled" \
  --train_split_name "train" \
  --eval_dataset_name "distil-whisper/gigaspeech-l+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset+esb/diagnostic-dataset" \
  --eval_dataset_config_name "l+librispeech+librispeech+common_voice+common_voice+voxpopuli+voxpopuli+tedlium+tedlium+spgispeech+spgispeech+ami+ami" \
  --eval_split_name "validation+clean+other+clean+other+clean+other+clean+other+clean+other+clean+other" \
  --eval_text_column_name "text+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript+ortho_transcript" \
  --eval_steps 5000 \
  --save_steps 5000 \
  --warmup_steps 500 \
  --learning_rate 0.0001 \
  --max_label_length 256 \
  --logging_steps 25 \
  --save_total_limit 1 \
  --max_steps 80000 \
  --wer_threshold 10 \
  --per_device_train_batch_size 64 \
  --per_device_eval_batch_size 64 \
  --dtype "bfloat16" \
  --dataloader_num_workers 16 \
  --cache_dir "/home/sanchitgandhi/.cache" \
  --dataset_cache_dir "/home/sanchitgandhi/.cache" \
  --output_dir "./" \
  --wandb_name "large-v3-32-2-token-ids-freeze-embeds-label-length-256" \
  --wandb_dir "/home/sanchitgandhi/.cache" \
  --wandb_project "distil-whisper" \
  --do_train \
  --do_eval \
  --use_scan \
  --gradient_checkpointing \
  --overwrite_output_dir \
  --predict_with_generate \
  --freeze_encoder \
  --freeze_embeddings \
  --streaming \
  --preprocess_audio_features False \
  --use_auth_token \
  --push_to_hub