#!/usr/bin/env bash
python run_flax_speech_recognition_seq2seq.py \
	--dataset_name="librispeech_asr" \
	--model_name_or_path="./" \
	--dataset_config_name="all" \
	--train_split_name="train.clean.100+train.clean.360+train.other.500" \
	--eval_split_name="validation.clean" \
	--dataset_cache_dir="/home/sanchitgandhi/cache/huggingface/datasets" \
	--output_dir="./output_dir" \
	--preprocessing_num_workers="16" \
	--length_column_name="input_length" \
	--overwrite_output_dir \
	--num_train_epochs="3" \
	--per_device_train_batch_size="8" \
	--per_device_eval_batch_size="2" \
	--logging_steps="25" \
	--gradient_checkpointing \
	--max_duration_in_seconds="20" \
	--max_target_length="128" \
	--generation_max_length="40" \
	--generation_num_beams="1" \
	--final_generation_max_length="200" \
	--final_generation_num_beams="5" \
	--learning_rate="3e-4" \
	--warmup_steps="500" \
	--text_column_name="text" \
	--save_total_limit="1" \
	--freeze_feature_encoder \
	--predict_with_generate \
	--do_lower_case \
	--do_eval \
	--do_train \
	--wandb_project="flax-wav2vec2-2-bart-large-960h"
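
# Example invocation, assuming this script is saved as run_librispeech.sh (hypothetical
# filename) at the root of the model repository, so that --model_name_or_path="./"
# resolves to the local checkpoint and results are written to ./output_dir:
#
#   chmod +x run_librispeech.sh
#   ./run_librispeech.sh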