To reproduce this run, execute:

```bash
#!/usr/bin/env bash
python run_flax_speech_recognition_seq2seq.py \
        --dataset_name="esc-benchmark/esc-datasets" \
        --model_name_or_path="esc-benchmark/wav2vec2-aed-pretrained" \
        --dataset_config_name="ami" \
        --output_dir="./" \
        --wandb_name="wav2vec2-aed-ami" \
        --wandb_project="wav2vec2-aed" \
        --per_device_train_batch_size="8" \
        --per_device_eval_batch_size="4" \
        --learning_rate="1e-4" \
        --warmup_steps="500" \
        --logging_steps="25" \
        --max_steps="50001" \
        --eval_steps="10000" \
        --save_steps="10000" \
        --generation_max_length="40" \
        --generation_num_beams="1" \
        --final_generation_max_length="225" \
        --final_generation_num_beams="5" \
        --generation_length_penalty="1.4" \
        --hidden_dropout="0.2" \
        --activation_dropout="0.2" \
        --feat_proj_dropout="0.2" \
        --overwrite_output_dir \
        --gradient_checkpointing \
        --freeze_feature_encoder \
        --predict_with_generate \
        --do_eval \
        --do_train \
        --do_predict \
        --push_to_hub \
        --use_auth_token
```
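
Once training finishes and the checkpoint is pushed, it can be used for transcription. The snippet below is a minimal sketch, assuming the repository loads as a `SpeechEncoderDecoderModel` with a compatible `AutoProcessor`; `sample.wav` is a placeholder for any 16 kHz mono recording, and the beam-search settings simply mirror the final-generation flags above.

```python
import soundfile as sf
import torch
from transformers import AutoProcessor, SpeechEncoderDecoderModel

model_id = "esc-benchmark/wav2vec2-aed-ami"  # this card's repo id

processor = AutoProcessor.from_pretrained(model_id)
# from_flax=True because this run trains with Flax; drop it if the repo
# also contains PyTorch weights.
model = SpeechEncoderDecoderModel.from_pretrained(model_id, from_flax=True)

# "sample.wav" is a placeholder: any 16 kHz mono recording will do.
speech, sampling_rate = sf.read("sample.wav")
inputs = processor(speech, sampling_rate=sampling_rate, return_tensors="pt")

with torch.no_grad():
    # Mirror the --final_generation_* and --generation_length_penalty flags.
    generated_ids = model.generate(
        inputs.input_values,
        max_length=225,
        num_beams=5,
        length_penalty=1.4,
    )

transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(transcription)
```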

This model was trained on the `ami` configuration of the esc-benchmark/esc-datasets dataset.