sanchit-gandhi HF staff committed on
Commit
6494705
1 Parent(s): 325868a
Files changed (2) hide show
  1. create_model.py +33 -0
  2. run_librispeech.sh +34 -0
create_model.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import jax
import jax.numpy as jnp
from transformers import AutoFeatureExtractor, AutoTokenizer, FlaxSpeechEncoderDecoderModel

# Checkpoints used to warm-start the speech seq2seq model:
# a Wav2Vec2 audio encoder and a BART text decoder.
encoder_id = "facebook/wav2vec2-large-lv60"
decoder_id = "facebook/bart-large"

# Build the encoder-decoder model from the two pretrained checkpoints.
# encoder_add_adapter=True inserts a convolutional adapter after the encoder
# to downsample the audio feature sequence before cross-attention;
# decoder_from_pt=True loads the BART weights from the PyTorch checkpoint.
model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id, decoder_id, encoder_add_adapter=True, decoder_from_pt=True
)

# Disable encoder regularization that is unhelpful for fine-tuning,
# and copy the decoder's special-token ids up to the top-level config
# so that generation works out of the box.
model.config.encoder.feat_proj_dropout = 0.0
model.config.encoder.final_dropout = 0.0
model.config.encoder.mask_time_prob = 0.1
model.config.decoder_start_token_id = model.config.decoder.bos_token_id
model.config.pad_token_id = model.config.decoder.pad_token_id
model.config.eos_token_id = model.config.decoder.eos_token_id
model.config.max_length = 40
model.config.num_beams = 1
model.config.encoder.layerdrop = 0.0
model.config.use_cache = False
model.config.processor_class = "Wav2Vec2Processor"

# Need to upcast bart-large weights from float16 to float32.
# NOTE: jax.tree_map was deprecated and removed from the top-level jax
# namespace; jax.tree_util.tree_map is the stable, equivalent API.
model.params = jax.tree_util.tree_map(
    lambda x: x.astype(jnp.float32) if x.dtype != jnp.float32 else x, model.params
)

# Smoke-test: check that generation runs on a dummy 1-second-ish waveform.
out = model.generate(jnp.ones((1, 2000)))

# Save the model, feature extractor and tokenizer to the current repo dir.
model.save_pretrained("./")

feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
feature_extractor.save_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained(decoder_id)
tokenizer.save_pretrained("./")
run_librispeech.sh ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env bash
# Fine-tune the Flax Wav2Vec2-to-BART model (created by create_model.py,
# loaded from the current directory via --model_name_or_path="./") on a
# 5% debug slice of LibriSpeech clean train-100 / validation.
#
# NOTE: the cache dir uses "$HOME" instead of "~" — tilde is NOT expanded
# inside double quotes, so the original "~/cache/..." was passed literally.
python run_flax_speech_recognition_seq2seq.py \
	--dataset_name="librispeech_asr" \
	--model_name_or_path="./" \
	--dataset_config_name="clean" \
	--train_split_name="train.100[:5%]" \
	--eval_split_name="validation[:5%]" \
	--dataset_cache_dir="$HOME/cache/huggingface/datasets" \
	--output_dir="./output_dir" \
	--preprocessing_num_workers="16" \
	--length_column_name="input_length" \
	--overwrite_output_dir \
	--num_train_epochs="10" \
	--per_device_train_batch_size="4" \
	--per_device_eval_batch_size="4" \
	--gradient_accumulation_steps="1" \
	--logging_steps="25" \
	--max_duration_in_seconds="10" \
	--max_target_length="64" \
	--generation_max_length="40" \
	--generation_num_beams="1" \
	--learning_rate="3e-4" \
	--warmup_steps="500" \
	--text_column_name="text" \
	--save_total_limit="1" \
	--freeze_feature_encoder \
	--predict_with_generate \
	--do_lower_case \
	--do_eval \
	--do_train \
	--push_to_hub \
	--use_auth_token \
	--wandb_project="flax-wav2vec2-2-bart-debug"