sanchit-gandhi HF staff committed on
Commit
8d3553d
1 Parent(s): 93698e3
Files changed (3) hide show
  1. create_model.py +32 -0
  2. create_scan_model.py +61 -0
  3. run_librispeech.sh +36 -0
create_model.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Build a tiny wav2vec2-to-BART Flax speech-encoder-decoder model and save it locally."""
import jax.numpy as jnp
from transformers import AutoFeatureExtractor, AutoTokenizer
from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

encoder_id = "hf-internal-testing/tiny-random-wav2vec2"
decoder_id = "hf-internal-testing/tiny-random-bart"

model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id, decoder_id, encoder_from_pt=True, decoder_from_pt=True, encoder_add_adapter=True
)

# Disable the encoder's stochastic components so runs are reproducible.
for attr, value in (
    ("feat_proj_dropout", 0.0),
    ("final_dropout", 0.0),
    ("mask_time_prob", 0.1),
    ("layerdrop", 0.0),
):
    setattr(model.config.encoder, attr, value)

# Wire the top-level generation config to the decoder's special tokens.
decoder_config = model.config.decoder
model.config.decoder_start_token_id = decoder_config.bos_token_id
model.config.pad_token_id = decoder_config.pad_token_id
model.config.eos_token_id = decoder_config.eos_token_id
model.config.max_length = 20
model.config.num_beams = 1
model.config.use_cache = False
model.config.processor_class = "Wav2Vec2Processor"

# Smoke-test generation on a dummy waveform before persisting anything.
out = model.generate(jnp.ones((1, 2000)))

model.save_pretrained("./")

# Save the matching preprocessor components next to the model weights.
feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
feature_extractor.save_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained(decoder_id)
tokenizer.save_pretrained("./")
create_scan_model.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Instantiate an unrolled and a scan-enabled variant of the tiny speech model."""
import jax.numpy as jnp
from transformers import AutoFeatureExtractor, AutoTokenizer
from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
from flax.traverse_util import flatten_dict, unflatten_dict

encoder_id = "hf-internal-testing/tiny-random-wav2vec2"
decoder_id = "hf-internal-testing/tiny-random-bart"

# The two variants share every constructor argument except the scan flags.
shared_kwargs = dict(encoder_add_adapter=True, encoder_from_pt=True, decoder_from_pt=True)
unrolled_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id, decoder_id, encoder_use_scan=False, decoder_use_scan=False, **shared_kwargs
)
model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id, decoder_id, encoder_use_scan=True, decoder_use_scan=True, **shared_kwargs
)

# Disable the encoder's stochastic components so runs are reproducible.
for attr, value in (
    ("feat_proj_dropout", 0.0),
    ("final_dropout", 0.0),
    ("mask_time_prob", 0.1),
    ("layerdrop", 0.0),
):
    setattr(model.config.encoder, attr, value)

# Wire the top-level generation config to the decoder's special tokens.
decoder_config = model.config.decoder
model.config.decoder_start_token_id = decoder_config.bos_token_id
model.config.pad_token_id = decoder_config.pad_token_id
model.config.eos_token_id = decoder_config.eos_token_id
model.config.max_length = 40
model.config.num_beams = 1
model.config.use_cache = False
model.config.processor_class = "Wav2Vec2Processor"
24
def unrolled_to_scanned(model):
    """Convert an unrolled model's parameters into the stacked layout used by scan.

    For each repeated transformer layer, the per-layer weight tensors
    ``layers/0 .. layers/N-1`` are stacked along a new leading axis and placed
    under the single scanned-module key (``FlaxWav2Vec2EncoderLayers`` for the
    encoder, ``FlaxBartDecoderLayers`` for the decoder). All non-repeated
    modules are copied over unchanged.

    Args:
        model: an unrolled ``FlaxSpeechEncoderDecoderModel`` whose ``params``
            and ``config`` are read (the model itself is not mutated).

    Returns:
        A nested parameter dict suitable for assignment to a scan-enabled model.
    """
    params = model.params
    # Flatten once up front — the original code re-flattened the whole tree on
    # every iteration of the copy loop below.
    flat_params = flatten_dict(params)

    def _stack_layers(layers, num_layers, prefix):
        # Flatten each layer's subtree once, then stack every weight across the
        # layer axis; `prefix` is the key path of the scanned module.
        flat_layers = [flatten_dict(layers[str(i)]) for i in range(num_layers)]
        stacked = {k: jnp.stack([fl[k] for fl in flat_layers]) for k in flat_layers[0]}
        return unflatten_dict({prefix: unflatten_dict(stacked)})

    new_enc_params = _stack_layers(
        params['encoder']['encoder']['layers'],
        model.config.encoder.num_hidden_layers,
        ('encoder', 'layers', 'FlaxWav2Vec2EncoderLayers'),
    )
    # The decoder nests its 'layers' key one level deeper ('model' -> 'decoder'),
    # hence a separate prefix rather than a fully shared code path.
    new_dec_params = _stack_layers(
        params['decoder']['model']['decoder']['layers'],
        model.config.decoder.decoder_layers,
        ('model', 'decoder', 'layers', 'FlaxBartDecoderLayers'),
    )

    # Combine the stacked encoder and decoder parameters.
    new_params = flatten_dict({'encoder': new_enc_params, 'decoder': new_dec_params})

    # Copy parameters of all non-scanned modules: anything whose key path has no
    # 'layers' component, plus the adapter (which lives under 'layers' but is
    # not part of the scan).
    for k, v in flat_params.items():
        if 'layers' not in k or 'adapter' in k:
            new_params[k] = v

    return unflatten_dict(new_params)
50
+
51
# Transfer the unrolled weights into the scanned model's stacked layout.
model.params = unrolled_to_scanned(unrolled_model)

# Smoke-test generation on a dummy waveform before persisting anything.
out = model.generate(jnp.ones((1, 2000)))

model.save_pretrained("./")

# Save the matching preprocessor components next to the model weights.
feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
feature_extractor.save_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained(decoder_id)
tokenizer.save_pretrained("./")
run_librispeech.sh ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env bash
2
+ python run_flax_speech_recognition_seq2seq.py \
3
+ --dataset_name="hf-internal-testing/librispeech_asr_dummy" \
4
+ --model_name_or_path="./" \
5
+ --dataset_config_name="clean" \
6
+ --train_split_name="validation" \
7
+ --eval_split_name="validation" \
8
+ --test_split_name="validation[:90%]+validation[:95%]" \
9
+ --output_dir="./" \
10
+ --dataset_cache_dir="/home/sanchitgandhi/cache/huggingface/datasets" \
11
+ --preprocessing_num_workers="1" \
12
+ --length_column_name="input_length" \
13
+ --overwrite_output_dir \
14
+ --max_steps="15" \
15
+ --eval_steps="5" \
16
+ --save_steps="5" \
17
+ --per_device_train_batch_size="2" \
18
+ --per_device_eval_batch_size="2" \
19
+ --logging_steps="1" \
20
+ --max_duration_in_seconds="15" \
21
+ --max_target_length="64" \
22
+ --generation_max_length="40" \
23
+ --generation_num_beams="1" \
24
+ --final_generation_max_length="50" \
25
+ --final_generation_num_beams="2" \
26
+ --learning_rate="3e-4" \
27
+ --warmup_steps="500" \
28
+ --text_column_name="text" \
29
+ --save_total_limit="1" \
30
+ --wandb_project="flax-wav2vec2-2-bart-dummy" \
31
+ --freeze_feature_encoder \
32
+ --predict_with_generate \
33
+ --do_lower_case \
34
+ --do_train \
35
+ --do_eval \
36
+ --do_predict