Commit e8c6bc8
Parent(s): a95452c

up
create_scan_model.py ADDED (+61 -0)
@@ -0,0 +1,61 @@
+import jax.numpy as jnp
+from transformers import AutoFeatureExtractor, AutoTokenizer
+from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
+from flax.traverse_util import flatten_dict, unflatten_dict
+
+encoder_id = "facebook/wav2vec2-large-lv60"
+decoder_id = "patrickvonplaten/bart-large-fp32"
+
+unrolled_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True, decoder_from_pt=True, encoder_use_scan=False, decoder_use_scan=False)
+model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True, decoder_from_pt=True, encoder_use_scan=True, decoder_use_scan=True)
+
+model.config.encoder.feat_proj_dropout = 0.0
+model.config.encoder.final_dropout = 0.0
+model.config.encoder.mask_time_prob = 0.1
+model.config.decoder_start_token_id = model.config.decoder.bos_token_id
+model.config.pad_token_id = model.config.decoder.pad_token_id
+model.config.eos_token_id = model.config.decoder.eos_token_id
+model.config.max_length = 40
+model.config.num_beams = 1
+model.config.encoder.layerdrop = 0.0
+model.config.use_cache = False
+model.config.processor_class = "Wav2Vec2Processor"
+
+def unrolled_to_scanned(model):
+    params = model.params
+    new_enc_params = {}
+    # iterate over the parameter keys of one unrolled encoder layer
+    for k in flatten_dict(params['encoder']['encoder']['layers']['0']):
+        # stack the weights of all unrolled layers into one matrix per parameter
+        new_enc_params[k] = jnp.stack([flatten_dict(params['encoder']['encoder']['layers'][str(i)])[k] for i in range(model.config.encoder.num_hidden_layers)])
+    # prepend the correct prefix to the scanned module's keys
+    new_enc_params = unflatten_dict({('encoder', 'layers', 'FlaxWav2Vec2EncoderLayers'): unflatten_dict(new_enc_params)})
+
+    # repeat for the decoder (note that the key 'layers' sits one level deeper than in the encoder, so we treat the encoder and decoder independently for now)
+    new_dec_params = {}
+    for k in flatten_dict(params['decoder']['model']['decoder']['layers']['0']):
+        new_dec_params[k] = jnp.stack([flatten_dict(params['decoder']['model']['decoder']['layers'][str(i)])[k] for i in range(model.config.decoder.decoder_layers)])
+    new_dec_params = unflatten_dict({('model', 'decoder', 'layers', 'FlaxBartDecoderLayers'): unflatten_dict(new_dec_params)})
+
+    # combine the encoder and decoder parameters
+    new_params = {'encoder': new_enc_params, 'decoder': new_dec_params}
+    new_params = flatten_dict(new_params)
+
+    # copy over the parameters of all non-scanned modules (i.e. those whose keys do not contain 'layers', plus the adapter)
+    for k in flatten_dict(params):
+        if 'layers' not in k or 'adapter' in k:
+            new_params[k] = flatten_dict(params)[k]
+
+    return unflatten_dict(new_params)
+
+model.params = unrolled_to_scanned(unrolled_model)
+
+# check that generation works
+out = model.generate(jnp.ones((1, 2000)))
+
+model.save_pretrained("./")
+
+feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
+feature_extractor.save_pretrained("./")
+tokenizer = AutoTokenizer.from_pretrained(decoder_id)
+tokenizer.save_pretrained("./")
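Beyond the generate call above, the conversion can be sanity-checked by comparing the two parameter trees directly. The sketch below is illustrative and not part of the commit: it assumes unrolled_model and model from the script are still in scope, uses only standard JAX APIs (jax.tree_util.tree_leaves, jnp.allclose), and assumes the model's __call__ mirrors the transformers FlaxSpeechEncoderDecoderModel signature (dropout and spec-augment masking are inactive since train defaults to False).

import jax

# Sketch (not part of the commit): the scanned tree should hold exactly the
# same number of parameter values as the unrolled tree, since per-layer
# weights are only stacked, never dropped or duplicated.
n_unrolled = sum(leaf.size for leaf in jax.tree_util.tree_leaves(unrolled_model.params))
n_scanned = sum(leaf.size for leaf in jax.tree_util.tree_leaves(model.params))
assert n_unrolled == n_scanned, (n_unrolled, n_scanned)

# Forward equivalence on a dummy batch (assumed __call__ signature): the
# scanned model should reproduce the unrolled model's logits up to
# numerical tolerance, since the weights are identical and only restacked.
dummy_inputs = jnp.ones((1, 2000))
decoder_input_ids = jnp.array([[model.config.decoder_start_token_id]])
logits_unrolled = unrolled_model(dummy_inputs, decoder_input_ids=decoder_input_ids).logits
logits_scanned = model(dummy_inputs, decoder_input_ids=decoder_input_ids).logits
assert jnp.allclose(logits_unrolled, logits_scanned, atol=1e-4)

The count check is weak but shape-agnostic; the forward check is the stronger guarantee, since it would catch layers stacked in the wrong order, which the count check cannot.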