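# Convert the parameters of an unrolled FlaxSpeechEncoderDecoderModel (use_scan=False)
# into the stacked layout expected by its scanned counterpart (use_scan=True), check
# that generation still runs, and save the converted model together with its feature
# extractor and tokenizer.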
import jax.numpy as jnp
from transformers import AutoFeatureExtractor, AutoTokenizer
from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
from flax.traverse_util import flatten_dict, unflatten_dict

encoder_id = "hf-internal-testing/tiny-random-wav2vec2"
decoder_id = "hf-internal-testing/tiny-random-bart"

# load the same encoder-decoder checkpoint twice: once with the layers unrolled (use_scan=False)
# and once in the scanned layout (use_scan=True)
unrolled_model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id, decoder_id, encoder_add_adapter=True, encoder_from_pt=True,
    decoder_from_pt=True, encoder_use_scan=False, decoder_use_scan=False,
)
model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_id, decoder_id, encoder_add_adapter=True, encoder_from_pt=True,
    decoder_from_pt=True, encoder_use_scan=True, decoder_use_scan=True,
)

# configure the combined model: encoder regularisation, generation defaults (start/pad/eos
# token ids, max_length, num_beams) and the processor class associated with the checkpoint
model.config.encoder.feat_proj_dropout = 0.0
model.config.encoder.final_dropout = 0.0
model.config.encoder.mask_time_prob = 0.1
model.config.decoder_start_token_id = model.config.decoder.bos_token_id
model.config.pad_token_id = model.config.decoder.pad_token_id
model.config.eos_token_id = model.config.decoder.eos_token_id
model.config.max_length = 40
model.config.num_beams = 1
model.config.encoder.layerdrop = 0.0
model.config.use_cache = False
model.config.processor_class = "Wav2Vec2Processor"

def unrolled_to_scanned(model):
    params = model.params
    new_enc_params = {}
    # get the flattened parameter keys of the first (unrolled) encoder layer
    for k in flatten_dict(params['encoder']['encoder']['layers']['0']):
        # stack the corresponding weight of every unrolled layer into one array with a leading layer dimension
        new_enc_params[k] = jnp.stack([flatten_dict(params['encoder']['encoder']['layers'][str(i)])[k] for i in range(model.config.encoder.num_hidden_layers)])
    # nest the stacked weights under the key prefix used by the scanned encoder module
    new_enc_params = unflatten_dict({('encoder', 'layers', 'FlaxWav2Vec2EncoderLayers'): unflatten_dict(new_enc_params)})
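    # e.g. (hypothetical shapes): a kernel of shape (hidden, hidden) stored once per layer
    # under layers/0 ... layers/N-1 becomes one stacked kernel of shape (N, hidden, hidden)
    # under encoder/layers/FlaxWav2Vec2EncoderLayers, where N = num_hidden_layers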

    # repeat for the decoder (note that the key 'layers' sits one level deeper than in the encoder, so we treat the encoder and decoder independently for now)
    new_dec_params = {}
    for k in flatten_dict(params['decoder']['model']['decoder']['layers']['0']):
        new_dec_params[k] = jnp.stack([flatten_dict(params['decoder']['model']['decoder']['layers'][str(i)])[k] for i in range(model.config.decoder.decoder_layers)])
    new_dec_params = unflatten_dict({('model', 'decoder', 'layers', 'FlaxBartDecoderLayers'): unflatten_dict(new_dec_params)})

    # combine the encoder and decoder parameters
    new_params = {'encoder': new_enc_params, 'decoder': new_dec_params}
    new_params = flatten_dict(new_params)

    # copy over the parameters of all non-scanned modules: those whose key does not contain
    # 'layers', plus the adapter (whose key does contain 'layers' but which is not scanned)
    flat_params = flatten_dict(params)
    for k in flat_params:
        if 'layers' not in k or 'adapter' in k:
            new_params[k] = flat_params[k]

    return unflatten_dict(new_params)

model.params = unrolled_to_scanned(unrolled_model)

# check that generation runs with the converted (scanned) parameters on a dummy input
out = model.generate(jnp.ones((1, 2000)))
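# Optional equivalence check (a sketch, not part of the original script): with dropout,
# layerdrop and time-masking inactive at inference time, the scanned model should
# reproduce the unrolled model's logits up to numerical tolerance. This assumes the
# custom FlaxSpeechEncoderDecoderModel keeps the upstream transformers call signature
# (input features as the first argument, plus decoder_input_ids) and returns `.logits`.
dummy_inputs = jnp.ones((1, 2000))
dummy_decoder_input_ids = jnp.array([[model.config.decoder_start_token_id]])
logits_unrolled = unrolled_model(dummy_inputs, decoder_input_ids=dummy_decoder_input_ids).logits
logits_scanned = model(dummy_inputs, decoder_input_ids=dummy_decoder_input_ids).logits
assert jnp.allclose(logits_unrolled, logits_scanned, atol=1e-5), "scanned and unrolled outputs diverge"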

model.save_pretrained("./")

feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
feature_extractor.save_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained(decoder_id)
tokenizer.save_pretrained("./")
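# The saved directory should load back as a scanned checkpoint (assumption: the custom
# modelling code stores the scan setting in the saved config), e.g.:
# reloaded = FlaxSpeechEncoderDecoderModel.from_pretrained("./")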