from flax.traverse_util import flatten_dict, unflatten_dict
from transformers import AutoConfig, AutoFeatureExtractor, AutoTokenizer

from models.modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
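
# Convert a checkpoint whose transformer layers were trained with scan enabled
# (layer weights stacked along a leading axis) into the standard unrolled layout
# expected when `use_scan` is disabled, then export the model, feature extractor
# and tokenizer to the current directory.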
model_id = "sanchit-gandhi/flax-wav2vec2-2-bart-large-cv9-baseline-50k"
config = AutoConfig.from_pretrained(model_id)
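# `model` keeps the original scanned layout; `unrolled_model` is loaded with
# `use_scan` disabled on both the encoder and decoder, so it expects one
# parameter sub-tree per layer. Its parameters are overwritten with the
# converted weights below.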
config.encoder.use_scan = config.decoder.use_scan = False
unrolled_model = FlaxSpeechEncoderDecoderModel.from_pretrained(model_id, config=config)
model = FlaxSpeechEncoderDecoderModel.from_pretrained(model_id)

def scanned_to_unrolled(params):
    """Convert a scanned parameter tree (layer weights stacked along a leading
    axis) into the unrolled layout (one parameter sub-tree per layer)."""
    # unstack the encoder layers: each leaf of the scanned module has shape
    # (num_layers, ...), so slice it along the leading axis
    new_enc_params = {}
    for key, stacked_weights in flatten_dict(params['encoder']['encoder']['layers']['FlaxWav2Vec2EncoderLayers']).items():
        for layer, weights in enumerate(stacked_weights):
            new_key = (str(layer),) + key
            new_enc_params[new_key] = weights
    new_enc_params = unflatten_dict({('encoder', 'layers'): unflatten_dict(new_enc_params)})
    # repeat for the decoder (note that the key 'layers' sits one level deeper
    # than in the encoder, so the encoder and decoder are treated independently)
    new_dec_params = {}
    for key, stacked_weights in flatten_dict(params['decoder']['model']['decoder']['layers']['FlaxBartDecoderLayers']).items():
        for layer, weights in enumerate(stacked_weights):
            new_key = (str(layer),) + key
            new_dec_params[new_key] = weights
    new_dec_params = unflatten_dict({('model', 'decoder', 'layers'): unflatten_dict(new_dec_params)})
    # combine the encoder and decoder parameters
    new_params = flatten_dict({'encoder': new_enc_params, 'decoder': new_dec_params})
    # append the parameters of all non-scanned modules, i.e. every key that does
    # not contain 'layers' (the adapter contains 'layers' but is not scanned)
    flat_params = flatten_dict(params)
    for k, v in flat_params.items():
        if 'layers' not in k or 'adapter' in k:
            new_params[k] = v
    return unflatten_dict(new_params)
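
# Optional sanity check (a minimal sketch, not part of the original conversion):
# the converted tree should contain exactly the same keys and weight shapes as
# the parameters of the freshly instantiated unrolled model.
def assert_params_match(converted, reference):
    converted_flat, reference_flat = flatten_dict(converted), flatten_dict(reference)
    assert set(converted_flat) == set(reference_flat), "parameter keys differ"
    for key, weight in converted_flat.items():
        assert weight.shape == reference_flat[key].shape, f"shape mismatch at {'/'.join(key)}"

assert_params_match(scanned_to_unrolled(model.params), unrolled_model.params)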
unrolled_model.params = scanned_to_unrolled(model.params)
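
# Optional equivalence check (a sketch, assuming both classes expose the upstream
# FlaxSpeechEncoderDecoderModel call signature): the scanned and unrolled models
# should now produce numerically identical outputs on dummy inputs.
import numpy as np

dummy_inputs = np.ones((1, 16000), dtype=np.float32)       # ~1 s of dummy audio at 16 kHz
dummy_decoder_input_ids = np.zeros((1, 4), dtype=np.int32)
scanned_logits = model(dummy_inputs, decoder_input_ids=dummy_decoder_input_ids).logits
unrolled_logits = unrolled_model(dummy_inputs, decoder_input_ids=dummy_decoder_input_ids).logits
assert np.allclose(scanned_logits, unrolled_logits, atol=1e-4), "scanned and unrolled outputs differ"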
unrolled_model.save_pretrained("./")
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
feature_extractor.save_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.save_pretrained("./")
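
# Optional round-trip check (a sketch): the exported folder should load back as a
# standard unrolled checkpoint with the same custom class, since the saved config
# has `use_scan` disabled.
reloaded = FlaxSpeechEncoderDecoderModel.from_pretrained("./")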