sanchit-gandhi (HF staff) committed
Commit 6f60663
1 Parent(s): e8c6bc8
Files changed (3):
  1. create_model.py +33 -0
  2. run_librispeech.sh +34 -0
  3. wer-sweep.yaml +104 -0
create_model.py ADDED
@@ -0,0 +1,33 @@
+ import jax
+ import jax.numpy as jnp
+ from transformers import AutoFeatureExtractor, AutoTokenizer, FlaxSpeechEncoderDecoderModel
+
+ encoder_id = "facebook/wav2vec2-large-lv60"
+ decoder_id = "facebook/bart-large"
+
+ model = FlaxSpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id, encoder_add_adapter=True, decoder_from_pt=True)
+
+ model.config.encoder.feat_proj_dropout = 0.0
+ model.config.encoder.final_dropout = 0.0
+ model.config.encoder.mask_time_prob = 0.1
+ model.config.decoder_start_token_id = model.config.decoder.bos_token_id
+ model.config.pad_token_id = model.config.decoder.pad_token_id
+ model.config.eos_token_id = model.config.decoder.eos_token_id
+ model.config.max_length = 40
+ model.config.num_beams = 1
+ model.config.encoder.layerdrop = 0.0
+ model.config.use_cache = False
+ model.config.processor_class = "Wav2Vec2Processor"
+
+ # need to upcast bart-large weights from float16 to float32
+ model.params = jax.tree_map(lambda x: x.astype(jnp.float32) if x.dtype != jnp.float32 else x, model.params)
+
+ # check that generation works
+ out = model.generate(jnp.ones((1, 2000)))
+
+ model.save_pretrained("./")
+
+ feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
+ feature_extractor.save_pretrained("./")
+ tokenizer = AutoTokenizer.from_pretrained(decoder_id)
+ tokenizer.save_pretrained("./")
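For reference, a minimal sketch of loading the exported checkpoint back and running generation on a dummy waveform. The local path `./`, the random audio, and the 2-second length are illustrative assumptions; the 16 kHz sampling rate is the rate wav2vec2 was pretrained on. Since the encoder-decoder combination (including the newly initialised adapter) has not yet been fine-tuned, the decoded text will be meaningless.

```python
import numpy as np
from transformers import AutoFeatureExtractor, AutoTokenizer, FlaxSpeechEncoderDecoderModel

# load the model, feature extractor and tokenizer written out by create_model.py
model = FlaxSpeechEncoderDecoderModel.from_pretrained("./")
feature_extractor = AutoFeatureExtractor.from_pretrained("./")
tokenizer = AutoTokenizer.from_pretrained("./")

# placeholder input: 2 s of random audio at 16 kHz
waveform = np.random.randn(32_000).astype(np.float32)
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")

# greedy decoding, matching num_beams=1 / max_length=40 set in the config above
outputs = model.generate(inputs.input_values)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))
```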
run_librispeech.sh ADDED
@@ -0,0 +1,34 @@
+ #!/usr/bin/env bash
+ python run_flax_speech_recognition_seq2seq.py \
+ --dataset_name="librispeech_asr" \
+ --model_name_or_path="./" \
+ --dataset_config_name="clean" \
+ --train_split_name="train.100" \
+ --eval_split_name="validation" \
+ --dataset_cache_dir="$HOME/cache/huggingface/datasets" \
+ --output_dir="./" \
+ --preprocessing_num_workers="16" \
+ --length_column_name="input_length" \
+ --overwrite_output_dir \
+ --num_train_epochs="10" \
+ --per_device_train_batch_size="4" \
+ --per_device_eval_batch_size="4" \
+ --gradient_accumulation_steps="1" \
+ --logging_steps="25" \
+ --max_duration_in_seconds="10" \
+ --max_target_length="64" \
+ --generation_max_length="40" \
+ --generation_num_beams="1" \
+ --learning_rate="3e-4" \
+ --warmup_steps="500" \
+ --text_column_name="text" \
+ --save_total_limit="1" \
+ --freeze_feature_encoder \
+ --predict_with_generate \
+ --do_lower_case \
+ --do_eval \
+ --do_train \
+ --push_to_hub \
+ --use_auth_token \
+ --wandb_project="flax-wav2vec2-2-bart-large"
+
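As a sanity check on the schedule implied by these flags, here is the back-of-the-envelope arithmetic. The device count (8, e.g. a TPU v3-8) and the size of LibriSpeech train.100 (~28,539 utterances) are assumptions not stated in the script.

```python
# rough step counts for the flags in run_librispeech.sh
# assumptions: 8 accelerator devices and ~28,539 train.100 utterances
num_devices = 8
per_device_train_batch_size = 4
gradient_accumulation_steps = 1
num_train_epochs = 10
num_train_examples = 28_539

effective_batch_size = per_device_train_batch_size * num_devices * gradient_accumulation_steps
steps_per_epoch = num_train_examples // effective_batch_size
total_steps = steps_per_epoch * num_train_epochs

print(f"effective batch size: {effective_batch_size}")  # 32
print(f"total optimisation steps: {total_steps}")        # 8910
print(f"warmup fraction: {500 / total_steps:.0%}")       # ~6%
```

Under these assumptions, warmup_steps=500 covers roughly the first 6% of training, a conventional choice for a linear warmup schedule.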
wer-sweep.yaml ADDED
@@ -0,0 +1,104 @@
+ command:
+   - python3
+   - ${program}
+   - --overwrite_output_dir
+   - --freeze_feature_encoder
+   - --predict_with_generate
+   - --do_lower_case
+   - --do_train
+   - --do_eval
+   - ${args}
+ method: random
+ metric:
+   goal: minimize
+   name: eval/wer
+ parameters:
+   activation_dropout:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   dataset_cache_dir:
+     value: /home/sanchitgandhi/cache/huggingface/datasets
+   dataset_config_name:
+     value: clean
+   dataset_name:
+     value: librispeech_asr
+   decoder_activation_dropout:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   decoder_attention_dropout:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   decoder_dropout:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   eval_split_name:
+     value: validation
+   eval_steps:
+     value: 500
+   feat_proj_dropout:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   generation_max_length:
+     value: 40
+   generation_num_beams:
+     value: 1
+   gradient_accumulation_steps:
+     values:
+       - 2
+       - 4
+       - 8
+   hidden_dropout:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   layerdrop:
+     distribution: log_uniform
+     max: -1.2
+     min: -3.4
+   learning_rate:
+     distribution: log_uniform
+     max: -6.9
+     min: -9.2
+   length_column_name:
+     value: input_length
+   logging_steps:
+     value: 10
+   max_duration_in_seconds:
+     value: 10
+   max_grad_norm:
+     distribution: log_uniform
+     max: 0.0
+     min: -2.3
+   max_target_length:
+     value: 64
+   mixed_precision:
+     values:
+       - True
+       - False
+   model_name_or_path:
+     value: ./
+   num_train_epochs:
+     value: 10
+   output_dir:
+     value: ./output_dir
+   per_device_eval_batch_size:
+     value: 2
+   per_device_train_batch_size:
+     values:
+       - 1
+       - 2
+   preprocessing_num_workers:
+     value: 16
+   text_column_name:
+     value: text
+   train_split_name:
+     value: train.100
+   warmup_steps:
+     value: 500
+ program: run_flax_speech_recognition_seq2seq.py
+ project: flax-wav2vec2-2-bart-large
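A note on reading the sweep bounds: wandb's legacy `log_uniform` distribution treats `min`/`max` as natural-log exponents, drawing a value X with ln(X) uniform in [min, max] (the newer `log_uniform_values` takes the raw values instead). A quick sketch of the conversion for the bounds above:

```python
import math

# wandb's log_uniform draws X with ln(X) uniform in [min, max], so the
# YAML exponents above translate to these concrete search ranges
bounds = {
    "dropouts / layerdrop": (-3.4, -1.2),
    "learning_rate": (-9.2, -6.9),
    "max_grad_norm": (-2.3, 0.0),
}
for name, (lo, hi) in bounds.items():
    print(f"{name}: {math.exp(lo):.2g} .. {math.exp(hi):.2g}")

# dropouts / layerdrop: 0.033 .. 0.3
# learning_rate:        1e-04 .. 0.001
# max_grad_norm:        0.1   .. 1
```

The sweep would then be launched in the usual way: `wandb sweep wer-sweep.yaml` to register it, followed by `wandb agent <SWEEP_ID>` on each machine that should pull runs.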