sanchit-gandhi (HF staff) committed
Commit 29776c5
1 parent: f1fce91

Saving train state of step 10000

Files changed (45)
  1. .gitignore +1 -0
  2. accelerate_config.yaml +17 -0
  3. checkpoint-10000-epoch-0/optimizer.bin +3 -0
  4. checkpoint-10000-epoch-0/pytorch_model.bin +3 -0
  5. checkpoint-10000-epoch-0/random_states_0.pkl +3 -0
  6. checkpoint-10000-epoch-0/random_states_1.pkl +3 -0
  7. checkpoint-10000-epoch-0/random_states_2.pkl +3 -0
  8. checkpoint-10000-epoch-0/random_states_3.pkl +3 -0
  9. checkpoint-10000-epoch-0/random_states_4.pkl +3 -0
  10. checkpoint-10000-epoch-0/random_states_5.pkl +3 -0
  11. checkpoint-10000-epoch-0/random_states_6.pkl +3 -0
  12. checkpoint-10000-epoch-0/random_states_7.pkl +3 -0
  13. checkpoint-10000-epoch-0/scheduler.bin +3 -0
  14. config.json +278 -0
  15. parler_tts/__init__.py +16 -0
  16. parler_tts/__pycache__/__init__.cpython-311.pyc +0 -0
  17. parler_tts/__pycache__/configuration_parler_tts.cpython-311.pyc +0 -0
  18. parler_tts/__pycache__/modeling_parler_tts.cpython-311.pyc +0 -0
  19. parler_tts/configuration_parler_tts.py +249 -0
  20. parler_tts/dac_wrapper/__init__.py +2 -0
  21. parler_tts/dac_wrapper/__pycache__/__init__.cpython-311.pyc +0 -0
  22. parler_tts/dac_wrapper/__pycache__/configuration_dac.cpython-311.pyc +0 -0
  23. parler_tts/dac_wrapper/__pycache__/modeling_dac.cpython-311.pyc +0 -0
  24. parler_tts/dac_wrapper/configuration_dac.py +25 -0
  25. parler_tts/dac_wrapper/modeling_dac.py +137 -0
  26. parler_tts/modeling_parler_tts.py +0 -0
  27. preprocessor_config.json +10 -0
  28. slurm_job.slurm +75 -0
  29. special_tokens_map.json +125 -0
  30. spiece.model +3 -0
  31. starting_point_0.01_rope.json +78 -0
  32. tokenizer.json +0 -0
  33. tokenizer_config.json +939 -0
  34. training/README.md +211 -0
  35. training/__init__.py +0 -0
  36. training/__pycache__/__init__.cpython-311.pyc +0 -0
  37. training/__pycache__/arguments.cpython-311.pyc +0 -0
  38. training/__pycache__/data.cpython-311.pyc +0 -0
  39. training/__pycache__/eval.cpython-311.pyc +0 -0
  40. training/__pycache__/utils.cpython-311.pyc +0 -0
  41. training/arguments.py +307 -0
  42. training/data.py +305 -0
  43. training/eval.py +59 -0
  44. training/run_parler_tts_training.py +1018 -0
  45. training/utils.py +125 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ wandb
accelerate_config.yaml ADDED
@@ -0,0 +1,17 @@
+ compute_environment: LOCAL_MACHINE
+ debug: false
+ distributed_type: MULTI_GPU
+ downcast_bf16: 'no'
+ enable_cpu_affinity: false
+ gpu_ids: all
+ machine_rank: 0
+ main_training_function: main
+ mixed_precision: bf16
+ num_machines: 1
+ num_processes: 8
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
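
The slurm script later in this commit feeds this file to `accelerate launch --config_file ./accelerate_config.yaml`, requesting 8 local GPU processes with bf16 mixed precision. A minimal sanity-check sketch, assuming PyYAML is installed and the file is read from the repo root:

```python
import yaml

# Load the launch config that slurm_job.slurm passes to `accelerate launch`.
with open("accelerate_config.yaml") as f:
    cfg = yaml.safe_load(f)

assert cfg["distributed_type"] == "MULTI_GPU"
assert cfg["num_processes"] == 8
print(cfg["mixed_precision"])  # "bf16"
```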
checkpoint-10000-epoch-0/optimizer.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faca8d9b4f2b93557cd517e8188deb5ee390b912f5d73af9772405e00df62ca3
+ size 3652763351
checkpoint-10000-epoch-0/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5a59ef5c6f9a904b217ebd019e96bd79b190ffe215dbfc15ce7f1191f969e6a0
+ size 2588465818
checkpoint-10000-epoch-0/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef5b7b9b4c136a83b572250c8474b2320fdd55197e2ae96b6c1391eb36815808
+ size 16036
checkpoint-10000-epoch-0/random_states_1.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c1772d67bcd790883dc03c91f5c09f1b420779a33bd59054c92029113e8b261
+ size 16036
checkpoint-10000-epoch-0/random_states_2.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c8461d5e8e024be4522f5955d745895a71252916361b8104ff36ae0334d3ab7
+ size 16036
checkpoint-10000-epoch-0/random_states_3.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07375b04b39853c28a183f7668e4f0170f504717f0c9b8c75fef39e0cf203728
+ size 16036
checkpoint-10000-epoch-0/random_states_4.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00373582317a0cfe11c5435555af7ba354b4f054982a709c2bd291a4f62fa34b
+ size 16036
checkpoint-10000-epoch-0/random_states_5.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf5381e301e604144e4515a2cdc9f4c88b0959403421839335d108b3cf6f7387
+ size 16036
checkpoint-10000-epoch-0/random_states_6.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be8a788bb0c42d8fa945913f403ccac36e0fecc32581e5b9a29fc5009fb21028
+ size 16036
checkpoint-10000-epoch-0/random_states_7.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf28d366848cf204322278ea609e4ee9209080da0b4ce367758b80bce5f19f9b
+ size 16036
checkpoint-10000-epoch-0/scheduler.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed0f8986a3a4e57c81aefc0174bd43cad9587978e99fdf28b8483089a1eb2d3d
+ size 1000
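
The checkpoint-10000-epoch-0 directory follows Accelerate's `save_state` layout: one `random_states_{i}.pkl` per process (8 here), plus `pytorch_model.bin`, `optimizer.bin` and `scheduler.bin`. A hedged sketch of restoring such a state with `Accelerator.load_state`; the model and optimizer below are placeholders, and the real script must prepare the same objects it saved:

```python
import torch
from accelerate import Accelerator

# Placeholder objects; in practice these must match what was prepared when the
# state was saved (the Parler-TTS model, its optimizer and LR scheduler).
model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=9.5e-4)

accelerator = Accelerator()
model, optimizer = accelerator.prepare(model, optimizer)

# Restores weights, optimizer state, scheduler and per-process RNG states.
accelerator.load_state("checkpoint-10000-epoch-0")
```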
config.json ADDED
@@ -0,0 +1,278 @@
+ {
+ "architectures": [
+ "ParlerTTSForConditionalGeneration"
+ ],
+ "audio_encoder": {
+ "_name_or_path": "parler-tts/dac_44khZ_8kbps",
+ "add_cross_attention": false,
+ "architectures": [
+ "DACModel"
+ ],
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "codebook_size": 1024,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": null,
+ "exponential_decay_length_penalty": null,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "frame_rate": 86,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "is_decoder": false,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "latent_dim": 1024,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_bitrate": 8,
+ "model_type": "dac",
+ "no_repeat_ngram_size": 0,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_codebooks": 9,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": null,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sampling_rate": 44100,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": true,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "float32",
+ "torchscript": false,
+ "typical_p": 1.0,
+ "use_bfloat16": false
+ },
+ "decoder": {
+ "_name_or_path": "./parler-tts-untrained-600M/decoder",
+ "activation_dropout": 0.0,
+ "activation_function": "gelu",
+ "add_cross_attention": true,
+ "architectures": [
+ "ParlerTTSForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": 1025,
+ "chunk_size_feed_forward": 0,
+ "cross_attention_hidden_size": null,
+ "decoder_start_token_id": null,
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout": 0.1,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 1024,
+ "exponential_decay_length_penalty": null,
+ "ffn_dim": 4096,
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "hidden_size": 1024,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_factor": 0.02,
+ "is_decoder": true,
+ "is_encoder_decoder": false,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layerdrop": 0.0,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "max_position_embeddings": 4096,
+ "min_length": 0,
+ "model_type": "parler_tts_decoder",
+ "no_repeat_ngram_size": 0,
+ "num_attention_heads": 16,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_codebooks": 9,
+ "num_hidden_layers": 24,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_scores": false,
+ "pad_token_id": 1024,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "rope_embeddings": true,
+ "rope_theta": 10000.0,
+ "scale_embedding": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": null,
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": false,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": "float32",
+ "torchscript": false,
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 1088
+ },
+ "decoder_start_token_id": 1025,
+ "is_encoder_decoder": true,
+ "model_type": "parler_tts",
+ "pad_token_id": 1024,
+ "prompt_cross_attention": true,
+ "text_encoder": {
+ "_name_or_path": "google/flan-t5-base",
+ "add_cross_attention": false,
+ "architectures": [
+ "T5ForConditionalGeneration"
+ ],
+ "bad_words_ids": null,
+ "begin_suppress_tokens": null,
+ "bos_token_id": null,
+ "chunk_size_feed_forward": 0,
+ "classifier_dropout": 0.0,
+ "cross_attention_hidden_size": null,
+ "d_ff": 2048,
+ "d_kv": 64,
+ "d_model": 768,
+ "decoder_start_token_id": 0,
+ "dense_act_fn": "gelu_new",
+ "diversity_penalty": 0.0,
+ "do_sample": false,
+ "dropout_rate": 0.1,
+ "early_stopping": false,
+ "encoder_no_repeat_ngram_size": 0,
+ "eos_token_id": 1,
+ "exponential_decay_length_penalty": null,
+ "feed_forward_proj": "gated-gelu",
+ "finetuning_task": null,
+ "forced_bos_token_id": null,
+ "forced_eos_token_id": null,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1"
+ },
+ "initializer_factor": 1.0,
+ "is_decoder": false,
+ "is_encoder_decoder": true,
+ "is_gated_act": true,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1
+ },
+ "layer_norm_epsilon": 1e-06,
+ "length_penalty": 1.0,
+ "max_length": 20,
+ "min_length": 0,
+ "model_type": "t5",
+ "n_positions": 512,
+ "no_repeat_ngram_size": 0,
+ "num_beam_groups": 1,
+ "num_beams": 1,
+ "num_decoder_layers": 12,
+ "num_heads": 12,
+ "num_layers": 12,
+ "num_return_sequences": 1,
+ "output_attentions": false,
+ "output_hidden_states": false,
+ "output_past": true,
+ "output_scores": false,
+ "pad_token_id": 0,
+ "prefix": null,
+ "problem_type": null,
+ "pruned_heads": {},
+ "relative_attention_max_distance": 128,
+ "relative_attention_num_buckets": 32,
+ "remove_invalid_values": false,
+ "repetition_penalty": 1.0,
+ "return_dict": true,
+ "return_dict_in_generate": false,
+ "sep_token_id": null,
+ "suppress_tokens": null,
+ "task_specific_params": {
+ "summarization": {
+ "early_stopping": true,
+ "length_penalty": 2.0,
+ "max_length": 200,
+ "min_length": 30,
+ "no_repeat_ngram_size": 3,
+ "num_beams": 4,
+ "prefix": "summarize: "
+ },
+ "translation_en_to_de": {
+ "early_stopping": true,
+ "max_length": 300,
+ "num_beams": 4,
+ "prefix": "translate English to German: "
+ },
+ "translation_en_to_fr": {
+ "early_stopping": true,
+ "max_length": 300,
+ "num_beams": 4,
+ "prefix": "translate English to French: "
+ },
+ "translation_en_to_ro": {
+ "early_stopping": true,
+ "max_length": 300,
+ "num_beams": 4,
+ "prefix": "translate English to Romanian: "
+ }
+ },
+ "temperature": 1.0,
+ "tf_legacy_loss": false,
+ "tie_encoder_decoder": false,
+ "tie_word_embeddings": false,
+ "tokenizer_class": null,
+ "top_k": 50,
+ "top_p": 1.0,
+ "torch_dtype": null,
+ "torchscript": false,
+ "typical_p": 1.0,
+ "use_bfloat16": false,
+ "use_cache": true,
+ "vocab_size": 32128
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.40.2",
+ "vocab_size": 32128
+ }
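
This is a composite config: a DAC audio encoder, a Flan-T5 text encoder and the Parler-TTS decoder are nested under one `parler_tts` config. A hedged sketch of inspecting it with the `ParlerTTSConfig` class added in this commit, assuming the `parler_tts` package and its dependencies are importable and `config.json` sits in the working directory:

```python
from parler_tts import ParlerTTSConfig  # importing also registers the "dac" model type

config = ParlerTTSConfig.from_pretrained(".")
print(config.text_encoder.model_type)   # t5
print(config.audio_encoder.model_type)  # dac
print(config.decoder.num_codebooks)     # 9
print(config.sampling_rate)             # 44100, proxied from the audio encoder
```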
parler_tts/__init__.py ADDED
@@ -0,0 +1,16 @@
+ __version__ = "0.1"
+
+
+ from .configuration_parler_tts import ParlerTTSConfig, ParlerTTSDecoderConfig
+ from .modeling_parler_tts import (
+ ParlerTTSForCausalLM,
+ ParlerTTSForConditionalGeneration,
+ apply_delay_pattern_mask,
+ build_delay_pattern_mask,
+ )
+
+ from .dac_wrapper import DACConfig, DACModel
+ from transformers import AutoConfig, AutoModel
+
+ AutoConfig.register("dac", DACConfig)
+ AutoModel.register(DACConfig, DACModel)
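
The two `register` calls make the custom `dac` model type resolvable through the Auto classes. A hedged sketch; fetching the config needs hub access, and instantiating the model additionally needs the `descript-audio-codec` package:

```python
import parler_tts  # noqa: F401  side effect: registers DACConfig / DACModel
from transformers import AutoConfig, AutoModel

# "parler-tts/dac_44khZ_8kbps" is the audio encoder referenced in config.json.
dac_config = AutoConfig.from_pretrained("parler-tts/dac_44khZ_8kbps")
print(type(dac_config).__name__)               # DACConfig
dac_model = AutoModel.from_config(dac_config)  # randomly initialised DACModel
```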
parler_tts/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (845 Bytes).
 
parler_tts/__pycache__/configuration_parler_tts.cpython-311.pyc ADDED
Binary file (11.5 kB).
 
parler_tts/__pycache__/modeling_parler_tts.cpython-311.pyc ADDED
Binary file (135 kB).
 
parler_tts/configuration_parler_tts.py ADDED
@@ -0,0 +1,249 @@
+ # coding=utf-8
+ # Copyright 2024 and The HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """ Parler-TTS model configuration"""
+
+ from transformers import AutoConfig, logging
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ logger = logging.get_logger(__name__)
+
+ MUSICGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
+ "facebook/parler_tts-small": "https://huggingface.co/facebook/parler_tts-small/resolve/main/config.json",
+ # See all ParlerTTS models at https://huggingface.co/models?filter=parler_tts
+ }
+
+
+ class ParlerTTSDecoderConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ParlerTTSDecoder`]. It is used to instantiate a
+ Parler-TTS decoder according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Parler-TTS
+ [facebook/parler_tts-small](https://huggingface.co/facebook/parler_tts-small) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 2049):
+ Vocabulary size of the ParlerTTSDecoder model. Defines the number of different tokens that can be
+ represented by the `inputs_ids` passed when calling [`ParlerTTSDecoder`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of decoder layers.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer block.
+ ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer block.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the decoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, text_encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 2048):
+ The maximum sequence length that this model might ever be used with. Typically, set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_factor (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556)
+ for more details.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+ Scale embeddings by dividing by sqrt(hidden_size).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether the model should return the last key/values attentions (not used by all models)
+ num_codebooks (`int`, *optional*, defaults to 4):
+ The number of parallel codebooks forwarded to the model.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether input and output word embeddings should be tied.
+ rope_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to use RoPE or absolute positional embeddings.
+ rope_theta (`float`, *optional*, defaults to 10000.0):
+ The base period of the RoPE embeddings.
+ """
+
+ model_type = "parler_tts_decoder"
+ keys_to_ignore_at_inference = ["past_key_values"]
+
+ def __init__(
+ self,
+ vocab_size=2049, # vocab size = 2048 (encodec vocab size) + 1 (eos)
+ max_position_embeddings=2048,
+ num_hidden_layers=24,
+ ffn_dim=4096,
+ num_attention_heads=16,
+ layerdrop=0.0,
+ use_cache=True,
+ activation_function="gelu",
+ hidden_size=1024,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ initializer_factor=0.02,
+ scale_embedding=False,
+ num_codebooks=4,
+ pad_token_id=2048,
+ bos_token_id=2049,
+ eos_token_id=2048,
+ tie_word_embeddings=False,
+ rope_embeddings=False,
+ rope_theta=10_000.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.ffn_dim = ffn_dim
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.initializer_factor = initializer_factor
+ self.layerdrop = layerdrop
+ self.use_cache = use_cache
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ self.num_codebooks = num_codebooks
+ self.rope_embeddings = rope_embeddings
+ self.rope_theta = rope_theta
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ tie_word_embeddings=tie_word_embeddings,
+ **kwargs,
+ )
+
+
+ class ParlerTTSConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`ParlerTTSModel`]. It is used to instantiate a
+ Parler-TTS model according to the specified arguments, defining the text encoder, audio encoder and Parler-TTS decoder
+ configs.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 1024):
+ Vocabulary size of the prompt token ids. Defines the number of different tokens that can be
+ represented by the `prompt_inputs_ids`.
+ prompt_cross_attention (`bool`, *optional*, defaults to `False`):
+ Whether to use cross-attention conditioning for the prompt (as well as the description).
+ kwargs (*optional*):
+ Dictionary of keyword arguments. Notably:
+
+ - **text_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
+ defines the text encoder config.
+ - **audio_encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that
+ defines the audio encoder config.
+ - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines
+ the decoder config.
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ...     ParlerTTSConfig,
+ ...     ParlerTTSDecoderConfig,
+ ...     T5Config,
+ ...     EncodecConfig,
+ ...     ParlerTTSForConditionalGeneration,
+ ... )
+
+ >>> # Initializing text encoder, audio encoder, and decoder model configurations
+ >>> text_encoder_config = T5Config()
+ >>> audio_encoder_config = EncodecConfig()
+ >>> decoder_config = ParlerTTSDecoderConfig()
+
+ >>> configuration = ParlerTTSConfig.from_sub_models_config(
+ ...     text_encoder_config, audio_encoder_config, decoder_config
+ ... )
+
+ >>> # Initializing a ParlerTTSForConditionalGeneration (with random weights) from the facebook/parler_tts-small style configuration
+ >>> model = ParlerTTSForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ >>> config_text_encoder = model.config.text_encoder
+ >>> config_audio_encoder = model.config.audio_encoder
+ >>> config_decoder = model.config.decoder
+
+ >>> # Saving the model, including its configuration
+ >>> model.save_pretrained("parler_tts-model")
+
+ >>> # loading model and config from pretrained folder
+ >>> parler_tts_config = ParlerTTSConfig.from_pretrained("parler_tts-model")
+ >>> model = ParlerTTSForConditionalGeneration.from_pretrained("parler_tts-model", config=parler_tts_config)
+ ```"""
+
+ model_type = "parler_tts"
+ is_composition = True
+
+ def __init__(self, vocab_size=1024, prompt_cross_attention=False, **kwargs):
+ super().__init__(**kwargs)
+ if "text_encoder" not in kwargs or "audio_encoder" not in kwargs or "decoder" not in kwargs:
+ raise ValueError("Config has to be initialized with text_encoder, audio_encoder and decoder config")
+
+ text_encoder_config = kwargs.pop("text_encoder")
+ text_encoder_model_type = text_encoder_config.pop("model_type")
+
+ audio_encoder_config = kwargs.pop("audio_encoder")
+ audio_encoder_model_type = audio_encoder_config.pop("model_type")
+
+ decoder_config = kwargs.pop("decoder")
+
+ self.vocab_size = vocab_size
+ self.prompt_cross_attention = prompt_cross_attention
+ self.text_encoder = AutoConfig.for_model(text_encoder_model_type, **text_encoder_config)
+ self.audio_encoder = AutoConfig.for_model(audio_encoder_model_type, **audio_encoder_config)
+ self.decoder = ParlerTTSDecoderConfig(**decoder_config)
+ self.is_encoder_decoder = True
+
+ @classmethod
+ def from_sub_models_config(
+ cls,
+ text_encoder_config: PretrainedConfig,
+ audio_encoder_config: PretrainedConfig,
+ decoder_config: ParlerTTSDecoderConfig,
+ **kwargs,
+ ):
+ r"""
+ Instantiate a [`ParlerTTSConfig`] (or a derived class) from text encoder, audio encoder and decoder
+ configurations.
+
+ Returns:
+ [`ParlerTTSConfig`]: An instance of a configuration object
+ """
+
+ return cls(
+ text_encoder=text_encoder_config.to_dict(),
+ audio_encoder=audio_encoder_config.to_dict(),
+ decoder=decoder_config.to_dict(),
+ **kwargs,
+ )
+
+ @property
+ # This is a property because you might want to change the codec model on the fly
+ def sampling_rate(self):
+ return self.audio_encoder.sampling_rate
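
A minimal sketch of composing the config programmatically, mirroring the docstring example but using the DAC wrapper from this commit instead of `EncodecConfig`; the decoder arguments shown are illustrative:

```python
from transformers import T5Config
from parler_tts import ParlerTTSConfig, ParlerTTSDecoderConfig
from parler_tts.dac_wrapper import DACConfig

config = ParlerTTSConfig.from_sub_models_config(
    text_encoder_config=T5Config(),
    audio_encoder_config=DACConfig(),
    decoder_config=ParlerTTSDecoderConfig(num_codebooks=9, rope_embeddings=True),
)
print(config.sampling_rate)  # 44100, taken from the DAC audio encoder config
```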
parler_tts/dac_wrapper/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .configuration_dac import DACConfig
+ from .modeling_dac import DACModel
parler_tts/dac_wrapper/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (293 Bytes).
 
parler_tts/dac_wrapper/__pycache__/configuration_dac.cpython-311.pyc ADDED
Binary file (1.33 kB).
 
parler_tts/dac_wrapper/__pycache__/modeling_dac.cpython-311.pyc ADDED
Binary file (6.28 kB).
 
parler_tts/dac_wrapper/configuration_dac.py ADDED
@@ -0,0 +1,25 @@
+ from transformers import PretrainedConfig
+ from typing import List
+
+
+ class DACConfig(PretrainedConfig):
+ model_type = "dac"
+
+ def __init__(
+ self,
+ num_codebooks: int = 9,
+ model_bitrate: int = 8,  # kbps
+ codebook_size: int = 1024,
+ latent_dim: int = 1024,
+ frame_rate: int = 86,
+ sampling_rate: int = 44100,
+ **kwargs,
+ ):
+ self.codebook_size = codebook_size
+ self.model_bitrate = model_bitrate
+ self.latent_dim = latent_dim
+ self.num_codebooks = num_codebooks
+ self.frame_rate = frame_rate
+ self.sampling_rate = sampling_rate
+
+ super().__init__(**kwargs)
parler_tts/dac_wrapper/modeling_dac.py ADDED
@@ -0,0 +1,137 @@
+ import torch
+
+ from transformers import PreTrainedModel
+ from transformers.models.encodec.modeling_encodec import EncodecEncoderOutput, EncodecDecoderOutput
+ from .configuration_dac import DACConfig
+
+ from dac.model import DAC
+
+
+ # model doesn't support batching yet
+
+
+ class DACModel(PreTrainedModel):
+ config_class = DACConfig
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.model = DAC(
+ n_codebooks=config.num_codebooks,
+ latent_dim=config.latent_dim,
+ codebook_size=config.codebook_size,
+ )
+
+ def encode(
+ self, input_values, padding_mask=None, bandwidth=None, return_dict=None, n_quantizers=None, sample_rate=None
+ ):
+ """
+ Encodes the input audio waveform into discrete codes.
+
+ Args:
+ input_values (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+ Float values of the input audio waveform.
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+ Padding mask used to pad the `input_values`.
+ bandwidth (`float`, *optional*):
+ Not used, kept to have the same interface as HF encodec.
+ n_quantizers (`int`, *optional*):
+ Number of quantizers to use, by default None.
+ If None, all quantizers are used.
+ sample_rate (`int`, *optional*):
+ Signal sampling_rate
+
+ Returns:
+ A list of frames containing the discrete encoded codes for the input audio waveform, along with rescaling
+ factors for each chunk when `normalize` is True. Each frame is a tuple `(codebook, scale)`, with
+ `codebook` of shape `[batch_size, num_codebooks, frames]`.
+ Scale is not used here.
+
+ """
+ _, channels, input_length = input_values.shape
+
+ if channels < 1 or channels > 2:
+ raise ValueError(f"Number of audio channels must be 1 or 2, but got {channels}")
+
+ audio_data = self.model.preprocess(input_values, sample_rate)
+
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # TODO: for now, no chunk length
+
+ chunk_length = None  # self.config.chunk_length
+ if chunk_length is None:
+ chunk_length = input_length
+ stride = input_length
+ else:
+ stride = self.config.chunk_stride
+
+ if padding_mask is None:
+ padding_mask = torch.ones_like(input_values).bool()
+
+ encoded_frames = []
+ scales = []
+
+ step = chunk_length - stride
+ if (input_length % stride) - step != 0:
+ raise ValueError(
+ "The input length is not properly padded for batched chunked decoding. Make sure to pad the input correctly."
+ )
+
+ for offset in range(0, input_length - step, stride):
+ mask = padding_mask[..., offset : offset + chunk_length].bool()
+ frame = audio_data[:, :, offset : offset + chunk_length]
+
+ scale = None
+
+ _, encoded_frame, _, _, _ = self.model.encode(frame, n_quantizers=n_quantizers)
+ encoded_frames.append(encoded_frame)
+ scales.append(scale)
+
+ encoded_frames = torch.stack(encoded_frames)
+
+ if not return_dict:
+ return (encoded_frames, scales)
+
+ return EncodecEncoderOutput(encoded_frames, scales)
+
+ def decode(
+ self,
+ audio_codes,
+ audio_scales,
+ padding_mask=None,
+ return_dict=None,
+ ):
+ """
+ Decodes the given frames into an output audio waveform.
+
+ Note that the output might be a bit bigger than the input. In that case, any extra steps at the end can be
+ trimmed.
+
+ Args:
+ audio_codes (`torch.FloatTensor` of shape `(batch_size, nb_chunks, chunk_length)`, *optional*):
+ Discrete code embeddings computed using `model.encode`.
+ audio_scales (`torch.Tensor` of shape `(batch_size, nb_chunks)`, *optional*):
+ Not used, kept to have the same interface as HF encodec.
+ padding_mask (`torch.Tensor` of shape `(batch_size, channels, sequence_length)`):
+ Padding mask used to pad the `input_values`.
+ Not used yet, kept to have the same interface as HF encodec.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ """
+ return_dict = return_dict or self.config.return_dict
+
+ # TODO: for now, no chunk length
+
+ if len(audio_codes) != 1:
+ raise ValueError(f"Expected one frame, got {len(audio_codes)}")
+
+ audio_values = self.model.quantizer.from_codes(audio_codes.squeeze(0))[0]
+ audio_values = self.model.decode(audio_values)
+ if not return_dict:
+ return (audio_values,)
+ return EncodecDecoderOutput(audio_values)
+
+ def forward(self, tensor):
+ raise ValueError(f"`DACModel.forward` not implemented yet")
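
A hedged round-trip sketch for the wrapper above. It needs the `descript-audio-codec` package (the `dac` import); the model is randomly initialised here, so the decoded audio is noise rather than a faithful reconstruction:

```python
import torch
from parler_tts.dac_wrapper import DACConfig, DACModel

model = DACModel(DACConfig()).eval()
wav = torch.randn(1, 1, 44100)  # (batch, channels, samples): 1 s of mono audio at 44.1 kHz

with torch.no_grad():
    enc = model.encode(wav, sample_rate=44100)
    # enc.audio_codes: (1, batch, num_codebooks, frames)
    dec = model.decode(enc.audio_codes, audio_scales=enc.audio_scales)

print(enc.audio_codes.shape, dec.audio_values.shape)
```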
parler_tts/modeling_parler_tts.py ADDED
The diff for this file is too large to render. See raw diff
 
preprocessor_config.json ADDED
@@ -0,0 +1,10 @@
+ {
+ "chunk_length_s": null,
+ "feature_extractor_type": "EncodecFeatureExtractor",
+ "feature_size": 1,
+ "overlap": null,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "return_attention_mask": true,
+ "sampling_rate": 44100
+ }
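
The feature extractor is the Encodec one running at 44.1 kHz, matching the DAC codec above. A hedged sketch of loading it from this checkout:

```python
from transformers import AutoFeatureExtractor

# Reads preprocessor_config.json from the current directory.
feature_extractor = AutoFeatureExtractor.from_pretrained(".")
print(feature_extractor.sampling_rate)  # 44100
```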
slurm_job.slurm ADDED
@@ -0,0 +1,75 @@
+ #!/bin/bash
+ #SBATCH --job-name=parler-tts
+ #SBATCH --nodes=1
+ # set 48h for job wall time limit
+ #SBATCH --requeue
+ #SBATCH --time=48:00:00
+ #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
+ #SBATCH --cpus-per-task=32
+ #SBATCH --gres=gpu:8
+ #SBATCH --partition=hopper-prod
+ #SBATCH --output=/fsx/sanchit/logs/%x-%j.out
+
+ set -x -e
+
+ # START EDIT
+ source ~/.bashrc
+ source /fsx/sanchit/miniconda3/bin/activate venv
+
+ LOG_PATH="/fsx/sanchit/logs/main_log.txt"
+ SAVE_DIR="/fsx/sanchit"
+ # END EDIT
+
+ echo "START TIME: $(date)"
+
+ GPUS_PER_NODE=8
+ NNODES=$SLURM_NNODES
+
+ # so processes know who to talk to
+ MASTER_ADDR=`scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1`
+
+ # From https://i.hsfzxjy.site/2021-03-10-obtain-a-random-unused-tcp-port-with-bash/
+ function unused_port() {
+ N=${1:-1}
+ comm -23 \
+ <(seq "1025" "65535" | sort) \
+ <(ss -Htan |
+ awk '{print $4}' |
+ cut -d':' -f2 |
+ sort -u) |
+ shuf |
+ head -n "$N"
+ }
+ MASTER_PORT=$(unused_port)
+
+ # export TORCH_CPP_LOG_LEVEL=INFO
+ # export TORCH_DISTRIBUTED_DEBUG=DETAIL
+
+ export LAUNCHER="python -u -m accelerate.commands.launch --config_file ./accelerate_config.yaml"
+
+ export PROGRAM="./training/run_parler_tts_training.py ./starting_point_0.01_rope.json"
+ export CMD="$LAUNCHER $PROGRAM"
+ echo $CMD
+
+ SRUN_ARGS=" \
+ --wait=60 \
+ --kill-on-bad-exit=1 \
+ "
+
+ # py-spy top -s -i -n -- $LAUNCHER --node_rank $SLURM_PROCID --role $SLURMD_NODENAME: $CMD
+ clear; srun $SRUN_ARGS --jobid $SLURM_JOB_ID bash -c "$CMD" 2>&1 | tee -a $SAVE_DIR/logs/main_log.txt
+
+
+ # srun error handling:
+ # --wait=60: wait 60 sec after the first task terminates before terminating all remaining tasks
+ # --kill-on-bad-exit=1: terminate a step if any task exits with a non-zero exit code
+
+ # SRUN_ARGS=" \
+ # --wait=60 \
+ # --kill-on-bad-exit=1 \
+ # "
+ #
+ # # py-spy top -s -i -n -- $LAUNCHER --node_rank $SLURM_PROCID --role $SLURMD_NODENAME: $CMD
+ # clear; srun $SRUN_ARGS --jobid $SLURM_JOBID bash -c "$CMD" 2>&1 | tee -a $SAVE_DIR/logs/main_log.txt
+
+ echo "END TIME: $(date)"
special_tokens_map.json ADDED
@@ -0,0 +1,125 @@
+ {
+ "additional_special_tokens": [
+ "<extra_id_0>",
+ "<extra_id_1>",
+ "<extra_id_2>",
+ "<extra_id_3>",
+ "<extra_id_4>",
+ "<extra_id_5>",
+ "<extra_id_6>",
+ "<extra_id_7>",
+ "<extra_id_8>",
+ "<extra_id_9>",
+ "<extra_id_10>",
+ "<extra_id_11>",
+ "<extra_id_12>",
+ "<extra_id_13>",
+ "<extra_id_14>",
+ "<extra_id_15>",
+ "<extra_id_16>",
+ "<extra_id_17>",
+ "<extra_id_18>",
+ "<extra_id_19>",
+ "<extra_id_20>",
+ "<extra_id_21>",
+ "<extra_id_22>",
+ "<extra_id_23>",
+ "<extra_id_24>",
+ "<extra_id_25>",
+ "<extra_id_26>",
+ "<extra_id_27>",
+ "<extra_id_28>",
+ "<extra_id_29>",
+ "<extra_id_30>",
+ "<extra_id_31>",
+ "<extra_id_32>",
+ "<extra_id_33>",
+ "<extra_id_34>",
+ "<extra_id_35>",
+ "<extra_id_36>",
+ "<extra_id_37>",
+ "<extra_id_38>",
+ "<extra_id_39>",
+ "<extra_id_40>",
+ "<extra_id_41>",
+ "<extra_id_42>",
+ "<extra_id_43>",
+ "<extra_id_44>",
+ "<extra_id_45>",
+ "<extra_id_46>",
+ "<extra_id_47>",
+ "<extra_id_48>",
+ "<extra_id_49>",
+ "<extra_id_50>",
+ "<extra_id_51>",
+ "<extra_id_52>",
+ "<extra_id_53>",
+ "<extra_id_54>",
+ "<extra_id_55>",
+ "<extra_id_56>",
+ "<extra_id_57>",
+ "<extra_id_58>",
+ "<extra_id_59>",
+ "<extra_id_60>",
+ "<extra_id_61>",
+ "<extra_id_62>",
+ "<extra_id_63>",
+ "<extra_id_64>",
+ "<extra_id_65>",
+ "<extra_id_66>",
+ "<extra_id_67>",
+ "<extra_id_68>",
+ "<extra_id_69>",
+ "<extra_id_70>",
+ "<extra_id_71>",
+ "<extra_id_72>",
+ "<extra_id_73>",
+ "<extra_id_74>",
+ "<extra_id_75>",
+ "<extra_id_76>",
+ "<extra_id_77>",
+ "<extra_id_78>",
+ "<extra_id_79>",
+ "<extra_id_80>",
+ "<extra_id_81>",
+ "<extra_id_82>",
+ "<extra_id_83>",
+ "<extra_id_84>",
+ "<extra_id_85>",
+ "<extra_id_86>",
+ "<extra_id_87>",
+ "<extra_id_88>",
+ "<extra_id_89>",
+ "<extra_id_90>",
+ "<extra_id_91>",
+ "<extra_id_92>",
+ "<extra_id_93>",
+ "<extra_id_94>",
+ "<extra_id_95>",
+ "<extra_id_96>",
+ "<extra_id_97>",
+ "<extra_id_98>",
+ "<extra_id_99>"
+ ],
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
starting_point_0.01_rope.json ADDED
@@ -0,0 +1,78 @@
+ {
+ "model_name_or_path": "parler-tts/parler-tts-untrained-600M-cross-attention-rope",
+ "save_to_disk": "/scratch/tmp_dataset_audio/",
+ "temporary_save_to_disk": "/scratch/tmp_dataset_audio/",
+ "push_to_hub": true,
+ "cache_dir": "/scratch/cache",
+
+
+ "feature_extractor_name":"ylacombe/dac_44khZ_8kbps",
+ "description_tokenizer_name":"google/flan-t5-base",
+ "prompt_tokenizer_name":"google/flan-t5-base",
+
+ "report_to": ["wandb"],
+ "overwrite_output_dir": false,
+ "output_dir": "./",
+
+ "train_dataset_name": "blabble-io/libritts_r+blabble-io/libritts_r+blabble-io/libritts_r+parler-tts/mls_eng_10k",
+ "train_metadata_dataset_name": "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated",
+ "train_dataset_config_name": "clean+clean+other+default",
+ "train_split_name": "train.clean.360+train.clean.100+train.other.500+train",
+
+ "eval_dataset_name": "blabble-io/libritts_r+parler-tts/mls_eng_10k",
+ "eval_metadata_dataset_name": "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated",
+ "eval_dataset_config_name": "other+default",
+ "eval_split_name": "test.other+test",
+
+ "target_audio_column_name": "audio",
+ "description_column_name": "text_description",
+ "prompt_column_name": "text",
+
+ "max_eval_samples": 96,
+
+ "max_duration_in_seconds": 30,
+ "min_duration_in_seconds": 2.0,
+ "max_text_length": 400,
+
+ "group_by_length": true,
+
+ "add_audio_samples_to_wandb": true,
+ "id_column_name": "id",
+
+ "preprocessing_num_workers": 8,
+
+ "do_train": true,
+ "num_train_epochs": 40,
+ "gradient_accumulation_steps": 8,
+ "gradient_checkpointing": false,
+ "per_device_train_batch_size": 3,
+ "learning_rate": 0.00095,
+ "adam_beta1": 0.9,
+ "adam_beta2": 0.99,
+ "weight_decay": 0.01,
+
+ "lr_scheduler_type": "constant_with_warmup",
+ "warmup_steps": 20000,
+
+
+ "logging_steps": 1000,
+ "freeze_text_encoder": true,
+
+
+ "do_eval": true,
+ "predict_with_generate": true,
+ "include_inputs_for_metrics": true,
+ "evaluation_strategy": "steps",
+ "eval_steps": 10000,
+ "save_steps": 10000,
+
+ "per_device_eval_batch_size": 12,
+
+ "audio_encoder_per_device_batch_size":20,
+ "dtype": "bfloat16",
+ "seed": 456,
+ "ddp_timeout": 7200,
+
+ "dataloader_num_workers":8
+ }
+
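
With the launch config above, the effective train batch size is 8 processes x 3 per-device samples x 8 gradient-accumulation steps = 192 samples per optimizer step. The dataset fields pair several corpora by joining names, configs and splits with "+"; a hedged sketch of how the training script presumably splits them back:

```python
import json

with open("starting_point_0.01_rope.json") as f:
    args = json.load(f)

# The i-th dataset name goes with the i-th config name and the i-th split.
names = args["train_dataset_name"].split("+")
configs = args["train_dataset_config_name"].split("+")
splits = args["train_split_name"].split("+")
for name, config, split in zip(names, configs, splits):
    print(f"{name} | config={config} | split={split}")
```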
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,939 @@
1
+ {
2
+ "added_tokens_decoder": {
3
+ "0": {
4
+ "content": "<pad>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false,
9
+ "special": true
10
+ },
11
+ "1": {
12
+ "content": "</s>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false,
17
+ "special": true
18
+ },
19
+ "2": {
20
+ "content": "<unk>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false,
25
+ "special": true
26
+ },
27
+ "32000": {
28
+ "content": "<extra_id_99>",
29
+ "lstrip": false,
30
+ "normalized": false,
31
+ "rstrip": false,
32
+ "single_word": false,
33
+ "special": true
34
+ },
35
+ "32001": {
36
+ "content": "<extra_id_98>",
37
+ "lstrip": false,
38
+ "normalized": false,
39
+ "rstrip": false,
40
+ "single_word": false,
41
+ "special": true
42
+ },
43
+ "32002": {
44
+ "content": "<extra_id_97>",
45
+ "lstrip": false,
46
+ "normalized": false,
47
+ "rstrip": false,
48
+ "single_word": false,
49
+ "special": true
50
+ },
51
+ "32003": {
52
+ "content": "<extra_id_96>",
53
+ "lstrip": false,
54
+ "normalized": false,
55
+ "rstrip": false,
56
+ "single_word": false,
57
+ "special": true
58
+ },
59
+ "32004": {
60
+ "content": "<extra_id_95>",
61
+ "lstrip": false,
62
+ "normalized": false,
63
+ "rstrip": false,
64
+ "single_word": false,
65
+ "special": true
66
+ },
67
+ "32005": {
68
+ "content": "<extra_id_94>",
69
+ "lstrip": false,
70
+ "normalized": false,
71
+ "rstrip": false,
72
+ "single_word": false,
73
+ "special": true
74
+ },
75
+ "32006": {
76
+ "content": "<extra_id_93>",
77
+ "lstrip": false,
78
+ "normalized": false,
79
+ "rstrip": false,
80
+ "single_word": false,
81
+ "special": true
82
+ },
83
+ "32007": {
84
+ "content": "<extra_id_92>",
85
+ "lstrip": false,
86
+ "normalized": false,
87
+ "rstrip": false,
88
+ "single_word": false,
89
+ "special": true
90
+ },
91
+ "32008": {
92
+ "content": "<extra_id_91>",
93
+ "lstrip": false,
94
+ "normalized": false,
95
+ "rstrip": false,
96
+ "single_word": false,
97
+ "special": true
98
+ },
99
+ "32009": {
100
+ "content": "<extra_id_90>",
101
+ "lstrip": false,
102
+ "normalized": false,
103
+ "rstrip": false,
104
+ "single_word": false,
105
+ "special": true
106
+ },
107
+ "32010": {
108
+ "content": "<extra_id_89>",
109
+ "lstrip": false,
110
+ "normalized": false,
111
+ "rstrip": false,
112
+ "single_word": false,
113
+ "special": true
114
+ },
115
+ "32011": {
116
+ "content": "<extra_id_88>",
117
+ "lstrip": false,
118
+ "normalized": false,
119
+ "rstrip": false,
120
+ "single_word": false,
121
+ "special": true
122
+ },
123
+ "32012": {
124
+ "content": "<extra_id_87>",
125
+ "lstrip": false,
126
+ "normalized": false,
127
+ "rstrip": false,
128
+ "single_word": false,
129
+ "special": true
130
+ },
131
+ "32013": {
132
+ "content": "<extra_id_86>",
133
+ "lstrip": false,
134
+ "normalized": false,
135
+ "rstrip": false,
136
+ "single_word": false,
137
+ "special": true
138
+ },
139
+ "32014": {
140
+ "content": "<extra_id_85>",
141
+ "lstrip": false,
142
+ "normalized": false,
143
+ "rstrip": false,
144
+ "single_word": false,
145
+ "special": true
146
+ },
147
+ "32015": {
148
+ "content": "<extra_id_84>",
149
+ "lstrip": false,
150
+ "normalized": false,
151
+ "rstrip": false,
152
+ "single_word": false,
153
+ "special": true
154
+ },
155
+ "32016": {
156
+ "content": "<extra_id_83>",
157
+ "lstrip": false,
158
+ "normalized": false,
159
+ "rstrip": false,
160
+ "single_word": false,
161
+ "special": true
162
+ },
163
+ "32017": {
164
+ "content": "<extra_id_82>",
165
+ "lstrip": false,
166
+ "normalized": false,
167
+ "rstrip": false,
168
+ "single_word": false,
169
+ "special": true
170
+ },
171
+ "32018": {
172
+ "content": "<extra_id_81>",
173
+ "lstrip": false,
174
+ "normalized": false,
175
+ "rstrip": false,
176
+ "single_word": false,
177
+ "special": true
178
+ },
179
+ "32019": {
180
+ "content": "<extra_id_80>",
181
+ "lstrip": false,
182
+ "normalized": false,
183
+ "rstrip": false,
184
+ "single_word": false,
185
+ "special": true
186
+ },
187
+ "32020": {
188
+ "content": "<extra_id_79>",
189
+ "lstrip": false,
190
+ "normalized": false,
191
+ "rstrip": false,
192
+ "single_word": false,
193
+ "special": true
194
+ },
195
+ "32021": {
196
+ "content": "<extra_id_78>",
197
+ "lstrip": false,
198
+ "normalized": false,
199
+ "rstrip": false,
200
+ "single_word": false,
201
+ "special": true
202
+ },
203
+ "32022": {
204
+ "content": "<extra_id_77>",
205
+ "lstrip": false,
206
+ "normalized": false,
207
+ "rstrip": false,
208
+ "single_word": false,
209
+ "special": true
210
+ },
211
+ "32023": {
212
+ "content": "<extra_id_76>",
213
+ "lstrip": false,
214
+ "normalized": false,
215
+ "rstrip": false,
216
+ "single_word": false,
217
+ "special": true
218
+ },
219
+ "32024": {
220
+ "content": "<extra_id_75>",
221
+ "lstrip": false,
222
+ "normalized": false,
223
+ "rstrip": false,
224
+ "single_word": false,
225
+ "special": true
226
+ },
227
+ "32025": {
228
+ "content": "<extra_id_74>",
229
+ "lstrip": false,
230
+ "normalized": false,
231
+ "rstrip": false,
232
+ "single_word": false,
233
+ "special": true
234
+ },
235
+ "32026": {
236
+ "content": "<extra_id_73>",
237
+ "lstrip": false,
238
+ "normalized": false,
239
+ "rstrip": false,
240
+ "single_word": false,
241
+ "special": true
242
+ },
243
+ "32027": {
244
+ "content": "<extra_id_72>",
245
+ "lstrip": false,
246
+ "normalized": false,
247
+ "rstrip": false,
248
+ "single_word": false,
249
+ "special": true
250
+ },
251
+ "32028": {
252
+ "content": "<extra_id_71>",
253
+ "lstrip": false,
254
+ "normalized": false,
255
+ "rstrip": false,
256
+ "single_word": false,
257
+ "special": true
258
+ },
259
+ "32029": {
260
+ "content": "<extra_id_70>",
261
+ "lstrip": false,
262
+ "normalized": false,
263
+ "rstrip": false,
264
+ "single_word": false,
265
+ "special": true
266
+ },
267
+ "32030": {
268
+ "content": "<extra_id_69>",
269
+ "lstrip": false,
270
+ "normalized": false,
271
+ "rstrip": false,
272
+ "single_word": false,
273
+ "special": true
274
+ },
275
+ "32031": {
276
+ "content": "<extra_id_68>",
277
+ "lstrip": false,
278
+ "normalized": false,
279
+ "rstrip": false,
280
+ "single_word": false,
281
+ "special": true
282
+ },
283
+ "32032": {
284
+ "content": "<extra_id_67>",
285
+ "lstrip": false,
286
+ "normalized": false,
287
+ "rstrip": false,
288
+ "single_word": false,
289
+ "special": true
290
+ },
291
+ "32033": {
292
+ "content": "<extra_id_66>",
293
+ "lstrip": false,
294
+ "normalized": false,
295
+ "rstrip": false,
296
+ "single_word": false,
297
+ "special": true
298
+ },
299
+ "32034": {
300
+ "content": "<extra_id_65>",
301
+ "lstrip": false,
302
+ "normalized": false,
303
+ "rstrip": false,
304
+ "single_word": false,
305
+ "special": true
306
+ },
307
+ "32035": {
308
+ "content": "<extra_id_64>",
309
+ "lstrip": false,
310
+ "normalized": false,
311
+ "rstrip": false,
312
+ "single_word": false,
313
+ "special": true
314
+ },
315
+ "32036": {
316
+ "content": "<extra_id_63>",
317
+ "lstrip": false,
318
+ "normalized": false,
319
+ "rstrip": false,
320
+ "single_word": false,
321
+ "special": true
322
+ },
323
+ "32037": {
324
+ "content": "<extra_id_62>",
325
+ "lstrip": false,
326
+ "normalized": false,
327
+ "rstrip": false,
328
+ "single_word": false,
329
+ "special": true
330
+ },
331
+ "32038": {
332
+ "content": "<extra_id_61>",
333
+ "lstrip": false,
334
+ "normalized": false,
335
+ "rstrip": false,
336
+ "single_word": false,
337
+ "special": true
338
+ },
339
+ "32039": {
340
+ "content": "<extra_id_60>",
341
+ "lstrip": false,
342
+ "normalized": false,
343
+ "rstrip": false,
344
+ "single_word": false,
345
+ "special": true
346
+ },
347
+ "32040": {
348
+ "content": "<extra_id_59>",
349
+ "lstrip": false,
350
+ "normalized": false,
351
+ "rstrip": false,
352
+ "single_word": false,
353
+ "special": true
354
+ },
355
+ "32041": {
356
+ "content": "<extra_id_58>",
357
+ "lstrip": false,
358
+ "normalized": false,
359
+ "rstrip": false,
360
+ "single_word": false,
361
+ "special": true
362
+ },
363
+ "32042": {
364
+ "content": "<extra_id_57>",
365
+ "lstrip": false,
366
+ "normalized": false,
367
+ "rstrip": false,
368
+ "single_word": false,
369
+ "special": true
370
+ },
371
+ "32043": {
372
+ "content": "<extra_id_56>",
373
+ "lstrip": false,
374
+ "normalized": false,
375
+ "rstrip": false,
376
+ "single_word": false,
377
+ "special": true
378
+ },
379
+ "32044": {
380
+ "content": "<extra_id_55>",
381
+ "lstrip": false,
382
+ "normalized": false,
383
+ "rstrip": false,
384
+ "single_word": false,
385
+ "special": true
386
+ },
387
+ "32045": {
388
+ "content": "<extra_id_54>",
389
+ "lstrip": false,
390
+ "normalized": false,
391
+ "rstrip": false,
392
+ "single_word": false,
393
+ "special": true
394
+ },
395
+ "32046": {
396
+ "content": "<extra_id_53>",
397
+ "lstrip": false,
398
+ "normalized": false,
399
+ "rstrip": false,
400
+ "single_word": false,
401
+ "special": true
402
+ },
403
+ "32047": {
404
+ "content": "<extra_id_52>",
405
+ "lstrip": false,
406
+ "normalized": false,
407
+ "rstrip": false,
408
+ "single_word": false,
409
+ "special": true
410
+ },
411
+ "32048": {
412
+ "content": "<extra_id_51>",
413
+ "lstrip": false,
414
+ "normalized": false,
415
+ "rstrip": false,
416
+ "single_word": false,
417
+ "special": true
418
+ },
419
+ "32049": {
420
+ "content": "<extra_id_50>",
421
+ "lstrip": false,
422
+ "normalized": false,
423
+ "rstrip": false,
424
+ "single_word": false,
425
+ "special": true
426
+ },
427
+ "32050": {
428
+ "content": "<extra_id_49>",
429
+ "lstrip": false,
430
+ "normalized": false,
431
+ "rstrip": false,
432
+ "single_word": false,
433
+ "special": true
434
+ },
435
+ "32051": {
436
+ "content": "<extra_id_48>",
437
+ "lstrip": false,
438
+ "normalized": false,
439
+ "rstrip": false,
440
+ "single_word": false,
441
+ "special": true
442
+ },
443
+ "32052": {
444
+ "content": "<extra_id_47>",
445
+ "lstrip": false,
446
+ "normalized": false,
447
+ "rstrip": false,
448
+ "single_word": false,
449
+ "special": true
450
+ },
451
+ "32053": {
452
+ "content": "<extra_id_46>",
453
+ "lstrip": false,
454
+ "normalized": false,
455
+ "rstrip": false,
456
+ "single_word": false,
457
+ "special": true
458
+ },
459
+ "32054": {
460
+ "content": "<extra_id_45>",
461
+ "lstrip": false,
462
+ "normalized": false,
463
+ "rstrip": false,
464
+ "single_word": false,
465
+ "special": true
466
+ },
467
+ "32055": {
468
+ "content": "<extra_id_44>",
469
+ "lstrip": false,
470
+ "normalized": false,
471
+ "rstrip": false,
472
+ "single_word": false,
473
+ "special": true
474
+ },
475
+ "32056": {
476
+ "content": "<extra_id_43>",
477
+ "lstrip": false,
478
+ "normalized": false,
479
+ "rstrip": false,
480
+ "single_word": false,
481
+ "special": true
482
+ },
483
+ "32057": {
484
+ "content": "<extra_id_42>",
485
+ "lstrip": false,
486
+ "normalized": false,
487
+ "rstrip": false,
488
+ "single_word": false,
489
+ "special": true
490
+ },
491
+ "32058": {
492
+ "content": "<extra_id_41>",
493
+ "lstrip": false,
494
+ "normalized": false,
495
+ "rstrip": false,
496
+ "single_word": false,
497
+ "special": true
498
+ },
499
+ "32059": {
500
+ "content": "<extra_id_40>",
501
+ "lstrip": false,
502
+ "normalized": false,
503
+ "rstrip": false,
504
+ "single_word": false,
505
+ "special": true
506
+ },
507
+ "32060": {
508
+ "content": "<extra_id_39>",
509
+ "lstrip": false,
510
+ "normalized": false,
511
+ "rstrip": false,
512
+ "single_word": false,
513
+ "special": true
514
+ },
515
+ "32061": {
516
+ "content": "<extra_id_38>",
517
+ "lstrip": false,
518
+ "normalized": false,
519
+ "rstrip": false,
520
+ "single_word": false,
521
+ "special": true
522
+ },
523
+ "32062": {
524
+ "content": "<extra_id_37>",
525
+ "lstrip": false,
526
+ "normalized": false,
527
+ "rstrip": false,
528
+ "single_word": false,
529
+ "special": true
530
+ },
531
+ "32063": {
532
+ "content": "<extra_id_36>",
533
+ "lstrip": false,
534
+ "normalized": false,
535
+ "rstrip": false,
536
+ "single_word": false,
537
+ "special": true
538
+ },
539
+ "32064": {
540
+ "content": "<extra_id_35>",
541
+ "lstrip": false,
542
+ "normalized": false,
543
+ "rstrip": false,
544
+ "single_word": false,
545
+ "special": true
546
+ },
547
+ "32065": {
548
+ "content": "<extra_id_34>",
549
+ "lstrip": false,
550
+ "normalized": false,
551
+ "rstrip": false,
552
+ "single_word": false,
553
+ "special": true
554
+ },
555
+ "32066": {
556
+ "content": "<extra_id_33>",
557
+ "lstrip": false,
558
+ "normalized": false,
559
+ "rstrip": false,
560
+ "single_word": false,
561
+ "special": true
562
+ },
563
+ "32067": {
564
+ "content": "<extra_id_32>",
565
+ "lstrip": false,
566
+ "normalized": false,
567
+ "rstrip": false,
568
+ "single_word": false,
569
+ "special": true
570
+ },
571
+ "32068": {
572
+ "content": "<extra_id_31>",
573
+ "lstrip": false,
574
+ "normalized": false,
575
+ "rstrip": false,
576
+ "single_word": false,
577
+ "special": true
578
+ },
579
+ "32069": {
580
+ "content": "<extra_id_30>",
581
+ "lstrip": false,
582
+ "normalized": false,
583
+ "rstrip": false,
584
+ "single_word": false,
585
+ "special": true
586
+ },
587
+ "32070": {
588
+ "content": "<extra_id_29>",
589
+ "lstrip": false,
590
+ "normalized": false,
591
+ "rstrip": false,
592
+ "single_word": false,
593
+ "special": true
594
+ },
595
+ "32071": {
596
+ "content": "<extra_id_28>",
597
+ "lstrip": false,
598
+ "normalized": false,
599
+ "rstrip": false,
600
+ "single_word": false,
601
+ "special": true
602
+ },
603
+ "32072": {
604
+ "content": "<extra_id_27>",
605
+ "lstrip": false,
606
+ "normalized": false,
607
+ "rstrip": false,
608
+ "single_word": false,
609
+ "special": true
610
+ },
611
+ "32073": {
612
+ "content": "<extra_id_26>",
613
+ "lstrip": false,
614
+ "normalized": false,
615
+ "rstrip": false,
616
+ "single_word": false,
617
+ "special": true
618
+ },
619
+ "32074": {
620
+ "content": "<extra_id_25>",
621
+ "lstrip": false,
622
+ "normalized": false,
623
+ "rstrip": false,
624
+ "single_word": false,
625
+ "special": true
626
+ },
627
+ "32075": {
628
+ "content": "<extra_id_24>",
629
+ "lstrip": false,
630
+ "normalized": false,
631
+ "rstrip": false,
632
+ "single_word": false,
633
+ "special": true
634
+ },
635
+ "32076": {
636
+ "content": "<extra_id_23>",
637
+ "lstrip": false,
638
+ "normalized": false,
639
+ "rstrip": false,
640
+ "single_word": false,
641
+ "special": true
642
+ },
643
+ "32077": {
644
+ "content": "<extra_id_22>",
645
+ "lstrip": false,
646
+ "normalized": false,
647
+ "rstrip": false,
648
+ "single_word": false,
649
+ "special": true
650
+ },
651
+ "32078": {
652
+ "content": "<extra_id_21>",
653
+ "lstrip": false,
654
+ "normalized": false,
655
+ "rstrip": false,
656
+ "single_word": false,
657
+ "special": true
658
+ },
659
+ "32079": {
660
+ "content": "<extra_id_20>",
661
+ "lstrip": false,
662
+ "normalized": false,
663
+ "rstrip": false,
664
+ "single_word": false,
665
+ "special": true
666
+ },
667
+ "32080": {
668
+ "content": "<extra_id_19>",
669
+ "lstrip": false,
670
+ "normalized": false,
671
+ "rstrip": false,
672
+ "single_word": false,
673
+ "special": true
674
+ },
675
+ "32081": {
676
+ "content": "<extra_id_18>",
677
+ "lstrip": false,
678
+ "normalized": false,
679
+ "rstrip": false,
680
+ "single_word": false,
681
+ "special": true
682
+ },
683
+ "32082": {
684
+ "content": "<extra_id_17>",
685
+ "lstrip": false,
686
+ "normalized": false,
687
+ "rstrip": false,
688
+ "single_word": false,
689
+ "special": true
690
+ },
691
+ "32083": {
692
+ "content": "<extra_id_16>",
693
+ "lstrip": false,
694
+ "normalized": false,
695
+ "rstrip": false,
696
+ "single_word": false,
697
+ "special": true
698
+ },
699
+ "32084": {
700
+ "content": "<extra_id_15>",
701
+ "lstrip": false,
702
+ "normalized": false,
703
+ "rstrip": false,
704
+ "single_word": false,
705
+ "special": true
706
+ },
707
+ "32085": {
708
+ "content": "<extra_id_14>",
709
+ "lstrip": false,
710
+ "normalized": false,
711
+ "rstrip": false,
712
+ "single_word": false,
713
+ "special": true
714
+ },
715
+ "32086": {
716
+ "content": "<extra_id_13>",
717
+ "lstrip": false,
718
+ "normalized": false,
719
+ "rstrip": false,
720
+ "single_word": false,
721
+ "special": true
722
+ },
723
+ "32087": {
724
+ "content": "<extra_id_12>",
725
+ "lstrip": false,
726
+ "normalized": false,
727
+ "rstrip": false,
728
+ "single_word": false,
729
+ "special": true
730
+ },
731
+ "32088": {
732
+ "content": "<extra_id_11>",
733
+ "lstrip": false,
734
+ "normalized": false,
735
+ "rstrip": false,
736
+ "single_word": false,
737
+ "special": true
738
+ },
739
+ "32089": {
740
+ "content": "<extra_id_10>",
741
+ "lstrip": false,
742
+ "normalized": false,
743
+ "rstrip": false,
744
+ "single_word": false,
745
+ "special": true
746
+ },
747
+ "32090": {
748
+ "content": "<extra_id_9>",
749
+ "lstrip": false,
750
+ "normalized": false,
751
+ "rstrip": false,
752
+ "single_word": false,
753
+ "special": true
754
+ },
755
+ "32091": {
756
+ "content": "<extra_id_8>",
757
+ "lstrip": false,
758
+ "normalized": false,
759
+ "rstrip": false,
760
+ "single_word": false,
761
+ "special": true
762
+ },
763
+ "32092": {
764
+ "content": "<extra_id_7>",
765
+ "lstrip": false,
766
+ "normalized": false,
767
+ "rstrip": false,
768
+ "single_word": false,
769
+ "special": true
770
+ },
771
+ "32093": {
772
+ "content": "<extra_id_6>",
773
+ "lstrip": false,
774
+ "normalized": false,
775
+ "rstrip": false,
776
+ "single_word": false,
777
+ "special": true
778
+ },
779
+ "32094": {
780
+ "content": "<extra_id_5>",
781
+ "lstrip": false,
782
+ "normalized": false,
783
+ "rstrip": false,
784
+ "single_word": false,
785
+ "special": true
786
+ },
787
+ "32095": {
788
+ "content": "<extra_id_4>",
789
+ "lstrip": false,
790
+ "normalized": false,
791
+ "rstrip": false,
792
+ "single_word": false,
793
+ "special": true
794
+ },
795
+ "32096": {
796
+ "content": "<extra_id_3>",
797
+ "lstrip": false,
798
+ "normalized": false,
799
+ "rstrip": false,
800
+ "single_word": false,
801
+ "special": true
802
+ },
803
+ "32097": {
804
+ "content": "<extra_id_2>",
805
+ "lstrip": false,
806
+ "normalized": false,
807
+ "rstrip": false,
808
+ "single_word": false,
809
+ "special": true
810
+ },
811
+ "32098": {
812
+ "content": "<extra_id_1>",
813
+ "lstrip": false,
814
+ "normalized": false,
815
+ "rstrip": false,
816
+ "single_word": false,
817
+ "special": true
818
+ },
819
+ "32099": {
820
+ "content": "<extra_id_0>",
821
+ "lstrip": false,
822
+ "normalized": false,
823
+ "rstrip": false,
824
+ "single_word": false,
825
+ "special": true
826
+ }
827
+ },
828
+ "additional_special_tokens": [
829
+ "<extra_id_0>",
830
+ "<extra_id_1>",
831
+ "<extra_id_2>",
832
+ "<extra_id_3>",
833
+ "<extra_id_4>",
834
+ "<extra_id_5>",
835
+ "<extra_id_6>",
836
+ "<extra_id_7>",
837
+ "<extra_id_8>",
838
+ "<extra_id_9>",
839
+ "<extra_id_10>",
840
+ "<extra_id_11>",
841
+ "<extra_id_12>",
842
+ "<extra_id_13>",
843
+ "<extra_id_14>",
844
+ "<extra_id_15>",
845
+ "<extra_id_16>",
846
+ "<extra_id_17>",
847
+ "<extra_id_18>",
848
+ "<extra_id_19>",
849
+ "<extra_id_20>",
850
+ "<extra_id_21>",
851
+ "<extra_id_22>",
852
+ "<extra_id_23>",
853
+ "<extra_id_24>",
854
+ "<extra_id_25>",
855
+ "<extra_id_26>",
856
+ "<extra_id_27>",
857
+ "<extra_id_28>",
858
+ "<extra_id_29>",
859
+ "<extra_id_30>",
860
+ "<extra_id_31>",
861
+ "<extra_id_32>",
862
+ "<extra_id_33>",
863
+ "<extra_id_34>",
864
+ "<extra_id_35>",
865
+ "<extra_id_36>",
866
+ "<extra_id_37>",
867
+ "<extra_id_38>",
868
+ "<extra_id_39>",
869
+ "<extra_id_40>",
870
+ "<extra_id_41>",
871
+ "<extra_id_42>",
872
+ "<extra_id_43>",
873
+ "<extra_id_44>",
874
+ "<extra_id_45>",
875
+ "<extra_id_46>",
876
+ "<extra_id_47>",
877
+ "<extra_id_48>",
878
+ "<extra_id_49>",
879
+ "<extra_id_50>",
880
+ "<extra_id_51>",
881
+ "<extra_id_52>",
882
+ "<extra_id_53>",
883
+ "<extra_id_54>",
884
+ "<extra_id_55>",
885
+ "<extra_id_56>",
886
+ "<extra_id_57>",
887
+ "<extra_id_58>",
888
+ "<extra_id_59>",
889
+ "<extra_id_60>",
890
+ "<extra_id_61>",
891
+ "<extra_id_62>",
892
+ "<extra_id_63>",
893
+ "<extra_id_64>",
894
+ "<extra_id_65>",
895
+ "<extra_id_66>",
896
+ "<extra_id_67>",
897
+ "<extra_id_68>",
898
+ "<extra_id_69>",
899
+ "<extra_id_70>",
900
+ "<extra_id_71>",
901
+ "<extra_id_72>",
902
+ "<extra_id_73>",
903
+ "<extra_id_74>",
904
+ "<extra_id_75>",
905
+ "<extra_id_76>",
906
+ "<extra_id_77>",
907
+ "<extra_id_78>",
908
+ "<extra_id_79>",
909
+ "<extra_id_80>",
910
+ "<extra_id_81>",
911
+ "<extra_id_82>",
912
+ "<extra_id_83>",
913
+ "<extra_id_84>",
914
+ "<extra_id_85>",
915
+ "<extra_id_86>",
916
+ "<extra_id_87>",
917
+ "<extra_id_88>",
918
+ "<extra_id_89>",
919
+ "<extra_id_90>",
920
+ "<extra_id_91>",
921
+ "<extra_id_92>",
922
+ "<extra_id_93>",
923
+ "<extra_id_94>",
924
+ "<extra_id_95>",
925
+ "<extra_id_96>",
926
+ "<extra_id_97>",
927
+ "<extra_id_98>",
928
+ "<extra_id_99>"
929
+ ],
930
+ "clean_up_tokenization_spaces": true,
931
+ "eos_token": "</s>",
932
+ "extra_ids": 100,
933
+ "model_max_length": 512,
934
+ "pad_token": "<pad>",
935
+ "padding_side": "left",
936
+ "sp_model_kwargs": {},
937
+ "tokenizer_class": "T5Tokenizer",
938
+ "unk_token": "<unk>"
939
+ }
training/README.md ADDED
@@ -0,0 +1,211 @@
1
+ # Training Parler-TTS
2
+
3
+ <a target="_blank" href="https://colab.research.google.com/github/ylacombe/scripts_and_notebooks/blob/main/Finetuning_Parler_TTS_on_a_single_speaker_dataset.ipynb">
4
+ <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
5
+ </a>
6
+
7
+ **TL;DR:** Once you have followed the [installation steps](#requirements), you can reproduce the [Parler-TTS Mini v0.1](https://huggingface.co/parler-tts/parler_tts_mini_v0.1) training recipe with the following command line:
8
+
9
+ ```sh
10
+ accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_0.01.json
11
+ ```
12
+
13
+ -------------
14
+
15
+ This sub-folder contains all the information needed to train or fine-tune your own Parler-TTS model. It consists of:
16
+ - [1. An introduction to the Parler-TTS architecture](#1-architecture)
17
+ - [2. First steps to get started](#2-getting-started)
18
+ - [3. Training guide](#3-training)
19
+
20
+ > [!IMPORTANT]
21
+ > You can also follow [this fine-tuning guide](https://colab.research.google.com/github/ylacombe/scripts_and_notebooks/blob/main/Finetuning_Parler_TTS_on_a_single_speaker_dataset.ipynb) on a mono-speaker dataset example.
22
+
23
+ ## 1. Architecture
24
+
25
+ At the moment, the Parler-TTS architecture is a carbon copy of the [MusicGen architecture](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/musicgen#model-structure) and can be decomposed into three distinct stages:
26
+ 1. Text encoder: maps the text descriptions to a sequence of hidden-state representations. Parler-TTS uses a frozen text encoder initialised entirely from Flan-T5
27
+ 2. Parler-TTS decoder: a language model (LM) that auto-regressively generates audio tokens (or codes) conditional on the encoder hidden-state representations
28
+ 3. Audio codec: used to recover the audio waveform from the audio tokens predicted by the decoder. We use the [DAC model](https://github.com/descriptinc/descript-audio-codec) from Descript, although other codec models, such as [EnCodec](https://huggingface.co/facebook/encodec_48khz), can also be used
29
+
30
+ Parler-TTS, however, introduces a few small tweaks (illustrated by the inference sketch after this list):
31
+ - The text **description** is passed through the text encoder and used in the cross-attention layers of the decoder.
32
+ - The text **prompt** is simply passed through an embedding layer and concatenated to the decoder input hidden states.
33
+ - The audio encoder used is [**DAC**](https://descript.notion.site/Descript-Audio-Codec-11389fce0ce2419891d6591a68f814d5) instead of [Encodec](https://github.com/facebookresearch/encodec), as it exhibits better quality.
34
+
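+ To make the description/prompt split concrete, here is a minimal inference sketch. It assumes the released `parler-tts/parler_tts_mini_v0.1` checkpoint and the package's `generate` API; adapt it to your own checkpoint and tokenizer.
+
+ ```python
+ import soundfile as sf
+ from transformers import AutoTokenizer
+ from parler_tts import ParlerTTSForConditionalGeneration
+
+ model = ParlerTTSForConditionalGeneration.from_pretrained("parler-tts/parler_tts_mini_v0.1")
+ tokenizer = AutoTokenizer.from_pretrained("parler-tts/parler_tts_mini_v0.1")
+
+ description = "A female speaker with a slightly low-pitched voice delivers her words quite expressively."
+ prompt = "Hey, how are you doing today?"
+
+ # the description goes through the frozen text encoder and the decoder cross-attention layers
+ input_ids = tokenizer(description, return_tensors="pt").input_ids
+ # the prompt is embedded and concatenated to the decoder input hidden states
+ prompt_input_ids = tokenizer(prompt, return_tensors="pt").input_ids
+
+ generation = model.generate(input_ids=input_ids, prompt_input_ids=prompt_input_ids)
+ sf.write("parler_tts_out.wav", generation.cpu().numpy().squeeze(), model.config.sampling_rate)
+ ```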
35
+
36
+ ## 2. Getting started
37
+
38
+ To get started, you need to follow a few steps:
39
+ 1. Install the requirements.
40
+ 2. Find or initialize the model you'll train on.
41
+ 3. Find and/or annotate the dataset you'll train your model on.
42
+
43
+ ### Requirements
44
+
45
+ The Parler-TTS code is written in [PyTorch](https://pytorch.org) and [Accelerate](https://huggingface.co/docs/accelerate/index). It has a few additional dependencies, such as [wandb](https://wandb.ai/), mainly for logging and evaluation.
46
+
47
+ To install the package for training, you need to clone the repository from source...
48
+
49
+ ```bash
50
+ git clone https://github.com/huggingface/parler-tts.git
51
+ cd parler-tts
52
+ ```
53
+
54
+ ... And then install the requirements:
55
+
56
+ ```bash
57
+ pip install -e .[train]
58
+ ```
59
+
60
+ Optionally, you can create a wandb account and log in to it by following [this guide](https://docs.wandb.ai/quickstart). [`wandb`](https://docs.wandb.ai/) allows for better tracking of the experiment metrics and losses.
61
+
62
+ You also have the option to configure Accelerate by running the following command. Note that you should set the number of GPUs you wish to use for training, and the data type (dtype) you prefer for training/inference (e.g. `bfloat16` on A100 GPUs, `float16` on V100 GPUs, etc.):
63
+
64
+ ```bash
65
+ accelerate config
66
+ ```
67
+
68
+ Lastly, you can link your Hugging Face account so that you can push model repositories to the Hub. This will allow you to save your trained models on the Hub and share them with the community. Run the command:
69
+
70
+ ```bash
71
+ git config --global credential.helper store
72
+ huggingface-cli login
73
+ ```
74
+ Then enter an authentication token from https://huggingface.co/settings/tokens. Create a new token if you do not have one already, and make sure that it has "write" privileges.
75
+
76
+ ### Initialize a model from scratch or use a pre-trained one.
77
+
78
+ Depending on your compute resources and your dataset, you need to choose between fine-tuning a pre-trained model and training a new model from scratch.
79
+
80
+ To that end, we released a 600M-parameter checkpoint trained on 10.5K hours of annotated data under the repository id [`parler-tts/parler_tts_mini_v0.1`](https://huggingface.co/parler-tts/parler_tts_mini_v0.1), which you can fine-tune for your own use case.
81
+
82
+ You can also train your own model from scratch. You can find [here](/helpers/model_init_scripts/) examples of how to initialize a model from scratch. For example, you can initialize a dummy model with:
83
+
84
+ ```sh
85
+ python helpers/model_init_scripts/init_dummy_model.py ./parler-tts-untrained-dummy --text_model "google-t5/t5-small" --audio_model "parler-tts/dac_44khZ_8kbps"
86
+ ```
87
+
88
+ In the rest of this guide, and to reproduce the Parler-TTS Mini v0.1 training recipe, we'll use a 600M-parameter model that we'll initialize with:
89
+
90
+ ```sh
91
+ python helpers/model_init_scripts/init_model_600M.py ./parler-tts-untrained-600M --text_model "google/flan-t5-base" --audio_model "parler-tts/dac_44khZ_8kbps"
92
+ ```
93
+
94
+
95
+ ### Create or find datasets
96
+
97
+ To train your own Parler-TTS, you need datasets with 3 main features:
98
+ - speech data
99
+ - text transcription of the speech data
100
+ - a conditioning text description, which you can create using [Data-Speech](https://github.com/huggingface/dataspeech), a library that lets you annotate speaker and utterance characteristics with a natural language description.
101
+
102
+ Note that we chose to use descriptions of the main speech characteristics (speaker pitch, speaking rate, level of noise, etc.), but you are free to use any handwritten or generated text description that makes sense, as in the illustrative sketch below.
103
+
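+ Concretely, once annotated, every training example boils down to these three fields. A purely illustrative sketch (the field names match the column arguments used in the training command below; your own dataset may use different names):
+
+ ```python
+ import numpy as np
+
+ # purely illustrative example of the three features a training sample must provide
+ example = {
+     "audio": {"array": np.zeros(44_100), "sampling_rate": 44_100},  # speech data
+     "text": "Hey, how are you doing today?",  # text transcription of the speech, used as the prompt
+     "text_description": "A female speaker with a low-pitched, expressive voice and very little background noise.",  # conditioning description
+ }
+ ```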
104
+ To train Parler-TTS Mini v0.1, we used:
105
+ * The full [LibriTTS-R dataset](https://huggingface.co/datasets/blabble-io/libritts_r), a 1K-hour high-quality speech dataset.
106
+ * A [10K hours subset](https://huggingface.co/datasets/parler-tts/mls_eng_10k) of [Multilingual LibriSpeech](https://huggingface.co/datasets/facebook/multilingual_librispeech).
107
+
108
+ Both datasets have been annotated using the [Data-Speech](https://github.com/huggingface/dataspeech) recipe; the resulting annotations are available [here](https://huggingface.co/datasets/parler-tts/libritts_r_tags_tagged_10k_generated) and [here](https://huggingface.co/datasets/parler-tts/mls-eng-10k-tags_tagged_10k_generated), respectively.
109
+
110
+
111
+ ## 3. Training
112
+
113
+ The script [`run_parler_tts_training.py`](/training/run_parler_tts_training.py) is an end-to-end script that:
114
+ 1. loads the dataset(s) and merges them with the annotation dataset(s) if necessary,
115
+ 2. pre-computes the audio tokens,
116
+ 3. trains Parler-TTS.
117
+
118
+ To train Parler-TTS Mini v0.1, we roughly used:
119
+
120
+ ```sh
121
+ accelerate launch ./training/run_parler_tts_training.py \
122
+ --model_name_or_path "./parler-tts-untrained-600M/parler-tts-untrained-600M/" \
123
+ --feature_extractor_name "parler-tts/dac_44khZ_8kbps" \
124
+ --description_tokenizer_name "google/flan-t5-base" \
125
+ --prompt_tokenizer_name "google/flan-t5-base" \
126
+ --report_to "wandb" \
127
+ --overwrite_output_dir true \
128
+ --train_dataset_name "blabble-io/libritts_r+blabble-io/libritts_r+blabble-io/libritts_r+parler-tts/mls_eng_10k" \
129
+ --train_metadata_dataset_name "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated" \
130
+ --train_dataset_config_name "clean+clean+other+default" \
131
+ --train_split_name "train.clean.360+train.clean.100+train.other.500+train" \
132
+ --eval_dataset_name "blabble-io/libritts_r+parler-tts/mls_eng_10k" \
133
+ --eval_metadata_dataset_name "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated" \
134
+ --eval_dataset_config_name "other+default" \
135
+ --eval_split_name "test.other+test" \
136
+ --target_audio_column_name "audio" \
137
+ --description_column_name "text_description" \
138
+ --prompt_column_name "text" \
139
+ --max_duration_in_seconds 30 \
140
+ --min_duration_in_seconds 2.0 \
141
+ --max_text_length 400 \
142
+ --add_audio_samples_to_wandb true \
143
+ --id_column_name "id" \
144
+ --preprocessing_num_workers 8 \
145
+ --do_train true \
146
+ --num_train_epochs 40 \
147
+ --gradient_accumulation_steps 8 \
148
+ --gradient_checkpointing false \
149
+ --per_device_train_batch_size 3 \
150
+ --learning_rate 0.00095 \
151
+ --adam_beta1 0.9 \
152
+ --adam_beta2 0.99 \
153
+ --weight_decay 0.01 \
154
+ --lr_scheduler_type "constant_with_warmup" \
155
+ --warmup_steps 20000 \
156
+ --logging_steps 1000 \
157
+ --freeze_text_encoder true \
158
+ --do_eval true \
159
+ --predict_with_generate true \
160
+ --include_inputs_for_metrics true \
161
+ --evaluation_strategy steps \
162
+ --eval_steps 10000 \
163
+ --save_steps 10000 \
164
+ --per_device_eval_batch_size 12 \
165
+ --audio_encoder_per_device_batch_size 20 \
166
+ --dtype "bfloat16" \
167
+ --seed 456 \
168
+ --output_dir "./output_dir_training/" \
169
+ --temporary_save_to_disk "./audio_code_tmp/" \
170
+ --save_to_disk "./tmp_dataset_audio/" \
171
+ --max_eval_samples 96 \
172
+ --dataloader_num_workers 8 \
173
+ --group_by_length true
174
+ ```
175
+
176
+ In particular, note how multiple training datasets, metadata datasets, configurations and splits can be loaded by separating the dataset arguments with `+` symbols:
177
+ ```sh
178
+ "train_dataset_name": "blabble-io/libritts_r+blabble-io/libritts_r+blabble-io/libritts_r+parler-tts/mls_eng_10k",
179
+ "train_metadata_dataset_name": "parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/libritts_r_tags_tagged_10k_generated+parler-tts/mls-eng-10k-tags_tagged_10k_generated",
180
+ "train_dataset_config_name": "clean+clean+other+default",
181
+ "train_split_name": "train.clean.360+train.clean.100+train.other.500+train",
182
+ ```
183
+
184
+
185
+ Alternatively, you can write a JSON config file. Here, [starting_point_0.01.json](helpers/training_configs/starting_point_0.01.json) contains exactly the same hyper-parameters as above and can be launched like this:
186
+ ```sh
187
+ accelerate launch ./training/run_parler_tts_training.py ./helpers/training_configs/starting_point_0.01.json
188
+ ```
189
+
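+ Under the hood, this works because the training script uses `HfArgumentParser`: when it receives a single argument ending in `.json`, it reads every argument from that file instead of the command line (see the top of `run_parler_tts_training.py`). A rough sketch of the pattern:
+
+ ```python
+ import os
+ import sys
+
+ from transformers import HfArgumentParser
+
+ from training.arguments import ModelArguments, DataTrainingArguments, ParlerTTSTrainingArguments
+
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, ParlerTTSTrainingArguments))
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+     # a single JSON path: parse all arguments from the config file
+     model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
+ else:
+     # otherwise, parse the usual --flag style command-line arguments
+     model_args, data_args, training_args = parser.parse_args_into_dataclasses()
+ ```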
190
+ Training logs will be reported to wandb, provided that you passed `--report_to "wandb"` to the arguments. An example of what a training log from the above training looks like can be found [here](https://wandb.ai/ylacombe/parler-tts-300M-punctuated/runs/q6h7hspc?nw=nwuserylacombe).
191
+
192
+ > [!TIP]
193
+ > Training a new model from scratch can easily be overwhelming, so here's what training looked like for v0.1: [logs](https://api.wandb.ai/links/ylacombe/ea449l81)
194
+
195
+ Scaling to multiple GPUs using [distributed data parallelism (DDP)](https://pytorch.org/tutorials/beginner/ddp_series_theory.html) is trivial: simply run `accelerate config` and select the multi-GPU option, specifying the IDs of the GPUs you wish to use. The above script can then be run using DDP with no code changes. In our case, we used a node of 8 H100 80GB GPUs to train Parler-TTS v0.1 for around 4 days.
196
+
197
+
198
+ There are a few other noteworthy arguments:
199
+ 1. `train_metadata_dataset_name` and `eval_metadata_dataset_name` specify, if necessary, the names of the dataset(s) that contain(s) the conditioning text descriptions. For example, this [dataset resulting from the Data-Speech annotation process](https://huggingface.co/datasets/parler-tts/libritts_r_tags_tagged_10k_generated) is saved without the audio column, as it's costly to write and push audio data, so it needs to be concatenated back to the original LibriTTS-R dataset (a sketch of this merge follows the list).
200
+ 2. As noted above, the script pre-computes audio tokens, since computing audio codes is costly and only needs to be done once (we're freezing the audio encoder). `audio_encoder_per_device_batch_size` sets the per-device batch size for this pre-processing step.
201
+ 3. Additionally, when scaling up the training data and iterating on the hyper-parameters or the model architecture, we might want to avoid recomputing the audio tokens at each training run. That's why we introduced two additional parameters, `save_to_disk` and `temporary_save_to_disk`, which serve as buffers for saving intermediate datasets. Note that the processed data consists of text and audio tokens, which are much more memory-efficient, so the additional disk space required is negligible.
202
+ 4. `predict_with_generate` and `add_audio_samples_to_wandb` are required to store generated audios and to compute WER and CLAP similarity.
203
+ 5. `freeze_text_encoder`: freezes the text encoder, to save compute resources.
204
+
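+ For point 1, here is a rough sketch of how the script stitches an annotation dataset back onto its audio dataset; it mirrors `load_multiple_datasets` in [`training/data.py`](/training/data.py) (the dataset names, config and split are the evaluation ones used above):
+
+ ```python
+ from datasets import load_dataset, concatenate_datasets
+
+ audio_ds = load_dataset("blabble-io/libritts_r", "other", split="test.other")
+ meta_ds = load_dataset("parler-tts/libritts_r_tags_tagged_10k_generated", "other", split="test.other")
+
+ # drop the columns the two datasets share, then concatenate them column-wise (axis=1),
+ # so that every row keeps its audio plus the generated text description
+ overlap = set(meta_ds.column_names).intersection(audio_ds.column_names)
+ meta_ds = meta_ds.remove_columns(overlap)
+ merged = concatenate_datasets([audio_ds, meta_ds], axis=1)
+ ```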
205
+ And finally, two additional comments:
206
+ 1. `lr_scheduler_type`: defines the learning rate schedule, one of `constant_with_warmup` or `cosine`. When experimenting with a training set-up or training for very few epochs, using `constant_with_warmup` is typically beneficial, since the learning rate remains high over the short training run. When performing longer training runs, using a `cosine` schedule should give better results.
207
+ 2. `dtype`: data type (dtype) in which the model computation should be performed. Note that this only controls the dtype of the computations (forward and backward pass), and not the dtype of the parameters or optimiser states.
208
+
209
+ > [!TIP]
210
+ > Fine-tuning is as easy as modifying `model_name_or_path` to a pre-trained model.
211
+ > For example: `--model_name_or_path parler-tts/parler_tts_mini_v0.1`.
training/__init__.py ADDED
File without changes
training/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (153 Bytes). View file
 
training/__pycache__/arguments.cpython-311.pyc ADDED
Binary file (13.5 kB). View file
 
training/__pycache__/data.cpython-311.pyc ADDED
Binary file (16.8 kB). View file
 
training/__pycache__/eval.cpython-311.pyc ADDED
Binary file (4.24 kB). View file
 
training/__pycache__/utils.cpython-311.pyc ADDED
Binary file (7.89 kB). View file
 
training/arguments.py ADDED
@@ -0,0 +1,307 @@
1
+ from dataclasses import dataclass, field
2
+ from typing import Optional
3
+
4
+ from transformers import Seq2SeqTrainingArguments
5
+
6
+
7
+ @dataclass
8
+ class ModelArguments:
9
+ """
10
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
11
+ """
12
+
13
+ model_name_or_path: str = field(
14
+ metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
15
+ )
16
+ config_name: Optional[str] = field(
17
+ default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
18
+ )
19
+ feature_extractor_name: Optional[str] = field(
20
+ default=None, metadata={"help": "Pretrained feature extractor name or path if not the same as model_name"}
21
+ )
22
+ description_tokenizer_name: Optional[str] = field(
23
+ default=None, metadata={"help": "Pretrained description tokenizer name or path if not the same as model_name"}
24
+ )
25
+ prompt_tokenizer_name: Optional[str] = field(
26
+ default=None,
27
+ metadata={"help": "Pretrained prompt tokenizer name or path if not the same as description_tokenizer_name"},
28
+ )
29
+ cache_dir: Optional[str] = field(
30
+ default=None,
31
+ metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
32
+ )
33
+ use_fast_tokenizer: bool = field(
34
+ default=True,
35
+ metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
36
+ )
37
+ model_revision: str = field(
38
+ default="main",
39
+ metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
40
+ )
41
+ pad_token_id: int = field(
42
+ default=None,
43
+ metadata={"help": "If specified, change the model pad token id."},
44
+ )
45
+ decoder_start_token_id: int = field(
46
+ default=None,
47
+ metadata={"help": "If specified, change the model decoder start token id."},
48
+ )
49
+ freeze_text_encoder: bool = field(
50
+ default=False,
51
+ metadata={"help": "Whether to freeze the text encoder."},
52
+ )
53
+ do_sample: bool = field(
54
+ default=True,
55
+ metadata={"help": "Whether to do sampling or greedy decoding."},
56
+ )
57
+ temperature: float = field(
58
+ default=1.0,
59
+ metadata={"help": "Temperature if sampling."},
60
+ )
61
+ max_length: int = field(
62
+ default=2580,
63
+ metadata={"help": "Generation max length."},
64
+ )
65
+ bandwidth: float = field(
66
+ default=6,
67
+ metadata={"help": "Audio encoder bandwidth."},
68
+ )
69
+ asr_model_name_or_path: str = field(
70
+ default="distil-whisper/distil-large-v2",
71
+ metadata={
72
+ "help": "Used to compute WER during evaluation. Path to pretrained model or model identifier from huggingface.co/models"
73
+ },
74
+ )
75
+ clap_model_name_or_path: str = field(
76
+ default="laion/larger_clap_music_and_speech",
77
+ metadata={
78
+ "help": "Used to compute audio similarity during evaluation. Path to pretrained model or model identifier from huggingface.co/models"
79
+ },
80
+ )
81
+
82
+
83
+ @dataclass
84
+ class DataTrainingArguments:
85
+ """
86
+ Arguments pertaining to what data we are going to input our model for training and eval.
87
+
88
+ Using `HfArgumentParser` we can turn this class
89
+ into argparse arguments to be able to specify them on
90
+ the command line.
91
+ """
92
+
93
+ train_dataset_name: str = field(
94
+ default=None,
95
+ metadata={
96
+ "help": "The name of the training dataset to use (via the datasets library). Load and combine "
97
+ "multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine "
98
+ " librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."
99
+ },
100
+ )
101
+ train_dataset_config_name: Optional[str] = field(
102
+ default=None,
103
+ metadata={
104
+ "help": "The configuration name of the training dataset to use (via the datasets library). Load and combine "
105
+ "multiple datasets by separating dataset configs by a '+' symbol."
106
+ },
107
+ )
108
+ train_split_name: str = field(
109
+ default="train",
110
+ metadata={
111
+ "help": ("The name of the training data set split to use (via the datasets library). Defaults to 'train'")
112
+ },
113
+ )
114
+ train_dataset_samples: str = field(
115
+ default=None,
116
+ metadata={
117
+ "help": "Number of samples in the training data. Load and combine "
118
+ "multiple datasets by separating dataset samples by a '+' symbol."
119
+ },
120
+ )
121
+ train_metadata_dataset_name: str = field(
122
+ default=None,
123
+ metadata={
124
+ "help": "The name of the metadata training dataset to use (via the datasets library). Load and combine "
125
+ "multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine "
126
+ " librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."
127
+ },
128
+ )
129
+ eval_dataset_name: str = field(
130
+ default=None,
131
+ metadata={
132
+ "help": "The name of the evaluation dataset to use (via the datasets library). Defaults to the training dataset name if unspecified."
133
+ },
134
+ )
135
+ eval_dataset_config_name: Optional[str] = field(
136
+ default=None,
137
+ metadata={
138
+ "help": "The configuration name of the evaluation dataset to use (via the datasets library). Defaults to the training dataset config name if unspecified"
139
+ },
140
+ )
141
+ eval_split_name: str = field(
142
+ default="test",
143
+ metadata={
144
+ "help": "The name of the evaluation data set split to use (via the datasets library). Defaults to 'test'"
145
+ },
146
+ )
147
+ eval_metadata_dataset_name: str = field(
148
+ default=None,
149
+ metadata={
150
+ "help": "The name of the metadata training dataset to use (via the datasets library). Load and combine "
151
+ "multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine "
152
+ " librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."
153
+ },
154
+ )
155
+ target_audio_column_name: str = field(
156
+ default="audio",
157
+ metadata={"help": "The name of the dataset column containing the target audio data. Defaults to 'audio'"},
158
+ )
159
+ description_column_name: str = field(
160
+ default=None,
161
+ metadata={"help": "The name of the dataset column containing the description text data. Defaults to 'None'."},
162
+ )
163
+ prompt_column_name: str = field(
164
+ default=None,
165
+ metadata={"help": "The name of the dataset column containing the prompt text data. Defaults to 'None'."},
166
+ )
167
+ overwrite_cache: bool = field(
168
+ default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
169
+ )
170
+ preprocessing_num_workers: Optional[int] = field(
171
+ default=None,
172
+ metadata={"help": "The number of processes to use for the preprocessing."},
173
+ )
174
+ max_train_samples: Optional[int] = field(
175
+ default=None,
176
+ metadata={
177
+ "help": (
178
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
179
+ "value if set."
180
+ )
181
+ },
182
+ )
183
+ max_eval_samples: Optional[int] = field(
184
+ default=None,
185
+ metadata={
186
+ "help": (
187
+ "For debugging purposes or quicker training, truncate the number of validation examples to this "
188
+ "value if set."
189
+ )
190
+ },
191
+ )
192
+ max_duration_in_seconds: float = field(
193
+ default=35.0,
194
+ metadata={
195
+ "help": (
196
+ "Filter audio files that are longer than `max_duration_in_seconds` seconds. "
197
+ "Also, used to set maximum audio length if `pad_to_max_length=True`."
198
+ )
199
+ },
200
+ )
201
+ min_duration_in_seconds: float = field(
202
+ default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
203
+ )
204
+ max_text_length: int = field(
205
+ default=500, metadata={"help": "If set, maximum description length in number of characters."}
206
+ )
207
+ max_prompt_token_length: int = field(
208
+ default=None,
209
+ metadata={
210
+ "help": (
211
+ "If set, filter samples with prompts that are longer than `max_prompt_token_length` tokens."
212
+ "Also, used to set maximum prompt token length if `pad_to_max_length=True`."
213
+ )
214
+ },
215
+ )
216
+ max_description_token_length: int = field(
217
+ default=None,
218
+ metadata={
219
+ "help": (
220
+ "If set, filter samples with descriptions that are longer than `max_description_token_length` tokens."
221
+ "Also, used to set maximum description token length if `pad_to_max_length=True`."
222
+ )
223
+ },
224
+ )
225
+ pad_to_max_length: bool = field(
226
+ default=False,
227
+ metadata={
228
+ "help": (
229
+ "If `True`, pad audio, prompt and description to a maximum length set with respectively "
230
+ "`max_duration_in_seconds`, `max_prompt_token_length`, `max_description_token_length`."
231
+ )
232
+ },
233
+ )
234
+ preprocessing_only: bool = field(
235
+ default=False,
236
+ metadata={
237
+ "help": (
238
+ "Whether to only do data preprocessing and skip training. This is especially useful when data"
239
+ " preprocessing errors out in distributed training due to timeout. In this case, one should run the"
240
+ " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets"
241
+ " can consequently be loaded in distributed training."
242
+ " In this training script, `save_to_disk` must be set to the path in which the dataset should be saved. "
243
+ )
244
+ },
245
+ )
246
+ token: str = field(
247
+ default=None,
248
+ metadata={
249
+ "help": (
250
+ "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
251
+ "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
252
+ )
253
+ },
254
+ )
255
+ use_auth_token: bool = field(
256
+ default=None,
257
+ metadata={
258
+ "help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token` instead."
259
+ },
260
+ )
261
+ trust_remote_code: bool = field(
262
+ default=False,
263
+ metadata={
264
+ "help": (
265
+ "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
266
+ "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
267
+ "execute code present on the Hub on your local machine."
268
+ )
269
+ },
270
+ )
271
+ add_audio_samples_to_wandb: bool = field(
272
+ default=False,
273
+ metadata={"help": "If set and if `wandb` in args.report_to, will add generated audio samples to wandb logs."},
274
+ )
275
+ id_column_name: str = field(default=None, metadata={"help": "id column name."})
276
+ wandb_project: str = field(
277
+ default="parler-speech",
278
+ metadata={"help": "The name of the wandb project."},
279
+ )
280
+ save_to_disk: str = field(
281
+ default=None,
282
+ metadata={
283
+ "help": "If set, will save the dataset to this path if this is an empty folder. If not empty, will load the datasets from it."
284
+ },
285
+ )
286
+ temporary_save_to_disk: str = field(default=None, metadata={"help": "Temporarily save audio labels here."})
287
+ pad_to_multiple_of: Optional[int] = field(
288
+ default=2,
289
+ metadata={"help": ("Pad to multiple of for tokenizers.")},
290
+ )
291
+
292
+
293
+ @dataclass
294
+ class ParlerTTSTrainingArguments(Seq2SeqTrainingArguments):
295
+ dtype: Optional[str] = field(
296
+ default="float32",
297
+ metadata={
298
+ "help": (
299
+ "The data type (dtype) in which to run training. One of `float32` (full-precision), "
300
+ "`float16` or `bfloat16` (both half-precision)."
301
+ )
302
+ },
303
+ )
304
+ audio_encoder_per_device_batch_size: int = field(
305
+ default=8,
306
+ metadata={"help": ("Specify the batch size of the audio encoding pre-processing steps.")},
307
+ )
training/data.py ADDED
@@ -0,0 +1,305 @@
1
+ import logging
2
+ from dataclasses import dataclass
3
+ from typing import Dict, List, Optional, Union, Set
4
+
5
+ import torch
6
+ import numpy as np
7
+ import datasets
8
+ from datasets import load_dataset, Dataset, IterableDataset, interleave_datasets, concatenate_datasets
9
+ from transformers import AutoFeatureExtractor, AutoTokenizer
10
+ from tqdm import tqdm
11
+
12
+ from accelerate import Accelerator
13
+
14
+
15
+ @dataclass
16
+ class DataCollatorEncodecWithPadding:
17
+ """
18
+ Data collator that will dynamically pad the inputs received to the longest sequence in the batch or
19
+ to `max_length` if `max_length` is set and `padding=max_length`.
20
+ """
21
+
22
+ feature_extractor: AutoFeatureExtractor
23
+ audio_column_name: str
24
+ feature_extractor_input_name: Optional[str] = "input_values"
25
+ max_length: Optional[int] = None
26
+ padding: Optional[str] = "longest"
27
+
28
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
29
+ # split inputs and labels since they have to be of different lengths and need
30
+ # different padding methods
31
+ audios = [feature[self.audio_column_name]["array"] for feature in features]
32
+ len_audio = [len(audio) for audio in audios]
33
+
34
+ batch = self.feature_extractor(audios, return_tensors="pt", padding=self.padding, max_length=self.max_length)
35
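+ # keep the original (un-padded) audio lengths so the true number of codec frames can be recovered after encoding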
+ batch["len_audio"] = torch.tensor(len_audio).unsqueeze(1)
36
+ return batch
37
+
38
+
39
+ @dataclass
40
+ class DataCollatorParlerTTSWithPadding:
41
+ """
42
+ Data collator that will dynamically pad the inputs received.
43
+ Args:
44
+ prompt_tokenizer (:class:`~transformers.AutoTokenizer`)
45
+ The prompt_tokenizer used for processing the data.
46
+ description_tokenizer (:class:`~transformers.AutoTokenizer`)
47
+ The description_tokenizer used for processing the data.
48
+ padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
49
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
50
+ among:
51
+ * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
52
+ sequence is provided).
53
+ * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
54
+ maximum acceptable input length for the model if that argument is not provided.
55
+ * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
56
+ different lengths).
57
+ pad_to_multiple_of (:obj:`int`, `optional`):
58
+ If set will pad the sequence to a multiple of the provided value.
59
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
60
+ 7.5 (Volta).
61
+ """
62
+
63
+ prompt_tokenizer: AutoTokenizer
64
+ description_tokenizer: AutoTokenizer
65
+ padding: Union[bool, str] = "longest"
66
+ pad_to_multiple_of: Optional[int] = None
67
+ prompt_max_length: Optional[int] = None
68
+ description_max_length: Optional[int] = None
69
+ audio_max_length: Optional[int] = None
70
+
71
+ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
72
+ # split inputs and labels since they have to be of different lengths and need
73
+ # different padding methods
74
+
75
+ labels = [torch.tensor(feature["labels"]).transpose(0, 1) for feature in features]
76
+ # (bsz, seq_len, num_codebooks)
77
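+ # pad the stacked label sequences with -100 so that padded frames are ignored by the loss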
+ labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=-100)
78
+ if self.audio_max_length is not None and self.padding == "max_length":
79
+ labels = torch.nn.functional.pad(labels, pad=(0, 0, 0, max(self.audio_max_length - labels.shape[1], 0)))
80
+
81
+ input_ids = [{"input_ids": feature["input_ids"]} for feature in features]
82
+
83
+ input_ids = self.description_tokenizer.pad(
84
+ input_ids,
85
+ return_tensors="pt",
86
+ padding=self.padding,
87
+ pad_to_multiple_of=self.pad_to_multiple_of,
88
+ max_length=self.description_max_length,
89
+ )
90
+
91
+ batch = {"labels": labels, **input_ids}
92
+
93
+ if self.audio_max_length is not None and self.padding == "max_length":
94
+ # if we do torch.compile, we need to also specify the attention_mask
95
+ decoder_attention_mask = torch.ones(labels.shape[:2], dtype=input_ids["attention_mask"].dtype)
96
+ batch["decoder_attention_mask"] = decoder_attention_mask
97
+
98
+ prompt_input_ids = [{"input_ids": feature["prompt_input_ids"]} for feature in features]
99
+ prompt_input_ids = self.prompt_tokenizer.pad(
100
+ prompt_input_ids,
101
+ return_tensors="pt",
102
+ padding=self.padding,
103
+ pad_to_multiple_of=self.pad_to_multiple_of,
104
+ max_length=self.prompt_max_length,
105
+ )
106
+
107
+ batch["prompt_input_ids"] = prompt_input_ids["input_ids"]
108
+ if "attention_mask" in prompt_input_ids:
109
+ batch["prompt_attention_mask"] = prompt_input_ids["attention_mask"]
110
+
111
+ return batch
112
+
113
+
114
+ def convert_dataset_str_to_list(
115
+ dataset_names,
116
+ dataset_config_names,
117
+ metadata_dataset_names=None,
118
+ splits=None,
119
+ dataset_samples=None,
120
+ default_split="train",
121
+ ):
122
+ if isinstance(dataset_names, str):
123
+ dataset_names = dataset_names.split("+")
124
+ dataset_config_names = dataset_config_names.split("+")
125
+ splits = splits.split("+") if splits is not None else None
126
+ dataset_samples = dataset_samples.split("+") if dataset_samples is not None else None
127
+ metadata_dataset_names = metadata_dataset_names.split("+") if metadata_dataset_names is not None else None
128
+
129
+ # basic checks to ensure we've got the right number of datasets/configs/splits/columns/probs
130
+ if len(dataset_names) != len(dataset_config_names):
131
+ raise ValueError(
132
+ f"Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and"
133
+ f" {len(dataset_config_names)} configs."
134
+ )
135
+
136
+ if splits is not None and len(splits) != len(dataset_names):
137
+ raise ValueError(
138
+ f"Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits."
139
+ )
140
+
141
+ if metadata_dataset_names is not None and len(metadata_dataset_names) != len(dataset_names):
142
+ raise ValueError(
143
+ f"Ensure one metadata dataset is passed for each dataset, got {len(dataset_names)} datasets and {len(metadata_dataset_names)} metadata datasets."
144
+ )
145
+
146
+ if dataset_samples is not None:
147
+ if len(dataset_samples) != len(dataset_names):
148
+ raise ValueError(
149
+ f"Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and "
150
+ f"{len(dataset_samples)} samples."
151
+ )
152
+ dataset_samples = [float(ds_sample) for ds_sample in dataset_samples]
153
+ else:
154
+ dataset_samples = [None] * len(dataset_names)
155
+
156
+ splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))]
157
+
158
+ dataset_names_dict = []
159
+ for i, ds_name in enumerate(dataset_names):
160
+ dataset_names_dict.append(
161
+ {
162
+ "name": ds_name,
163
+ "config": dataset_config_names[i],
164
+ "split": splits[i],
165
+ "metadata_dataset_name": metadata_dataset_names[i],
166
+ "samples": dataset_samples[i],
167
+ }
168
+ )
169
+ return dataset_names_dict
170
+
171
+
172
+ def load_multiple_datasets(
173
+ accelerator: Accelerator,
174
+ dataset_names: Union[List, str],
175
+ dataset_config_names: Union[List, str],
176
+ metadata_dataset_names: Optional[str] = None,
177
+ splits: Optional[Union[List, str]] = None,
178
+ label_column_names: Optional[List] = None,
179
+ stopping_strategy: Optional[str] = "first_exhausted",
180
+ dataset_samples: Optional[Union[List, np.array]] = None,
181
+ streaming: Optional[bool] = False,
182
+ seed: Optional[int] = None,
183
+ id_column_name: Optional[str] = None,
184
+ columns_to_keep: Optional[Set[str]] = None,
185
+ prompt_column_name: Optional[str] = None,
186
+ sampling_rate: Optional[int] = None,
187
+ audio_column_name: Optional[str] = None,
188
+ logger: Optional[logging.Logger] = None,
189
+ **kwargs,
190
+ ) -> Union[Dataset, IterableDataset]:
191
+ dataset_names_dict = convert_dataset_str_to_list(
192
+ dataset_names, dataset_config_names, metadata_dataset_names, splits, dataset_samples=dataset_samples
193
+ )
194
+
195
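+ # optional per-dataset sample counts become sampling probabilities, used when interleaving streaming datasets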
+ if dataset_samples is not None:
196
+ dataset_samples = [ds_dict["samples"] for ds_dict in dataset_names_dict]
197
+ probabilities = np.array(dataset_samples) / np.sum(dataset_samples)
198
+ else:
199
+ probabilities = None
200
+
201
+ all_datasets = []
202
+ # iterate over the datasets we want to interleave
203
+ for dataset_dict in tqdm(dataset_names_dict, desc="Combining datasets..."):
204
+ with accelerator.main_process_first():
205
+ dataset = load_dataset(
206
+ dataset_dict["name"],
207
+ dataset_dict["config"],
208
+ split=dataset_dict["split"],
209
+ streaming=streaming,
210
+ **kwargs,
211
+ )
212
+ dataset_features = dataset.features.keys()
213
+
214
+ if sampling_rate is not None and audio_column_name is not None:
215
+ # resample target audio
216
+ dataset = dataset.cast_column(audio_column_name, datasets.features.Audio(sampling_rate=sampling_rate))
217
+
218
+ metadata_dataset_name = dataset_dict["metadata_dataset_name"]
219
+ if metadata_dataset_name is not None:
220
+ logger.info(
221
+ f'Merging {dataset_dict["name"]} - {dataset_dict["split"]} with {metadata_dataset_name} - {dataset_dict["split"]}'
222
+ )
223
+ metadata_dataset = load_dataset(
224
+ metadata_dataset_name,
225
+ dataset_dict["config"],
226
+ split=dataset_dict["split"],
227
+ streaming=streaming,
228
+ **kwargs,
229
+ )
230
+
231
+ # TODO(YL): I forgot to create unique ids for MLS english.
232
+ # To iterate faster, I bypass the original id check and do another one. - Done once because assuming it won't change next time
233
+ # if dataset_dict["name"] == "parler-tts/mls_eng_10k":
234
+ # def concat_ids(book_id, speaker_id, begin_time):
235
+ # return {"id": f"{book_id}_{speaker_id}_{str(begin_time).replace('.', '_')}"}
236
+ # dataset = dataset.map(concat_ids, input_columns=["book_id", "speaker_id", "begin_time"], num_proc=24)
237
+ # metadata_dataset = metadata_dataset.map(concat_ids, input_columns=["book_id", "speaker_id", "begin_time"], num_proc=24)
238
+ # metadata_dataset = metadata_dataset.rename_column(id_column_name, f"metadata_{id_column_name}")
239
+
240
+ if dataset_dict["name"] != "parler-tts/mls_eng_10k":
241
+ if id_column_name is not None and id_column_name not in dataset.column_names:
242
+ raise ValueError(
243
+ f"id_column_name={id_column_name} but has not been found in the dataset columns"
244
+ f"- one of {', '.join(list(dataset.column_names))}."
245
+ )
246
+ if id_column_name is not None and id_column_name not in metadata_dataset.column_names:
247
+ raise ValueError(
248
+ f"id_column_name={id_column_name} but has not been found in the metadata dataset columns"
249
+ f"- one of {', '.join(list(metadata_dataset.column_names))}."
250
+ )
251
+ elif id_column_name is not None:
252
+ metadata_dataset = metadata_dataset.rename_column(id_column_name, f"metadata_{id_column_name}")
253
+
254
+ metadata_columns_to_remove = set(metadata_dataset.column_names).intersection(set(dataset.column_names))
255
+
256
+ if prompt_column_name is not None:
257
+ # We might have applied some transformations to the prompts (e.g punctuation restoration)
258
+ # so we make sure to remove it from the original dataset
259
+ if prompt_column_name in dataset.column_names:
260
+ logger.info(
261
+ f"Removing {prompt_column_name} from dataset {dataset_dict['name']} - {dataset_dict['split']}"
262
+ )
263
+ dataset = dataset.remove_columns(prompt_column_name)
264
+
265
+ metadata_columns_to_remove = set(metadata_dataset.column_names).intersection(set(dataset.column_names))
266
+ metadata_dataset = metadata_dataset.remove_columns(metadata_columns_to_remove)
267
+
268
+ dataset = concatenate_datasets([dataset, metadata_dataset], axis=1)
269
+
270
+ if id_column_name is not None and dataset_dict["name"] != "parler-tts/mls_eng_10k":
271
+ if (
272
+ len(
273
+ dataset.filter(
274
+ lambda id1, id2: id1 != id2,
275
+ input_columns=[id_column_name, f"metadata_{id_column_name}"],
276
+ )
277
+ )
278
+ != 0
279
+ ):
280
+ raise ValueError(
281
+ f"Concatenate didn't work. Some ids don't correspond on dataset {dataset_dict['name']}"
282
+ )
283
+
284
+ dataset_features = dataset.features.keys()
285
+
286
+ if columns_to_keep is not None:
287
+ dataset = dataset.remove_columns(set(dataset_features - columns_to_keep))
288
+ all_datasets.append(dataset)
289
+
290
+ if len(all_datasets) == 1:
291
+ # we have a single dataset so just return it as is
292
+ return all_datasets[0]
293
+
294
+ if streaming:
295
+ interleaved_dataset = interleave_datasets(
296
+ all_datasets,
297
+ stopping_strategy=stopping_strategy,
298
+ probabilities=probabilities,
299
+ seed=seed,
300
+ )
301
+ else:
302
+ with accelerator.main_process_first():
303
+ interleaved_dataset = concatenate_datasets(all_datasets)
304
+
305
+ return interleaved_dataset
training/eval.py ADDED
@@ -0,0 +1,59 @@
1
+ import torch
2
+ import evaluate
3
+ from transformers import AutoModel, AutoProcessor, pipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperTokenizerFast
4
+
5
+
6
+ def clap_similarity(clap_model_name_or_path, texts, audios, device):
7
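+ # embed the text descriptions and the generated audios with CLAP and return their mean cosine similarity,
+ # a proxy for how well the generated speech matches its text description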
+ clap = AutoModel.from_pretrained(clap_model_name_or_path)
8
+ clap_processor = AutoProcessor.from_pretrained(clap_model_name_or_path)
9
+ clap_inputs = clap_processor(text=texts, audios=audios, padding=True, return_tensors="pt").to(device)
10
+ clap.to(device)
11
+ with torch.no_grad():
12
+ text_features = clap.get_text_features(
13
+ clap_inputs["input_ids"], attention_mask=clap_inputs.get("attention_mask", None)
14
+ )
15
+ audio_features = clap.get_audio_features(clap_inputs["input_features"])
16
+
17
+ cosine_sim = torch.nn.functional.cosine_similarity(audio_features, text_features, dim=1, eps=1e-8)
18
+
19
+ clap.to("cpu")
20
+ clap_inputs.to("cpu")
21
+ return cosine_sim.mean().to("cpu")
22
+
23
+
24
+ def wer(asr_model_name_or_path, prompts, audios, device, per_device_eval_batch_size, sampling_rate):
25
+ metric = evaluate.load("wer")
26
+ asr_pipeline = pipeline(model=asr_model_name_or_path, device=device)
27
+
28
+ return_language = None
29
+ if isinstance(asr_pipeline.model, WhisperForConditionalGeneration):
30
+ return_language = True
31
+
32
+ transcriptions = asr_pipeline(
33
+ [{"raw": audio, "sampling_rate": sampling_rate} for audio in audios],
34
+ batch_size=int(per_device_eval_batch_size),
35
+ return_language=return_language,
36
+ )
37
+
38
+ if isinstance(asr_pipeline.tokenizer, (WhisperTokenizer, WhisperTokenizerFast)):
39
+ tokenizer = asr_pipeline.tokenizer
40
+ else:
41
+ tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-large-v3")
42
+
43
+ english_normalizer = tokenizer.normalize
44
+ basic_normalizer = tokenizer.basic_normalize
45
+
46
+ normalized_predictions = []
47
+ normalized_references = []
48
+
49
+ for pred, ref in zip(transcriptions, prompts):
50
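+ # pick Whisper's English text normaliser when the transcription language is English, otherwise the basic normaliser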
+ normalizer = english_normalizer if pred.get("language", None) == "english" else basic_normalizer
51
+ norm_ref = normalizer(ref)
52
+ if len(norm_ref) > 0:
53
+ norm_pred = normalizer(pred["text"])
54
+ normalized_predictions.append(norm_pred)
55
+ normalized_references.append(norm_ref)
56
+
57
+ word_error = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references)
58
+
59
+ return word_error, [t["text"] for t in transcriptions]
training/run_parler_tts_training.py ADDED
@@ -0,0 +1,1018 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ """ Train Parler-TTS using 🤗 Accelerate"""
18
+
19
+ import logging
20
+ import os
21
+ import re
22
+ import sys
23
+ import time
24
+ from multiprocess import set_start_method
25
+ from datetime import timedelta
26
+
27
+ from tqdm import tqdm
28
+ from pathlib import Path
29
+
30
+ import torch
31
+ from torch.utils.data import DataLoader
32
+
33
+ import datasets
34
+ from datasets import DatasetDict, Dataset, IterableDataset, concatenate_datasets
35
+
36
+ from huggingface_hub import HfApi
37
+
38
+ import transformers
39
+ from transformers import AutoFeatureExtractor, AutoTokenizer, HfArgumentParser
40
+ from transformers.trainer_pt_utils import LengthGroupedSampler
41
+ from transformers.optimization import get_scheduler
42
+ from transformers.utils import send_example_telemetry
43
+
44
+
45
+ from accelerate import Accelerator
46
+ from accelerate.utils import set_seed, AutocastKwargs, InitProcessGroupKwargs, TorchDynamoPlugin
47
+ from accelerate.utils.memory import release_memory
48
+
49
+ from parler_tts import (
50
+ ParlerTTSConfig,
51
+ ParlerTTSForConditionalGeneration,
52
+ build_delay_pattern_mask,
53
+ )
54
+
55
+ from training.utils import get_last_checkpoint, rotate_checkpoints, log_pred, log_metric
56
+ from training.arguments import ModelArguments, DataTrainingArguments, ParlerTTSTrainingArguments
57
+ from training.data import load_multiple_datasets, DataCollatorParlerTTSWithPadding, DataCollatorEncodecWithPadding
58
+ from training.eval import clap_similarity, wer
59
+
60
+
61
+ logger = logging.getLogger(__name__)
62
+
63
+
64
+ def main():
65
+ # See all possible arguments in src/transformers/training_args.py
66
+ # or by passing the --help flag to this script.
67
+ # We now keep distinct sets of args, for a cleaner separation of concerns.
68
+
69
+ parser = HfArgumentParser((ModelArguments, DataTrainingArguments, ParlerTTSTrainingArguments))
70
+ if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
71
+ # If we pass only one argument to the script and it's the path to a json file,
72
+ # let's parse it to get our arguments.
73
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
74
+ else:
75
+ model_args, data_args, training_args = parser.parse_args_into_dataclasses()
76
+
77
+ # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
78
+ # information sent is the one passed as arguments along with your Python/PyTorch versions.
79
+ send_example_telemetry("run_parler_tts", model_args, data_args)
80
+
81
+ if training_args.dtype == "float16":
82
+ mixed_precision = "fp16"
83
+ elif training_args.dtype == "bfloat16":
84
+ mixed_precision = "bf16"
85
+ else:
86
+ mixed_precision = "no"
87
+
88
+ if data_args.pad_to_max_length and (
89
+ data_args.max_duration_in_seconds is None
90
+ or data_args.max_prompt_token_length is None
91
+ or data_args.max_description_token_length is None
92
+ ):
93
+ raise ValueError(
94
+ "`pad_to_max_length` is `True` but one of the following parameters has not been set: `max_duration_in_seconds`, `max_prompt_token_length`, `max_description_token_length`"
95
+ )
96
+
97
+ padding = "max_length" if data_args.pad_to_max_length else "longest"
98
+
99
+ ####### A. Preparation
100
+ kwargs_handlers = [InitProcessGroupKwargs(timeout=timedelta(minutes=60))]
101
+ if training_args.torch_compile:
102
+ # TODO(YL): add more compile modes?
103
+ kwargs_handlers.append(TorchDynamoPlugin(backend="inductor", mode="default")) # reduce-overhead
104
+
105
+ accelerator = Accelerator(
106
+ gradient_accumulation_steps=training_args.gradient_accumulation_steps,
107
+ mixed_precision=mixed_precision,
108
+ log_with=training_args.report_to,
109
+ project_dir=training_args.output_dir,
110
+ kwargs_handlers=kwargs_handlers,
111
+ )
112
+
113
+ accelerator.init_trackers(
114
+ project_name=data_args.wandb_project,
115
+ config={
116
+ "learning_rate": training_args.learning_rate,
117
+ "model_name_or_path": model_args.model_name_or_path,
118
+ "num_train_epochs": training_args.num_train_epochs,
119
+ "gradient_accumulation_steps": training_args.gradient_accumulation_steps,
120
+ "per_device_train_batch_size": training_args.per_device_train_batch_size,
121
+ "global_batch_size": training_args.per_device_train_batch_size * accelerator.num_processes,
122
+ "mixed_precision": mixed_precision,
123
+ "lr_scheduler_type": training_args.lr_scheduler_type,
124
+ "warmup_steps": training_args.warmup_steps,
125
+ "freeze_text_encoder": model_args.freeze_text_encoder,
126
+ "max_duration_in_seconds": data_args.max_duration_in_seconds,
127
+ "weight_decay": training_args.weight_decay,
128
+ "adam_beta1": training_args.adam_beta1,
129
+ "adam_beta2": training_args.adam_beta2,
130
+ "temperature": model_args.temperature,
131
+ },
132
+ )
133
+
134
+ # Detect the last checkpoint and optionally resume training from it
135
+ last_checkpoint = None
136
+ if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
137
+ last_checkpoint = get_last_checkpoint(training_args.output_dir)
138
+ if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
139
+ logger.info(
140
+ f"Output directory ({training_args.output_dir}) already exists and is not empty. "
141
+ "Use --overwrite_output_dir to overcome."
142
+ )
143
+ elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
144
+ logger.info(
145
+ f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
146
+ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
147
+ )
148
+
149
+ # Setup logging
150
+ logging.basicConfig(
151
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
152
+ datefmt="%m/%d/%Y %H:%M:%S",
153
+ handlers=[logging.StreamHandler(sys.stdout)],
154
+ )
155
+ logger.setLevel(logging.INFO if accelerator.is_main_process else logging.WARN)
156
+
157
+ # Log a small summary on each process
158
+ logger.warning(
159
+ f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
160
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
161
+ )
162
+
163
+ # Set the Transformers logger verbosity to info (on the main process only)
164
+ if accelerator.is_local_main_process:
165
+ datasets.utils.logging.set_verbosity_warning()
166
+ transformers.utils.logging.set_verbosity_info()
167
+ else:
168
+ datasets.utils.logging.set_verbosity_error()
169
+ transformers.utils.logging.set_verbosity_error()
170
+
171
+ logger.info("Training/evaluation parameters %s", training_args)
172
+
173
+ # Set seed before initializing model.
174
+ set_seed(training_args.seed)
175
+ num_workers = data_args.preprocessing_num_workers
176
+
177
+ # 1. First, let's instantiate the feature extractor, tokenizers and model
178
+ # Note for distributed training, the .from_pretrained methods guarantee that only
179
+ # one local process can concurrently download model & vocab.
180
+
181
+ # load feature extractor
182
+ feature_extractor = AutoFeatureExtractor.from_pretrained(
183
+ model_args.feature_extractor_name or model_args.model_name_or_path,
184
+ cache_dir=model_args.cache_dir,
185
+ token=data_args.token,
186
+ trust_remote_code=data_args.trust_remote_code,
187
+ )
188
+ sampling_rate = feature_extractor.sampling_rate
189
+
190
+ # load prompt tokenizer
191
+ prompt_tokenizer = AutoTokenizer.from_pretrained(
192
+ model_args.prompt_tokenizer_name or model_args.description_tokenizer_name or model_args.model_name_or_path,
193
+ cache_dir=model_args.cache_dir,
194
+ token=data_args.token,
195
+ trust_remote_code=data_args.trust_remote_code,
196
+ use_fast=model_args.use_fast_tokenizer,
197
+ padding_side="left", # the prompt has to be padded on the left because it is prepended to the codebook hidden states
198
+ )
199
+
200
+ # load description tokenizer
201
+ description_tokenizer = AutoTokenizer.from_pretrained(
202
+ model_args.description_tokenizer_name or model_args.model_name_or_path,
203
+ cache_dir=model_args.cache_dir,
204
+ token=data_args.token,
205
+ trust_remote_code=data_args.trust_remote_code,
206
+ use_fast=model_args.use_fast_tokenizer,
207
+ )
208
+
209
+ if model_args.use_fast_tokenizer:
210
+ logger.warning(
211
+ "Disabling fast tokenizer warning: https://github.com/huggingface/transformers/blob/main/src/transformers/tokenization_utils_base.py#L3231-L3235"
212
+ )
213
+ prompt_tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
214
+ description_tokenizer.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True
215
+
216
+ # 2. Now, let's load the dataset
217
+
218
+ if data_args.save_to_disk is not None:
219
+ os.makedirs(data_args.save_to_disk, exist_ok=True)
220
+
221
+ # assume that the dataset has been saved to `save_to_disk` if the latter is not empty
222
+ dataset_was_precomputed = len(os.listdir(data_args.save_to_disk)) > 0
223
+ if dataset_was_precomputed:
224
+ vectorized_datasets = datasets.load_from_disk(data_args.save_to_disk)
225
+ else:
226
+ raw_datasets = DatasetDict()
227
+
228
+ columns_to_keep = {
229
+ "target_audio_column_name": data_args.target_audio_column_name,
230
+ "prompt_column_name": data_args.prompt_column_name,
231
+ }
232
+ if data_args.description_column_name is not None:
233
+ columns_to_keep["description_column_name"] = data_args.description_column_name
234
+
235
+ if training_args.do_train:
236
+ raw_datasets["train"] = load_multiple_datasets(
237
+ accelerator,
238
+ data_args.train_dataset_name,
239
+ data_args.train_dataset_config_name,
240
+ metadata_dataset_names=data_args.train_metadata_dataset_name,
241
+ splits=data_args.train_split_name,
242
+ dataset_samples=data_args.train_dataset_samples,
243
+ seed=training_args.seed,
244
+ cache_dir=model_args.cache_dir,
245
+ num_proc=data_args.preprocessing_num_workers,
246
+ id_column_name=data_args.id_column_name,
247
+ columns_to_keep=columns_to_keep.values(),
248
+ prompt_column_name=data_args.prompt_column_name,
249
+ audio_column_name=data_args.target_audio_column_name,
250
+ sampling_rate=sampling_rate,
251
+ logger=logger,
252
+ # streaming=data_args.streaming, TODO(SG): optionally enable streaming mode
253
+ )
254
+
255
+ for key in columns_to_keep:
256
+ if columns_to_keep[key] not in raw_datasets["train"].column_names:
257
+ raise ValueError(
258
+ f"--{key} '{columns_to_keep[key]}' not found in dataset '{data_args.train_dataset_name}'."
259
+ f" Make sure to set `--{key}` to the correct audio column - one of"
260
+ f" {', '.join(raw_datasets['train'].column_names)}."
261
+ )
262
+
263
+ if data_args.max_train_samples is not None:
264
+ raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
265
+
266
+ if training_args.do_eval:
267
+ raw_datasets["eval"] = load_multiple_datasets(
268
+ accelerator,
269
+ data_args.eval_dataset_name if data_args.eval_dataset_name else data_args.train_dataset_name,
270
+ data_args.eval_dataset_config_name
271
+ if data_args.eval_dataset_config_name
272
+ else data_args.train_dataset_config_name,
273
+ metadata_dataset_names=data_args.eval_metadata_dataset_name,
274
+ splits=data_args.eval_split_name,
275
+ cache_dir=model_args.cache_dir,
276
+ num_proc=data_args.preprocessing_num_workers,
277
+ id_column_name=data_args.id_column_name,
278
+ columns_to_keep=columns_to_keep.values(),
279
+ prompt_column_name=data_args.prompt_column_name,
280
+ audio_column_name=data_args.target_audio_column_name,
281
+ sampling_rate=sampling_rate,
282
+ logger=logger,
283
+ # streaming=data_args.streaming, TODO(SG): optionally enable streaming mode
284
+ )
285
+
286
+ if data_args.max_eval_samples is not None:
287
+ raw_datasets["eval"] = (
288
+ raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
289
+ )
290
+
291
+ # 3. Next, let's load the config.
292
+ config = ParlerTTSConfig.from_pretrained(
293
+ model_args.model_name_or_path,
294
+ cache_dir=model_args.cache_dir,
295
+ token=data_args.token,
296
+ trust_remote_code=data_args.trust_remote_code,
297
+ )
298
+
299
+ # update pad token id and decoder_start_token_id
300
+ config.update(
301
+ {
302
+ "pad_token_id": model_args.pad_token_id if model_args.pad_token_id is not None else config.pad_token_id,
303
+ "decoder_start_token_id": model_args.decoder_start_token_id
304
+ if model_args.decoder_start_token_id is not None
305
+ else config.decoder_start_token_id,
306
+ }
307
+ )
308
+
309
+ # create model
310
+ model = ParlerTTSForConditionalGeneration.from_pretrained(
311
+ model_args.model_name_or_path,
312
+ cache_dir=model_args.cache_dir,
313
+ config=config,
314
+ token=data_args.token,
315
+ trust_remote_code=data_args.trust_remote_code,
316
+ )
317
+
318
+ # enable gradient checkpointing if necessary
319
+ if training_args.gradient_checkpointing:
320
+ model.gradient_checkpointing_enable()
321
+
322
+ # 4. Now we preprocess the datasets including loading the audio, resampling and normalization
323
+ # Thankfully, `datasets` takes care of automatically loading and resampling the audio,
324
+ # so that we just need to set the correct target sampling rate and normalize the input
325
+ # via the `feature_extractor`
326
+
327
+ # derive max & min input length for sample rate & max duration
328
+ sampling_rate = feature_extractor.sampling_rate
329
+ max_target_length = data_args.max_duration_in_seconds * sampling_rate
330
+ min_target_length = data_args.min_duration_in_seconds * sampling_rate
331
+ target_audio_column_name = data_args.target_audio_column_name
332
+ description_column_name = data_args.description_column_name
333
+ prompt_column_name = data_args.prompt_column_name
334
+ feature_extractor_input_name = feature_extractor.model_input_names[0]
335
+ audio_encoder_pad_token_id = config.decoder.pad_token_id
336
+ audio_encoder_eos_token_id = config.decoder.eos_token_id
337
+ audio_encoder_bos_token_id = model.generation_config.decoder_start_token_id
338
+ max_length = model.generation_config.max_length
339
+ num_codebooks = model.decoder.config.num_codebooks
340
+ bandwidth = model_args.bandwidth
341
+
342
+ # Freeze Encoders
343
+ model.freeze_encoders(model_args.freeze_text_encoder)
344
+
345
+ # Test all gather - used for warmup and to avoid a timeout
346
+ test_tensor = torch.tensor([accelerator.process_index], device=accelerator.device)
347
+ gathered_tensor = accelerator.gather(test_tensor)
348
+ print("gathered_tensor", gathered_tensor)
349
+ accelerator.wait_for_everyone()
350
+
351
+ if not dataset_was_precomputed:
352
+ # Filter on text length
353
+ if description_column_name is not None and data_args.max_text_length is not None:
354
+ with accelerator.main_process_first():
355
+ # keep only descriptions shorter than max_text_length (in characters)
356
+ raw_datasets = raw_datasets.filter(
357
+ lambda x: len(x) < data_args.max_text_length,
358
+ num_proc=num_workers,
359
+ input_columns=[description_column_name],
360
+ )
361
+
362
+ # Preprocessing the dataset.
363
+ # We need to tokenize the texts.
364
+ def pass_through_processors(description, prompt):
365
+ batch = {}
366
+
367
+ batch["input_ids"] = description_tokenizer(description.strip())["input_ids"]
368
+ batch["prompt_input_ids"] = prompt_tokenizer(prompt.strip())["input_ids"]
369
+
370
+ return batch
371
+
372
+ with accelerator.main_process_first():
373
+ # this is a trick to avoid rewriting the entire audio column, which takes ages
374
+ vectorized_datasets = raw_datasets.map(
375
+ pass_through_processors,
376
+ remove_columns=next(iter(raw_datasets.values())).column_names,
377
+ input_columns=[description_column_name, prompt_column_name],
378
+ num_proc=num_workers,
379
+ desc="preprocess datasets",
380
+ )
381
+
382
+ # We use Accelerate to perform distributed inference
383
+ # T5 doesn't support fp16
384
+ autocast_kwargs = AutocastKwargs(enabled=(mixed_precision != "fp16"))
385
+
386
+ # Now we encode the audio labels with encodec.
387
+ ####### B. Encode audio
388
+
389
+ logger.info("*** Encode target audio with encodec ***")
390
+
391
+ # no need to prepare audio_decoder because it is only used for inference, without mixed precision
392
+ # see: https://huggingface.co/docs/accelerate/main/en/package_reference/accelerator#accelerate.Accelerator.prepare
393
+ if training_args.torch_compile:
394
+ audio_decoder = accelerator.prepare_model(model.audio_encoder, evaluation_mode=True)
395
+ else:
396
+ audio_decoder = model.audio_encoder
397
+
398
+ encoder_data_collator = DataCollatorEncodecWithPadding(
399
+ feature_extractor,
400
+ audio_column_name=target_audio_column_name,
401
+ feature_extractor_input_name=feature_extractor_input_name,
402
+ max_length=max_target_length,
403
+ padding=padding,
404
+ )
405
+
406
+ def apply_audio_decoder(batch):
407
+ len_audio = batch.pop("len_audio")
408
+ audio_decoder.to(batch["input_values"].device).eval()
409
+ with torch.no_grad():
410
+ labels = audio_decoder.encode(**batch, bandwidth=bandwidth)["audio_codes"]
411
+ output = {}
412
+ output["len_audio"] = len_audio
413
+ # (1, bsz, codebooks, seq_len) -> (bsz, seq_len, codebooks)
414
+ output["labels"] = labels.squeeze(0).transpose(1, 2)
415
+ output["ratio"] = torch.ones_like(len_audio) * labels.shape[-1] / len_audio.max()
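+ # note: `ratio` is the number of encoded frames per audio sample (measured on the longest
+ # clip in the batch); it is used below to trim each label sequence back to the length
+ # corresponding to its true, un-padded audio duration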
416
+ return output
417
+
418
+ for split in vectorized_datasets:
419
+ data_loader = DataLoader(
420
+ raw_datasets[split],
421
+ batch_size=training_args.audio_encoder_per_device_batch_size,
422
+ collate_fn=encoder_data_collator,
423
+ num_workers=training_args.dataloader_num_workers,
424
+ pin_memory=True,
425
+ )
426
+ data_loader = accelerator.prepare(data_loader)
427
+
428
+ all_generated_labels = []
429
+ all_lens = []
430
+ for batch in tqdm(data_loader, disable=not accelerator.is_local_main_process):
431
+ generate_labels = apply_audio_decoder(batch)
432
+ generate_labels = accelerator.pad_across_processes(generate_labels, dim=1, pad_index=0)
433
+ generate_labels = accelerator.gather_for_metrics(generate_labels)
434
+
435
+ if accelerator.is_main_process:
436
+ lab = generate_labels["labels"].cpu().transpose(1, 2).to(torch.int16)
437
+ rat = generate_labels["ratio"].cpu().squeeze()
438
+ lens = generate_labels["len_audio"].cpu().squeeze()
439
+ lab = [l[:, : int(ratio * length)] for (l, ratio, length) in zip(lab, rat, lens)]
440
+
441
+ all_generated_labels.extend(lab)
442
+ all_lens.extend(lens)
443
+
444
+ # (1, codebooks, seq_len) where seq_len=1
445
+ bos_labels = torch.ones((1, num_codebooks, 1)) * audio_encoder_bos_token_id
446
+
447
+ if accelerator.is_main_process:
448
+ tmp_labels = Dataset.from_dict({"labels": all_generated_labels, "target_length": all_lens})
449
+ tmp_labels.save_to_disk(
450
+ os.path.join(data_args.temporary_save_to_disk, split),
451
+ num_proc=1 if split == "eval" else data_args.preprocessing_num_workers,
452
+ )
453
+ accelerator.wait_for_everyone()
454
+ del all_generated_labels
455
+
456
+ tmp_labels = datasets.load_from_disk(os.path.join(data_args.temporary_save_to_disk, split))
457
+ with accelerator.main_process_first():
458
+ vectorized_datasets[split] = concatenate_datasets([vectorized_datasets[split], tmp_labels], axis=1)
459
+
460
+ def postprocess_dataset(labels):
461
+ # (1, codebooks, seq_len)
462
+ labels = torch.tensor(labels).unsqueeze(0)
463
+ # add bos
464
+ labels = torch.cat([bos_labels, labels], dim=-1)
465
+
466
+ labels, delay_pattern_mask = build_delay_pattern_mask(
467
+ labels,
468
+ bos_token_id=audio_encoder_bos_token_id,
469
+ pad_token_id=audio_encoder_eos_token_id,
470
+ max_length=labels.shape[-1] + num_codebooks,
471
+ num_codebooks=num_codebooks,
472
+ )
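+ # in the delay pattern (MusicGen-style), codebook k is shifted right by k steps, which is
+ # why `max_length` above reserves roughly `num_codebooks` extra positions for the labels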
473
+
474
+ # the first ids of the delay pattern mask are precisely the labels; we use the rest of the mask
475
+ # to take care of EOS
476
+ # we want labels to look like this:
477
+ # - [B, a, b, E, E, E, E]
478
+ # - [B, B, c, d, E, E, E]
479
+ # - [B, B, B, e, f, E, E]
480
+ # - [B, B, B, B, g, h, E]
481
+ labels = torch.where(delay_pattern_mask == -1, audio_encoder_eos_token_id, delay_pattern_mask)
482
+
483
+ # the first timestep corresponds to a row full of BOS tokens, so let's get rid of it
484
+ # we also remove the last timesteps (full of PAD)
485
+ output = {"labels": labels[:, 1:]}
486
+ return output
487
+
488
+ with accelerator.main_process_first():
489
+ vectorized_datasets[split] = vectorized_datasets[split].map(
490
+ postprocess_dataset,
491
+ num_proc=data_args.preprocessing_num_workers, # this one is resource-consuming with many processes.
492
+ input_columns=["labels"],
493
+ desc="Postprocessing labeling",
494
+ )
495
+
496
+ accelerator.free_memory()
497
+ del generate_labels, all_lens
498
+
499
+ with accelerator.main_process_first():
500
+ # NOTE: filtering is done at the end because in the `datasets` library, caching audio files is done after most operations
501
+ # caching audio files is time and disk-space consuming, so we want to avoid it at all costs, especially for large (>1Kh) audio datasets.
502
+ # That's also why we avoid concatenating the processed datasets (vectorized_datasets) with the audio column present in raw_datasets.
503
+
504
+ def is_audio_in_length_range(length):
505
+ return length > min_target_length and length < max_target_length
506
+
507
+ # keep only audio whose length is between min_target_length and max_target_length
508
+ vectorized_datasets = vectorized_datasets.filter(
509
+ is_audio_in_length_range,
510
+ num_proc=num_workers,
511
+ input_columns=["target_length"],
512
+ )
513
+
514
+ if description_column_name is not None and data_args.max_description_token_length is not None:
515
+ with accelerator.main_process_first():
516
+ # keep only descriptions whose token length is shorter than max_description_token_length
517
+ vectorized_datasets = vectorized_datasets.filter(
518
+ lambda x: len(x) < data_args.max_description_token_length,
519
+ num_proc=num_workers,
520
+ input_columns=["input_ids"],
521
+ )
522
+
523
+ if data_args.max_prompt_token_length is not None:
524
+ with accelerator.main_process_first():
525
+ # keep only prompts whose token length is shorter than max_prompt_token_length
526
+ vectorized_datasets = vectorized_datasets.filter(
527
+ lambda x: len(x) < data_args.max_prompt_token_length,
528
+ num_proc=num_workers,
529
+ input_columns=["prompt_input_ids"],
530
+ )
531
+
532
+ if data_args.save_to_disk is not None and not dataset_was_precomputed:
533
+ if accelerator.is_main_process:
534
+ vectorized_datasets.save_to_disk(
535
+ data_args.save_to_disk,
536
+ num_proc=min(data_args.preprocessing_num_workers, len(vectorized_datasets["eval"]) - 1),
537
+ )
538
+ logger.info(f"Dataset saved at {data_args.save_to_disk}")
539
+
540
+ audio_max_length = None
541
+ if training_args.torch_compile:
542
+ audio_max_length = max(vectorized_datasets["train"]["target_length"])
543
+ with accelerator.main_process_first():
544
+ max_sample = vectorized_datasets["train"].filter(
545
+ lambda x: x == audio_max_length,
546
+ num_proc=num_workers,
547
+ input_columns=["target_length"],
548
+ )
549
+ audio_max_length = torch.tensor(max_sample[0]["labels"]).shape[1]
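+ # presumably, padding every batch to this single maximum label length keeps tensor shapes
+ # static under torch.compile and avoids recompiling for each new batch shape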
550
+
551
+ # for large datasets it is advised to run the preprocessing on a
552
+ # single machine first with ``args.preprocessing_only`` since there will most likely
553
+ # be a timeout when running the script in distributed mode.
554
+ # In a second step ``args.preprocessing_only`` can then be set to `False` to load the
555
+ # cached dataset
556
+ if data_args.preprocessing_only and data_args.save_to_disk is None:
557
+ raise ValueError(
558
+ "`preprocessing_only=True` but `save_to_disk` is not set. The latter should indicate where to save the dataset locally."
559
+ )
560
+ elif data_args.preprocessing_only:
561
+ logger.info(f"Data preprocessing finished. Files saved at {data_args.save_to_disk}")
562
+ return
563
+
564
+ # 6. Next, we can prepare the training.
565
+
566
+ # Let's use CLAP similarity and WER as our evaluation metrics:
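+ # - CLAP: embedding similarity between the text description and the generated audio
+ # - WER: word error rate of an ASR transcription of the generated audio against the text prompt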
567
+ def compute_metrics(audios, descriptions, prompts, device="cpu"):
568
+ results = {}
569
+ input_ids = descriptions
570
+ texts = description_tokenizer.batch_decode(input_ids, skip_special_tokens=True)
571
+ prompts = prompt_tokenizer.batch_decode(prompts, skip_special_tokens=True)
572
+ audios = [a.cpu().numpy() for a in audios]
573
+
574
+ clap_score = clap_similarity(model_args.clap_model_name_or_path, texts, audios, device)
575
+ results["clap"] = clap_score
576
+
577
+ word_error, transcriptions = wer(
578
+ model_args.asr_model_name_or_path,
579
+ prompts,
580
+ audios,
581
+ device,
582
+ training_args.per_device_eval_batch_size,
583
+ sampling_rate,
584
+ )
585
+ results["wer"] = word_error
586
+
587
+ return results, texts, prompts, audios, transcriptions
588
+
589
+ # Define Training Schedule
590
+ # Store some constants
591
+ per_device_train_batch_size = int(training_args.per_device_train_batch_size)
592
+ train_batch_size = per_device_train_batch_size * accelerator.num_processes
593
+ gradient_accumulation_steps = int(training_args.gradient_accumulation_steps)
594
+ per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
595
+
596
+ if training_args.max_steps < 0:
597
+ num_epochs = int(training_args.num_train_epochs)
598
+ steps_per_epoch = len(vectorized_datasets["train"]) // (train_batch_size * gradient_accumulation_steps)
599
+ total_train_steps = steps_per_epoch * num_epochs
600
+ elif training_args.max_steps > 0:
601
+ logger.info("max_steps is given, it will override any value given in num_train_epochs")
602
+ total_train_steps = int(training_args.max_steps)
603
+ # Setting a very large number of epochs so we go as many times as necessary over the iterator.
604
+ num_epochs = sys.maxsize
605
+ steps_per_epoch = total_train_steps
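+ # e.g. with 8 processes, per_device_train_batch_size=4 and gradient_accumulation_steps=2,
+ # each optimization step consumes 8 * 4 * 2 = 64 training examples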
606
+
607
+ if training_args.eval_steps is None:
608
+ logger.info(f"eval_steps is not set, evaluating at the end of each epoch")
609
+ eval_steps = steps_per_epoch
610
+ else:
611
+ eval_steps = training_args.eval_steps
612
+
613
+ # T5 doesn't support fp16
614
+ autocast_kwargs = AutocastKwargs(enabled=(mixed_precision != "fp16"))
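+ # i.e. under fp16 training, the text encoder forward pass below runs with autocast disabled
+ # (full precision), since T5-style models are known to overflow in float16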
615
+
616
+ # Define optimizer, LR scheduler, collator
617
+ optimizer = torch.optim.AdamW(
618
+ params=model.parameters(),
619
+ lr=training_args.learning_rate,
620
+ betas=(training_args.adam_beta1, training_args.adam_beta2),
621
+ eps=training_args.adam_epsilon,
622
+ weight_decay=training_args.weight_decay,
623
+ )
624
+
625
+ # LR scheduler gets stepped by `num_processes` each time -> account for this in warmup / total steps
626
+ lr_scheduler = get_scheduler(
627
+ name=training_args.lr_scheduler_type,
628
+ optimizer=optimizer,
629
+ num_warmup_steps=training_args.get_warmup_steps(total_train_steps) * accelerator.num_processes,
630
+ num_training_steps=total_train_steps * accelerator.num_processes,
631
+ )
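+ # e.g. with 8 processes and warmup_steps=500, the scheduler is built with 4000 warmup ticks,
+ # which still amounts to 500 optimizer updates per process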
632
+
633
+ # Instantiate custom data collator
634
+ data_collator = DataCollatorParlerTTSWithPadding(
635
+ prompt_tokenizer=prompt_tokenizer,
636
+ description_tokenizer=description_tokenizer,
637
+ pad_to_multiple_of=data_args.pad_to_multiple_of,
638
+ padding=padding,
639
+ prompt_max_length=data_args.max_prompt_token_length,
640
+ description_max_length=data_args.max_description_token_length,
641
+ audio_max_length=audio_max_length,
642
+ )
643
+
644
+ # Prepare everything with accelerate
645
+ model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
646
+
647
+ logger.info("***** Running training *****")
648
+ logger.info(f" Num examples = {total_train_steps * train_batch_size * gradient_accumulation_steps}")
649
+ logger.info(" Instantaneous batch size per device =" f" {per_device_train_batch_size}")
650
+ logger.info(" Gradient accumulation steps =" f" {gradient_accumulation_steps}")
651
+ logger.info(
652
+ f" Total train batch size (w. parallel & distributed) = {train_batch_size * gradient_accumulation_steps}"
653
+ )
654
+ logger.info(f" Total optimization steps = {total_train_steps}")
655
+
656
+ # ======================== Training ================================
657
+ train_time = 0
658
+ train_start = time.time()
659
+ steps_trained_progress_bar = tqdm(
660
+ range(total_train_steps), desc="Train steps ... ", position=0, disable=not accelerator.is_local_main_process
661
+ )
662
+ continue_training = True
663
+ epochs_trained = 0
664
+ cur_step = 0
665
+
666
+ checkpoint = None
667
+ if training_args.resume_from_checkpoint is not None:
668
+ checkpoint = training_args.resume_from_checkpoint
669
+ elif last_checkpoint is not None:
670
+ checkpoint = last_checkpoint
671
+
672
+ if accelerator.is_main_process:
673
+ if training_args.push_to_hub:
674
+ api = HfApi(token=training_args.hub_token)
675
+
676
+ # Create repo (repo_name from args or inferred)
677
+ repo_name = training_args.hub_model_id
678
+ if repo_name is None:
679
+ repo_name = Path(training_args.output_dir).absolute().name
680
+ repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
681
+
682
+ with open(os.path.join(training_args.output_dir, ".gitignore"), "w+") as gitignore:
683
+ if "wandb" not in gitignore:
684
+ gitignore.write("wandb\n")
685
+ elif training_args.output_dir is not None:
686
+ os.makedirs(training_args.output_dir, exist_ok=True)
687
+ accelerator.wait_for_everyone()
688
+
689
+ # Now save everything to be able to create a single processor later
690
+ # make sure all processes wait until data is saved
691
+ with accelerator.main_process_first():
692
+ # only the main process saves them
693
+ if accelerator.is_main_process:
694
+ # save feature extractor, tokenizer and config
695
+ if (
696
+ model_args.prompt_tokenizer_name is None
697
+ and model_args.description_tokenizer_name
698
+ or (model_args.prompt_tokenizer_name == model_args.description_tokenizer_name)
699
+ ):
700
+ prompt_tokenizer.save_pretrained(training_args.output_dir)
701
+ else:
702
+ logger.warning(
703
+ f"Prompt tokenizer ('{model_args.prompt_tokenizer_name}') and description tokenizer ('{model_args.description_tokenizer_name}') are not the same. Saving only the prompt tokenizer."
704
+ )
705
+ prompt_tokenizer.save_pretrained(training_args.output_dir)
706
+
707
+ feature_extractor.save_pretrained(training_args.output_dir)
708
+ config.save_pretrained(training_args.output_dir)
709
+
710
+ if checkpoint is not None:
711
+ accelerator.load_state(checkpoint)
712
+ # Find num steps and epoch from saved state string pattern
713
+ pattern = r"checkpoint-(\d+)-epoch-(\d+)"
714
+ match = re.search(pattern, checkpoint)
715
+ cur_step = int(match.group(1))
716
+ epochs_trained = int(match.group(2))
717
+
718
+ logger.info(" Continuing training from checkpoint, will skip to saved global_step")
719
+ logger.info(f" Continuing training from epoch {epochs_trained}")
720
+ logger.info(f" Continuing training from global step {cur_step}")
721
+
722
+ steps_trained_progress_bar.update(cur_step)
723
+
724
+ for epoch in range(0, epochs_trained):
725
+ vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed)
726
+
727
+ if training_args.max_steps < 0:
728
+ # we know exactly the number of steps per epoch, so can skip through the required number of batches
729
+ resume_step = (cur_step - epochs_trained * steps_per_epoch) * gradient_accumulation_steps
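+ # e.g. resuming at global step 10000 with 0 completed epochs and gradient_accumulation_steps=2
+ # means skipping the first 20000 batches of the re-shuffled epoch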
730
+ else:
731
+ # Currently we don't know how many steps we've taken in the current epoch
732
+ # So we just shuffle the dataset one extra time and start from a fresh epoch
733
+ # This is "good enough" for our purposes but not fully correct
734
+ resume_step = None
735
+ vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed)
736
+ else:
737
+ resume_step = None
738
+
739
+ gen_kwargs = {
740
+ "do_sample": model_args.do_sample,
741
+ "temperature": model_args.temperature,
742
+ "max_length": model_args.max_length,
743
+ }
744
+
745
+ # Define gradient update step fn
746
+ def train_step(
747
+ batch,
748
+ accelerator,
749
+ autocast_kwargs,
750
+ ):
751
+ model.train()
752
+
753
+ if mixed_precision == "fp16":
754
+ # fp16 doesn't work with T5-like models
755
+ with accelerator.autocast(autocast_handler=autocast_kwargs):
756
+ if training_args.parallel_mode.value != "distributed":
757
+ encoder_outputs = model.text_encoder(
758
+ input_ids=batch.get("input_ids"), attention_mask=batch.get("attention_mask", None)
759
+ )
760
+ else:
761
+ encoder_outputs = model.module.text_encoder(
762
+ input_ids=batch.get("input_ids"), attention_mask=batch.get("attention_mask", None)
763
+ )
764
+ batch["encoder_outputs"] = encoder_outputs
765
+
766
+ outputs = model(**batch)
767
+ # CE (data) loss
768
+ ce_loss = outputs.loss
769
+
770
+ metrics = {"loss": ce_loss}
771
+ return ce_loss, metrics
772
+
773
+ # Define eval fn
774
+ def eval_step(
775
+ batch,
776
+ accelerator,
777
+ autocast_kwargs,
778
+ ):
779
+ eval_model = model if not training_args.torch_compile else model._orig_mod
780
+ eval_model.eval()
781
+
782
+ if mixed_precision == "fp16":
783
+ # fp16 doesn't work with T5-like models
784
+ with accelerator.autocast(autocast_handler=autocast_kwargs):
785
+ with torch.no_grad():
786
+ if training_args.parallel_mode.value != "distributed" or training_args.torch_compile:
787
+ encoder_outputs = eval_model.text_encoder(
788
+ input_ids=batch.get("input_ids"), attention_mask=batch.get("attention_mask", None)
789
+ )
790
+ else:
791
+ encoder_outputs = eval_model.module.text_encoder(
792
+ input_ids=batch.get("input_ids"), attention_mask=batch.get("attention_mask", None)
793
+ )
794
+ batch["encoder_outputs"] = encoder_outputs
795
+
796
+ with torch.no_grad():
797
+ outputs = eval_model(**batch)
798
+ # CE (data) loss
799
+ ce_loss = outputs.loss
800
+ metrics = {"loss": ce_loss}
801
+ return metrics
802
+
803
+ def generate_step(batch):
804
+ batch.pop("decoder_attention_mask", None)
805
+ eval_model = accelerator.unwrap_model(model, keep_fp32_wrapper=mixed_precision != "fp16").eval()
806
+ if training_args.torch_compile:
807
+ eval_model = model._orig_mod
808
+
809
+ output_audios = eval_model.generate(**batch, **gen_kwargs)
810
+ output_audios = accelerator.pad_across_processes(output_audios, dim=1, pad_index=0)
811
+ return output_audios
812
+
813
+ for epoch in range(epochs_trained, num_epochs):
814
+ vectorized_datasets["train"] = vectorized_datasets["train"].shuffle(training_args.seed)
815
+ sampler = None
816
+ if training_args.group_by_length:
817
+ sampler = LengthGroupedSampler(train_batch_size, lengths=vectorized_datasets["train"]["target_length"])
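+ # LengthGroupedSampler batches samples of similar target_length together to minimise padding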
818
+ train_dataloader = DataLoader(
819
+ vectorized_datasets["train"],
820
+ collate_fn=data_collator,
821
+ batch_size=per_device_train_batch_size,
822
+ sampler=sampler,
823
+ num_workers=training_args.dataloader_num_workers,
824
+ pin_memory=training_args.dataloader_pin_memory,
825
+ )
826
+ train_dataloader = accelerator.prepare(train_dataloader)
827
+ if hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDataset):
828
+ train_dataloader.dataset.set_epoch(epoch)
829
+
830
+ if resume_step is not None:
831
+ # Skip the first N batches in the dataloader when resuming from a checkpoint
832
+ train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
833
+ resume_step = None
834
+
835
+ for batch in train_dataloader:
836
+ with accelerator.accumulate(model):
837
+ loss, train_metric = train_step(batch, accelerator, autocast_kwargs)
838
+ accelerator.backward(loss)
839
+ if accelerator.sync_gradients:
840
+ accelerator.clip_grad_norm_(model.parameters(), training_args.max_grad_norm)
841
+ optimizer.step()
842
+ lr_scheduler.step()
843
+ optimizer.zero_grad()
844
+
845
+ # Check if the accelerator has performed an optimization step behind the scenes
846
+ if accelerator.sync_gradients:
847
+ steps_trained_progress_bar.update(1)
848
+ cur_step += 1
849
+
850
+ if cur_step % training_args.logging_steps == 0:
851
+ steps_trained_progress_bar.write(
852
+ f"Step... ({cur_step} / {total_train_steps} | Loss:"
853
+ f" {train_metric['loss']}, Learning Rate:"
854
+ f" {lr_scheduler.get_last_lr()[0]})"
855
+ )
856
+ log_metric(
857
+ accelerator,
858
+ metrics=train_metric,
859
+ learning_rate=lr_scheduler.get_last_lr()[0],
860
+ train_time=train_time + time.time() - train_start,
861
+ step=cur_step,
862
+ epoch=epoch,
863
+ prefix="train",
864
+ )
865
+
866
+ # save checkpoint and weights after each save_steps and at the end of training
867
+ if (cur_step % training_args.save_steps == 0) or cur_step == total_train_steps:
868
+ intermediate_dir = os.path.join(training_args.output_dir, f"checkpoint-{cur_step}-epoch-{epoch}")
869
+ # safe_serialization=False to avoid shared tensors saving issue (TODO(YL): it's a temporary fix)
870
+ # https://github.com/huggingface/transformers/issues/27293#issuecomment-1872560074
871
+ accelerator.save_state(output_dir=intermediate_dir, safe_serialization=False)
872
+ accelerator.wait_for_everyone()
873
+ if accelerator.is_main_process:
874
+ rotate_checkpoints(
875
+ training_args.save_total_limit, output_dir=training_args.output_dir, logger=logger
876
+ )
877
+
878
+ if cur_step == total_train_steps:
879
+ # un-wrap the model for saving
880
+ unwrapped_model = accelerator.unwrap_model(model)
881
+ unwrapped_model.save_pretrained(training_args.output_dir)
882
+
883
+ if training_args.push_to_hub:
884
+ api.upload_folder(
885
+ repo_id=repo_id,
886
+ folder_path=training_args.output_dir,
887
+ commit_message=f"Saving train state of step {cur_step}",
888
+ run_as_future=True,
889
+ )
890
+
891
+ if training_args.do_eval and (cur_step % eval_steps == 0 or cur_step == total_train_steps):
892
+ train_time += time.time() - train_start
893
+ # ======================== Evaluating ==============================
894
+ eval_metrics = []
895
+ eval_preds = []
896
+ eval_descriptions = []
897
+ eval_prompts = []
898
+ eval_start = time.time()
899
+
900
+ # release training input batch
901
+ batch = release_memory(batch)
902
+
903
+ validation_dataloader = DataLoader(
904
+ vectorized_datasets["eval"],
905
+ collate_fn=data_collator,
906
+ batch_size=per_device_eval_batch_size,
907
+ drop_last=False,
908
+ num_workers=training_args.dataloader_num_workers,
909
+ pin_memory=training_args.dataloader_pin_memory,
910
+ )
911
+ validation_dataloader = accelerator.prepare(validation_dataloader)
912
+
913
+ for batch in tqdm(
914
+ validation_dataloader,
915
+ desc=f"Evaluating - Inference ...",
916
+ position=2,
917
+ disable=not accelerator.is_local_main_process,
918
+ ):
919
+ # Model forward
920
+ eval_metric = eval_step(batch, accelerator, autocast_kwargs)
921
+ eval_metric = accelerator.gather_for_metrics(eval_metric)
922
+ eval_metrics.append(eval_metric)
923
+
924
+ if training_args.predict_with_generate:
925
+ validation_dataloader = DataLoader(
926
+ vectorized_datasets["eval"],
927
+ collate_fn=data_collator,
928
+ batch_size=per_device_eval_batch_size,
929
+ drop_last=False,
930
+ num_workers=training_args.dataloader_num_workers,
931
+ pin_memory=training_args.dataloader_pin_memory,
932
+ )
933
+ validation_dataloader = accelerator.prepare(validation_dataloader)
934
+ # generation
935
+ for batch in tqdm(
936
+ validation_dataloader,
937
+ desc=f"Evaluating - Generation ...",
938
+ position=2,
939
+ disable=not accelerator.is_local_main_process,
940
+ ):
941
+ generated_audios = generate_step(batch)
942
+ # Gather all predictions and targets
943
+ generated_audios, input_ids, prompts = accelerator.pad_across_processes(
944
+ (generated_audios, batch["input_ids"], batch["prompt_input_ids"]), dim=1, pad_index=0
945
+ )
946
+ generated_audios, input_ids, prompts = accelerator.gather_for_metrics(
947
+ (generated_audios, input_ids, prompts)
948
+ )
949
+ eval_preds.extend(generated_audios.to("cpu"))
950
+ eval_descriptions.extend(input_ids.to("cpu"))
951
+ eval_prompts.extend(prompts.to("cpu"))
952
+
953
+ eval_time = time.time() - eval_start
954
+ # normalize eval metrics
955
+ eval_metrics = {
956
+ key: torch.mean(torch.cat([d[key].unsqueeze(0) for d in eval_metrics]))
957
+ for key in eval_metrics[0]
958
+ }
959
+
960
+ # compute metrics
961
+ metrics_desc = ""
962
+ if training_args.predict_with_generate:
963
+ metric_values, pred_descriptions, pred_prompts, audios, transcriptions = compute_metrics(
964
+ eval_preds, eval_descriptions, eval_prompts, accelerator.device
965
+ )
966
+ eval_metrics.update(metric_values)
967
+ metrics_desc = " ".join([f"Eval {key}: {value} |" for key, value in metric_values.items()])
968
+ if "wandb" in training_args.report_to:
969
+ log_pred(
970
+ accelerator,
971
+ pred_descriptions,
972
+ pred_prompts,
973
+ transcriptions,
974
+ audios,
975
+ sampling_rate=sampling_rate,
976
+ step=cur_step,
977
+ prefix="eval",
978
+ )
979
+
980
+ # Print metrics and update progress bar
981
+ steps_trained_progress_bar.write(
982
+ f"Eval results for step ({cur_step} / {total_train_steps} | Eval Loss: {eval_metrics['loss']} |"
983
+ f" {metrics_desc})"
984
+ )
985
+
986
+ log_metric(
987
+ accelerator,
988
+ metrics=eval_metrics,
989
+ train_time=eval_time,
990
+ step=cur_step,
991
+ epoch=epoch,
992
+ prefix="eval",
993
+ )
994
+
995
+ # release the eval batch and reset the metric accumulators
996
+ eval_metrics = []
997
+ eval_preds = []
998
+ eval_descriptions = []
999
+ eval_prompts = []
1000
+ batch = release_memory(batch)
1001
+
1002
+ # reset the train start time
1003
+ train_start = time.time()
1004
+
1005
+ # break condition
1006
+ if cur_step == total_train_steps:
1007
+ continue_training = False
1008
+ break
1009
+
1010
+ if not continue_training:
1011
+ break
1012
+
1013
+ accelerator.end_training()
1014
+
1015
+
1016
+ if __name__ == "__main__":
1017
+ set_start_method("spawn")
1018
+ main()
training/utils.py ADDED
@@ -0,0 +1,125 @@
1
+ import os
2
+ import re
3
+ import shutil
4
+ from pathlib import Path
5
+ from dataclasses import field
6
+ from typing import Dict, List
7
+
8
+ import torch
9
+ from wandb import Audio
10
+
11
+
12
+ def list_field(default=None, metadata=None):
13
+ return field(default_factory=lambda: default, metadata=metadata)
14
+
15
+
16
+ _RE_CHECKPOINT = re.compile(r"^checkpoint-(\d+)-epoch-(\d+)$")
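+ # e.g. "checkpoint-10000-epoch-0" matches, with groups ("10000", "0")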
17
+
18
+
19
+ def get_last_checkpoint(folder):
20
+ content = os.listdir(folder)
21
+ checkpoints = [
22
+ path
23
+ for path in content
24
+ if _RE_CHECKPOINT.search(path) is not None and os.path.isdir(os.path.join(folder, path))
25
+ ]
26
+ if len(checkpoints) == 0:
27
+ return
28
+ return os.path.join(folder, max(checkpoints, key=lambda x: int(_RE_CHECKPOINT.search(x).groups()[0])))
29
+
30
+
31
+ def sorted_checkpoints(output_dir=None, checkpoint_prefix="checkpoint") -> List[str]:
32
+ """Helper function to sort saved checkpoints from oldest to newest."""
33
+ ordering_and_checkpoint_path = []
34
+
35
+ glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
36
+
37
+ for path in glob_checkpoints:
38
+ regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
39
+ if regex_match is not None and regex_match.groups() is not None:
40
+ ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
41
+
42
+ checkpoints_sorted = sorted(ordering_and_checkpoint_path)
43
+ checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
44
+ return checkpoints_sorted
45
+
46
+
47
+ def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint", logger=None) -> None:
48
+ """Helper function to delete old checkpoints."""
49
+ if save_total_limit is None or save_total_limit <= 0:
50
+ return
51
+ # Check if we should delete older checkpoint(s)
52
+ checkpoints_sorted = sorted_checkpoints(output_dir=output_dir, checkpoint_prefix=checkpoint_prefix)
53
+ if len(checkpoints_sorted) <= save_total_limit:
54
+ return
55
+
56
+ number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
57
+ checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
58
+ for checkpoint in checkpoints_to_be_deleted:
59
+ logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
60
+ shutil.rmtree(checkpoint, ignore_errors=True)
61
+
62
+
63
+ def log_metric(
64
+ accelerator,
65
+ metrics: Dict,
66
+ train_time: float,
67
+ step: int,
68
+ epoch: int,
69
+ learning_rate: float = None,
70
+ prefix: str = "train",
71
+ ):
72
+ """Helper function to log all training/evaluation metrics with the correct prefixes and styling."""
73
+ log_metrics = {}
74
+ for k, v in metrics.items():
75
+ log_metrics[f"{prefix}/{k}"] = v
76
+ log_metrics[f"{prefix}/time"] = train_time
77
+ log_metrics[f"{prefix}/epoch"] = epoch
78
+ if learning_rate is not None:
79
+ log_metrics[f"{prefix}/learning_rate"] = learning_rate
80
+ accelerator.log(log_metrics, step=step)
81
+
82
+
83
+ def log_pred(
84
+ accelerator,
85
+ pred_descriptions: List[str],
86
+ pred_prompts: List[str],
87
+ transcriptions: List[str],
88
+ audios: List[torch.Tensor],
89
+ sampling_rate: int,
90
+ step: int,
91
+ prefix: str = "eval",
92
+ num_lines: int = 200000,
93
+ ):
94
+ """Helper function to log target/predicted transcriptions to weights and biases (wandb)."""
95
+ if accelerator.is_main_process:
96
+ wandb_tracker = accelerator.get_tracker("wandb")
97
+ # pretty name for current step: step 50000 -> step 50k
98
+ cur_step_pretty = f"{int(step // 1000)}k" if step > 1000 else step
99
+ prefix_pretty = prefix.replace("/", "-")
100
+
101
+ # convert str data to a wandb compatible format
102
+ str_data = [[pred_descriptions[i], pred_prompts[i], transcriptions[i]] for i in range(len(pred_descriptions))]
103
+ # log as a table with the appropriate headers
104
+ wandb_tracker.log_table(
105
+ table_name=f"predictions/{prefix_pretty}-step-{cur_step_pretty}",
106
+ columns=["Target descriptions", "Target prompts", "Predicted transcriptions"],
107
+ data=str_data[:num_lines],
108
+ step=step,
109
+ commit=False,
110
+ )
111
+
112
+ # wandb can only load 100 audios per step
113
+ wandb_tracker.log(
114
+ {
115
+ "Speech samples": [
116
+ Audio(
117
+ audio,
118
+ caption=f"{pred_prompts[i]} --- DESCRIPTION: {pred_descriptions[i]}",
119
+ sample_rate=sampling_rate,
120
+ )
121
+ for (i, audio) in enumerate(audios[: min(len(audios), 100)])
122
+ ]
123
+ },
124
+ step=step,
125
+ )