victan committed
Commit 1d81c94
Parent: 2af43cd

Upload seamless_communication/inference/generator.py with huggingface_hub

seamless_communication/inference/generator.py ADDED
@@ -0,0 +1,364 @@
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# MIT_LICENSE file in the root directory of this source tree.

from dataclasses import dataclass
from typing import List, Optional, Tuple

import torch
from fairseq2.data import SequenceData, StringLike
from fairseq2.data.text import TextTokenizer
from fairseq2.generation import (
    BeamSearchSeq2SeqGenerator,
    Seq2SeqGenerator,
    SequenceToTextConverter,
    StepProcessor,
)
from fairseq2.nn.padding import (
    PaddingMask,
    apply_padding_mask,
    get_seqs_and_padding_mask,
    pad_seqs,
)
from fairseq2.nn.utils.module import infer_device
from torch import Tensor

from seamless_communication.models.unity.model import (
    UnitYModel,
    UnitYT2UModel,
    UnitYX2TModel,
)
from seamless_communication.models.unity.unit_tokenizer import (
    UnitTokenDecoder,
    UnitTokenizer,
)


def remove_consecutive_repeated_ngrams(
    sequence: List[int], min_size: int = 1, max_size: int = 40
) -> List[int]:
    assert 1 <= min_size <= max_size
    drop_idx = set()  # indices that will be dropped from the sequence

    # Start from the beginning; check if an ngram of size k (for k=max..min)
    # is followed by its copy. If so, delete the first one, and start over
    # after the deleted ngram.
    start = 0
    while start < len(sequence):
        for k in range(max_size, min_size - 1, -1):
            if sequence[start : start + k] == sequence[start + k : start + k + k]:
                drop_idx |= set(range(start, start + k))
                start += k - 1  # assumes repeating subsequences don't overlap
                break
        start += 1
    return [token for idx, token in enumerate(sequence) if idx not in drop_idx]

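# A quick illustration of the filter above on toy values (a doctest-style
# sketch, not part of the original module): the repeated bigram ``1, 2``
# collapses to a single occurrence, and runs of a repeated token collapse
# to one token.
#
#     >>> remove_consecutive_repeated_ngrams([7, 1, 2, 1, 2, 9])
#     [7, 1, 2, 9]
#     >>> remove_consecutive_repeated_ngrams([5, 5, 5])
#     [5]

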
@dataclass
class SequenceGeneratorOptions:
    """Holds the options to pass to a sequence generator."""

    beam_size: int = 5
    """The beam size."""

    soft_max_seq_len: Tuple[int, int] = (1, 200)
    """The terms ``a`` and ``b`` of ``ax + b``, where ``x`` is the source
    sequence length. The generated sequences (including the prefix sequence)
    will have a maximum length of ``min(hard_max_seq_len, ax + b)``. See also
    ``hard_max_seq_len``."""

    hard_max_seq_len: int = 1024
    """The hard limit on the maximum length of generated sequences."""

    step_processor: Optional[StepProcessor] = None
    """The processor called at each generation step."""

    unk_penalty: float = 0.0
    """The UNK symbol penalty, where values less than 0 produce more UNKs and
    values greater than 0 produce fewer UNKs."""

    len_penalty: float = 1.0
    """The length penalty, where values less than 1.0 favor shorter sequences
    and values greater than 1.0 favor longer sequences."""

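# For example (an illustrative sketch, not part of the public API): with the
# defaults above, a source sequence of length x = 300 is capped at
# min(hard_max_seq_len, a * x + b) = min(1024, 1 * 300 + 200) = 500 tokens.
#
#     >>> opts = SequenceGeneratorOptions(beam_size=5, soft_max_seq_len=(1, 200))
#     >>> a, b = opts.soft_max_seq_len
#     >>> min(opts.hard_max_seq_len, a * 300 + b)
#     500

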
88
+ """Generates text translations and speech units from a UnitY model."""
89
+
90
+ model: UnitYModel
91
+ s2t_converter: SequenceToTextConverter
92
+ t2t_converter: Optional[SequenceToTextConverter]
93
+ unit_decoder: Optional[UnitTokenDecoder]
94
+ unit_prefix_indices: Optional[Tensor]
95
+ unit_generator: Optional[Seq2SeqGenerator]
96
+
97
+ def __init__(
98
+ self,
99
+ model: UnitYModel,
100
+ text_tokenizer: TextTokenizer,
101
+ target_lang: str,
102
+ unit_tokenizer: Optional[UnitTokenizer] = None,
103
+ text_opts: Optional[SequenceGeneratorOptions] = None,
104
+ unit_opts: Optional[SequenceGeneratorOptions] = None,
105
+ ) -> None:
106
+ """
107
+ :param model:
108
+ The UnitY model to use for generation.
109
+ :param text_tokenizer:
110
+ The text tokenizer to use.
111
+ :param unit_tokenizer:
112
+ The unit tokenizer to use.
113
+ :param target_lang:
114
+ The target language.
115
+ :param text_generator_opts:
116
+ The options to pass to the underlying text :class:`Seq2SeqGenerator`.
117
+ :param unit_generator_opts:
118
+ The options to pass to the underlying unit :class:`Seq2SeqGenerator`.
119
+ """
120
+ model.eval()
121
+
122
+ self.model = model
123
+
124
+ if text_opts is None:
125
+ text_opts = SequenceGeneratorOptions()
126
+
127
+ if model.text_decoder is None:
128
+ raise ValueError(
129
+ "`UnitYGenerator` requires a text decoder, but the current UnitY model does not have one."
130
+ )
131
+ assert model.text_decoder_frontend is not None
132
+ assert model.final_proj is not None
133
+
134
+ s2t_model = UnitYX2TModel(
135
+ encoder_frontend=model.speech_encoder_frontend,
136
+ encoder=model.speech_encoder,
137
+ decoder_frontend=model.text_decoder_frontend,
138
+ decoder=model.text_decoder,
139
+ final_proj=model.final_proj,
140
+ target_vocab_info=model.target_vocab_info,
141
+ )
142
+
143
+ step_processors = []
144
+ if text_opts.step_processor is not None:
145
+ step_processors.append(text_opts.step_processor)
146
+
147
+ generator = BeamSearchSeq2SeqGenerator(
148
+ s2t_model,
149
+ beam_size=text_opts.beam_size,
150
+ max_gen_len=text_opts.soft_max_seq_len,
151
+ max_seq_len=text_opts.hard_max_seq_len,
152
+ echo_prompt=True,
153
+ step_processors=step_processors,
154
+ unk_penalty=text_opts.unk_penalty,
155
+ len_penalty=text_opts.len_penalty,
156
+ )
157
+ self.s2t_converter = SequenceToTextConverter(
158
+ generator, text_tokenizer, "translation", target_lang
159
+ )
160
+
161
+ if model.text_encoder is None:
162
+ self.t2t_generator = None
163
+ else:
164
+ assert model.text_encoder_frontend is not None
165
+ assert model.text_encoder is not None
166
+ t2t_model = UnitYX2TModel(
167
+ encoder_frontend=model.text_encoder_frontend,
168
+ encoder=model.text_encoder,
169
+ decoder_frontend=model.text_decoder_frontend,
170
+ decoder=model.text_decoder,
171
+ final_proj=model.final_proj,
172
+ target_vocab_info=model.target_vocab_info,
173
+ )
174
+ generator = BeamSearchSeq2SeqGenerator(
175
+ t2t_model,
176
+ beam_size=text_opts.beam_size,
177
+ max_gen_len=text_opts.soft_max_seq_len,
178
+ max_seq_len=text_opts.hard_max_seq_len,
179
+ echo_prompt=True,
180
+ step_processors=step_processors,
181
+ unk_penalty=text_opts.unk_penalty,
182
+ len_penalty=text_opts.len_penalty,
183
+ )
184
+ self.t2t_converter = SequenceToTextConverter(
185
+ generator, text_tokenizer, "translation", target_lang
186
+ )
187
+
188
+ self.unit_generator = None
189
+ self.unit_decoder = None
190
+ # Set up unit generator.
191
+ if unit_tokenizer is not None:
192
+ if model.t2u_model is None:
193
+ raise ValueError(
194
+ "`model` does not have a T2U sub-model when `unit_tokenizer` is not None."
195
+ )
196
+
197
+ self.unit_decoder = unit_tokenizer.create_decoder()
198
+
199
+ unit_encoder = unit_tokenizer.create_encoder(
200
+ lang=target_lang, device=infer_device(model.t2u_model)
201
+ )
202
+
203
+ self.unit_prefix_indices = unit_encoder.prefix_indices
204
+
205
+ if isinstance(self.model.t2u_model, UnitYT2UModel):
206
+ if unit_opts is None:
207
+ # Speech sequences are typically much longer than text sequences.
208
+ unit_opts = SequenceGeneratorOptions(
209
+ soft_max_seq_len=(25, 50), hard_max_seq_len=5000
210
+ )
211
+
212
+ step_processors = []
213
+ if unit_opts.step_processor is not None:
214
+ step_processors.append(unit_opts.step_processor)
215
+
216
+ self.unit_generator = BeamSearchSeq2SeqGenerator(
217
+ self.model.t2u_model,
218
+ beam_size=unit_opts.beam_size,
219
+ max_gen_len=unit_opts.soft_max_seq_len,
220
+ max_seq_len=unit_opts.hard_max_seq_len,
221
+ echo_prompt=True,
222
+ step_processors=step_processors,
223
+ unk_penalty=unit_opts.unk_penalty,
224
+ len_penalty=unit_opts.len_penalty,
225
+ )
226
+
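    # A minimal construction sketch (the variable names are illustrative; the
    # model and tokenizers would normally come from seamless_communication's
    # model loaders):
    #
    #     generator = UnitYGenerator(
    #         model,                  # a loaded UnitYModel
    #         text_tokenizer,         # a fairseq2 TextTokenizer
    #         target_lang="fra",
    #         unit_tokenizer=unit_tokenizer,
    #         text_opts=SequenceGeneratorOptions(beam_size=5),
    #     )
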
    @torch.inference_mode()
    def __call__(
        self,
        source_seqs: Tensor,
        source_padding_mask: Optional[PaddingMask],
        input_modality: str = "speech",
        output_modality: str = "speech",
        ngram_filtering: bool = False,
        duration_factor: float = 1.0,
        prosody_encoder_input: Optional[SequenceData] = None,
    ) -> Tuple[List[StringLike], Optional[Tensor]]:
        """
        :param source_seqs:
            The source sequences to use for generation. *Shape:* :math:`(N,S,*)`,
            where :math:`N` is the batch size, :math:`S` is the sequence length,
            and :math:`*` is any number of sequence-specific dimensions
            including none.
        :param source_padding_mask:
            The padding mask of ``source_seqs``. *Shape:* :math:`(N,S)`, where
            :math:`N` is the batch size and :math:`S` is the sequence length.
        :param input_modality:
            The modality of the input, either "speech" or "text".
        :param output_modality:
            The modality of the output, either "speech" or "text".
        :param ngram_filtering:
            If ``True``, removes consecutive repeated ngrams from the decoded
            unit output.
        :param duration_factor:
            The factor by which to scale the unit durations predicted by a
            non-autoregressive T2U decoder.
        :param prosody_encoder_input:
            The prosody encoder input, required when the model has a prosody
            encoder.

        :returns:
            - The output of the text generator.
            - The output of the unit generator.
        """
        if input_modality == "speech":
            texts, text_gen_output = self.s2t_converter.batch_convert(
                source_seqs, source_padding_mask
            )
        elif input_modality == "text":
            if self.t2t_converter is None:
                raise ValueError(
                    "Please set `use_text_encoder` to `True` in your model config to encode text."
                )
            texts, text_gen_output = self.t2t_converter.batch_convert(
                source_seqs, source_padding_mask
            )
        else:
            raise ValueError(f"Unsupported input_modality: {input_modality}")

        # We skip T2U when we only need to output text.
        if output_modality == "text":
            return texts, None

        assert self.model.target_vocab_info.pad_idx is not None

        text_seq_list = [h[0].seq for h in text_gen_output.hypotheses]

        text_seqs, text_padding_mask = pad_seqs(
            text_seq_list, self.model.target_vocab_info.pad_idx
        )

        # Manually trim the final EOS token to be consistent with fairseq.
        text_seqs = text_seqs[:, :-1]

        if text_padding_mask is not None:
            text_padding_mask = text_padding_mask.trim(1)

        # Use the output of the text generator to compute the decoder output.
        decoder_output, decoder_padding_mask = self.model.decode(
            text_seqs,
            text_padding_mask,
            text_gen_output.encoder_output,
            text_gen_output.encoder_padding_mask,
        )

        assert self.model.t2u_model is not None
        assert self.unit_decoder is not None

        unit_gen_output = None
        prosody_encoder_out = None
        if self.model.prosody_encoder_model is not None:
            assert prosody_encoder_input is not None
            prosody_input_seqs, prosody_padding_mask = get_seqs_and_padding_mask(
                prosody_encoder_input
            )
            prosody_encoder_out = self.model.prosody_encoder_model(
                prosody_input_seqs,
                prosody_padding_mask,
            ).unsqueeze(1)

        if isinstance(self.model.t2u_model, UnitYT2UModel):
            assert self.unit_generator is not None
            assert self.unit_prefix_indices is not None

            # (S_pre) -> (N, S_pre)
            prefix_seqs = self.unit_prefix_indices.expand(decoder_output.size(0), -1)

            unit_gen_output = self.unit_generator(
                source_seqs=decoder_output,
                source_padding_mask=decoder_padding_mask,
                prompt_seqs=prefix_seqs,
                prompt_padding_mask=None,
            )

            assert self.model.t2u_model.target_vocab_info.pad_idx is not None

            unit_seq_list = [h[0].seq for h in unit_gen_output.hypotheses]

            unit_seqs, _ = pad_seqs(
                unit_seq_list, self.model.t2u_model.target_vocab_info.pad_idx
            )
        else:
            t2u_model_output, decoder_padding_mask, _ = self.model.t2u_model(
                text_decoder_output=decoder_output,
                text_decoder_padding_mask=decoder_padding_mask,
                text_seqs=text_seqs,
                duration_factor=duration_factor,
                film_cond_emb=prosody_encoder_out,
            )
            # (B, S_unit, V_unit)
            unit_seqs = t2u_model_output.logits.argmax(dim=2)
            # Apply the padding mask to the generated units.
            unit_seqs = apply_padding_mask(
                unit_seqs, decoder_padding_mask, t2u_model_output.vocab_info.pad_idx
            )

        # Convert to speech units.
        units = self.unit_decoder(unit_seqs)

        # ngram-filtering doesn't apply to NAR unit decoding.
        if ngram_filtering and isinstance(self.model.t2u_model, UnitYT2UModel):
            if units.size(0) > 1:
                raise NotImplementedError(
                    "unit ngram_filtering is not implemented for batch_size > 1."
                )
            arr = remove_consecutive_repeated_ngrams(units[0].tolist())
            units = torch.tensor(arr).to(units).unsqueeze(0)

        return texts, units
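

# End-to-end usage, as a rough sketch (assumes `generator` was built as in the
# construction sketch above and that `fbank` and `padding_mask` hold a batch of
# speech features and its padding mask; the variable names are illustrative):
#
#     texts, units = generator(
#         fbank,
#         padding_mask,
#         input_modality="speech",
#         output_modality="speech",
#         ngram_filtering=True,  # only applied on the AR (UnitYT2UModel) path
#     )
#     print(texts[0])  # the text translation
#     # `units` is a tensor of speech units, or None when output_modality="text"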