boris committed on
Commit 972bc8d (1 parent: 6f1f2d9)

refactor(model): inherit from HF Flax & simplify

dalle_mini/model/__init__.py CHANGED
@@ -1,2 +1,2 @@
1
  from .configuration import DalleBartConfig
2
- from .modeling import DalleBartForConditionalGeneration
 
1
  from .configuration import DalleBartConfig
2
+ from .modeling import DalleBart
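
Downstream code imports the model through this entry point, so the rename from DalleBartForConditionalGeneration to DalleBart is the user-visible API change. A minimal sketch of the new import, assuming the package layout above (the checkpoint id is a placeholder, not part of this commit):

    from dalle_mini.model import DalleBart, DalleBartConfig

    # from_pretrained is inherited from the HF Flax pretrained-model base classes
    model = DalleBart.from_pretrained("dalle-mini/dalle-mini")  # hypothetical checkpoint id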
dalle_mini/model/configuration.py CHANGED
@@ -22,69 +22,6 @@ logger = logging.get_logger(__name__)
22
 
23
 
24
  class DalleBartConfig(PretrainedConfig):
25
- r"""
26
- This is the configuration class to store the configuration of a `DalleBartModel`. It is used to
27
- instantiate a DalleBart model according to the specified arguments, defining the model architecture. Instantiating a
28
- configuration with the defaults will yield a similar configuration to that of the BART `facebook/bart-large
29
- <https://huggingface.co/facebook/bart-large>`__ architecture.
30
-
31
- Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
32
- outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
33
-
34
-
35
- Args:
36
- encoder_vocab_size (:obj:`int`, `optional`, defaults to 50265):
37
- Vocabulary size of the BART model. Defines the number of different tokens that can be represented by the
38
- :obj:`inputs_ids` passed when calling :class:`~transformers.BartModel` or
39
- :class:`~transformers.TFBartModel`.
40
- d_model (:obj:`int`, `optional`, defaults to 1024):
41
- Dimensionality of the layers and the pooler layer.
42
- encoder_layers (:obj:`int`, `optional`, defaults to 12):
43
- Number of encoder layers.
44
- decoder_layers (:obj:`int`, `optional`, defaults to 12):
45
- Number of decoder layers.
46
- encoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
47
- Number of attention heads for each attention layer in the Transformer encoder.
48
- decoder_attention_heads (:obj:`int`, `optional`, defaults to 16):
49
- Number of attention heads for each attention layer in the Transformer decoder.
50
- decoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
51
- Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
52
- encoder_ffn_dim (:obj:`int`, `optional`, defaults to 4096):
53
- Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
54
- activation_function (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
55
- The non-linear activation function (function or string) in the encoder and pooler. If string,
56
- :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
57
- dropout (:obj:`float`, `optional`, defaults to 0.1):
58
- The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
59
- attention_dropout (:obj:`float`, `optional`, defaults to 0.0):
60
- The dropout ratio for the attention probabilities.
61
- activation_dropout (:obj:`float`, `optional`, defaults to 0.0):
62
- The dropout ratio for activations inside the fully connected layer.
63
- classifier_dropout (:obj:`float`, `optional`, defaults to 0.0):
64
- The dropout ratio for classifier.
65
- max_position_embeddings (:obj:`int`, `optional`, defaults to 1024):
66
- The maximum sequence length that this model might ever be used with. Typically set this to something large
67
- just in case (e.g., 512 or 1024 or 2048).
68
- init_std (:obj:`float`, `optional`, defaults to 0.02):
69
- The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
- encoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0):
71
- The LayerDrop probability for the encoder. See the `LayerDrop paper <see
72
- https://arxiv.org/abs/1909.11556>`__ for more details.
73
- decoder_layerdrop: (:obj:`float`, `optional`, defaults to 0.0):
74
- The LayerDrop probability for the decoder. See the `LayerDrop paper <see
75
- https://arxiv.org/abs/1909.11556>`__ for more details.
76
- gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
77
- If True, use gradient checkpointing to save memory at the expense of slower backward pass.
78
- scale_embedding (:obj:`bool`, `optional`, defaults to :obj:`False`):
79
- Scale embeddings by diving by sqrt(d_model).
80
- use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
81
- Whether or not the model should return the last key/values attentions (not used by all models).
82
- num_labels: (:obj:`int`, `optional`, defaults to 3):
83
- The number of labels to use in :class:`~transformers.BartForSequenceClassification`.
84
- forced_eos_token_id (:obj:`int`, `optional`, defaults to 2):
85
- The id of the token to force as the last generated token when :obj:`max_length` is reached. Usually set to
86
- :obj:`eos_token_id`.
87
- """
88
  model_type = "dallebart"
89
  keys_to_ignore_at_inference = ["past_key_values"]
90
  attribute_map = {
@@ -117,10 +54,9 @@ class DalleBartConfig(PretrainedConfig):
117
  scale_embedding=False,
118
  gradient_checkpointing=False,
119
  use_cache=True,
120
- num_labels=3,
121
  is_encoder_decoder=True,
122
  forced_eos_token_id=None,
123
- tie_word_embeddings=False, # don't tie for scaling reasons and due to different modalities and sizes
124
  **kwargs,
125
  ):
126
  self.normalize_text = normalize_text
@@ -149,28 +85,25 @@ class DalleBartConfig(PretrainedConfig):
149
  self.scale_embedding = (
150
  scale_embedding # scale factor will be sqrt(d_model) if True
151
  )
152
- self.decoder_start_token_id = image_vocab_size # BOS appended to vocab
153
  self.min_length = image_length + 1
154
  self.max_length = image_length + 1
155
 
156
- # remove keys we are about to set to prevent errors
157
  for k in [
 
158
  "bos_token_id",
159
  "eos_token_id",
160
- "pad_token_id",
161
  "decoder_start_token_id",
162
- "forced_eos_token_id",
163
  ]:
164
  kwargs.pop(k, None)
165
 
166
  super().__init__(
167
- num_labels=num_labels,
168
  pad_token_id=image_vocab_size
169
  + 1, # needed to avoid errors during generation (converted to jnp.array)
170
  bos_token_id=image_vocab_size + 1, # set to unreachable values
171
  eos_token_id=image_vocab_size + 1,
172
  is_encoder_decoder=is_encoder_decoder,
173
- decoder_start_token_id=self.decoder_start_token_id,
174
  forced_eos_token_id=forced_eos_token_id,
175
  tie_word_embeddings=tie_word_embeddings,
176
  **kwargs,
 
22
 
23
 
24
  class DalleBartConfig(PretrainedConfig):
25
  model_type = "dallebart"
26
  keys_to_ignore_at_inference = ["past_key_values"]
27
  attribute_map = {
 
54
  scale_embedding=False,
55
  gradient_checkpointing=False,
56
  use_cache=True,
 
57
  is_encoder_decoder=True,
58
  forced_eos_token_id=None,
59
+ tie_word_embeddings=False, # different modalities and sizes
60
  **kwargs,
61
  ):
62
  self.normalize_text = normalize_text
 
85
  self.scale_embedding = (
86
  scale_embedding # scale factor will be sqrt(d_model) if True
87
  )
 
88
  self.min_length = image_length + 1
89
  self.max_length = image_length + 1
90
 
91
+ # remove inferred keys to prevent errors when loading config (passed as kwargs)
92
  for k in [
93
+ "pad_token_id",
94
  "bos_token_id",
95
  "eos_token_id",
 
96
  "decoder_start_token_id",
 
97
  ]:
98
  kwargs.pop(k, None)
99
 
100
  super().__init__(
 
101
  pad_token_id=image_vocab_size
102
  + 1, # needed to avoid errors during generation (converted to jnp.array)
103
  bos_token_id=image_vocab_size + 1, # set to unreachable values
104
  eos_token_id=image_vocab_size + 1,
105
  is_encoder_decoder=is_encoder_decoder,
106
+ decoder_start_token_id=image_vocab_size, # BOS appended to vocab
107
  forced_eos_token_id=forced_eos_token_id,
108
  tie_word_embeddings=tie_word_embeddings,
109
  **kwargs,
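
For reference, a short sketch of the token-id layout the config now derives; the numeric values below are illustrative placeholders, not defaults set by this commit:

    image_vocab_size = 16384                    # hypothetical VQGAN codebook size
    image_length = 256                          # hypothetical number of image tokens per sample

    decoder_vocab_rows = image_vocab_size + 1   # decoder embedding / lm_head rows: image codes + BOS
    decoder_start_token_id = image_vocab_size   # BOS appended just past the image codes
    pad_token_id = bos_token_id = eos_token_id = image_vocab_size + 1  # out of range, only to satisfy generate()
    min_length = max_length = image_length + 1  # BOS + image tokens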
dalle_mini/model/modeling.py CHANGED
@@ -1,5 +1,5 @@
1
  # coding=utf-8
2
- # Copyright 2021 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
@@ -12,72 +12,61 @@
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
- """ Flax Bart model. """
16
 
17
  import math
18
  from functools import partial
19
- from typing import Callable, Optional, Tuple
20
 
21
  import flax.linen as nn
22
  import jax
23
  import jax.numpy as jnp
24
- import numpy as np
25
- from flax.core.frozen_dict import FrozenDict, unfreeze
26
- from flax.linen import combine_masks, make_causal_mask
27
- from flax.linen.attention import dot_product_attention_weights
28
  from flax.traverse_util import flatten_dict
29
- from jax import lax
30
  from jax.random import PRNGKey
31
  from transformers.modeling_flax_outputs import (
32
- FlaxBaseModelOutput,
33
- FlaxBaseModelOutputWithPastAndCrossAttentions,
34
  FlaxCausalLMOutputWithCrossAttentions,
35
  FlaxSeq2SeqLMOutput,
36
- FlaxSeq2SeqModelOutput,
37
  )
38
- from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel
39
  from transformers.utils import logging
40
 
41
- from .configuration import DalleBartConfig
 
42
 
43
  logger = logging.get_logger(__name__)
44
 
45
 
46
- def shift_tokens_right(
47
- input_ids: np.array, pad_token_id: int, decoder_start_token_id: int
48
- ) -> np.ndarray:
49
  """
50
- Shift input ids one token to the right.
 
51
  """
52
- shifted_input_ids = np.zeros_like(input_ids)
53
- shifted_input_ids[:, 1:] = input_ids[:, :-1]
54
- shifted_input_ids[:, 0] = decoder_start_token_id
55
-
56
- shifted_input_ids = np.where(
57
- shifted_input_ids == -100, pad_token_id, shifted_input_ids
58
- )
59
- return shifted_input_ids
60
-
61
-
62
- class FlaxBartAttention(nn.Module):
63
- config: DalleBartConfig
64
- embed_dim: int
65
- num_heads: int
66
- dropout: float = 0.0
67
- causal: bool = False
68
- bias: bool = True
69
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
70
 
71
  def setup(self) -> None:
72
  self.head_dim = self.embed_dim // self.num_heads
73
- assert (
74
- self.head_dim * self.num_heads == self.embed_dim
75
- ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
 
 
76
 
77
  dense = partial(
78
  nn.Dense,
79
  self.embed_dim,
80
- use_bias=False,
81
  dtype=self.dtype,
82
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
83
  )
@@ -92,150 +81,13 @@ class FlaxBartAttention(nn.Module):
92
  jnp.ones((1, self.embed_dim), dtype="bool"), dtype="bool"
93
  )
94
 
95
- def _split_heads(self, hidden_states):
96
- return hidden_states.reshape(
97
- hidden_states.shape[:2] + (self.num_heads, self.head_dim)
98
- )
99
-
100
- def _merge_heads(self, hidden_states):
101
- return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
102
-
103
- @nn.compact
104
- def _concatenate_to_cache(self, key, value, query, attention_mask):
105
- """
106
- This function takes projected key, value states from a single input token and concatenates the states to cached
107
- states from previous steps. This function is slighly adapted from the official Flax repository:
108
- https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
109
- """
110
- # detect if we're initializing by absence of existing cache data.
111
- is_initialized = self.has_variable("cache", "cached_key")
112
- cached_key = self.variable(
113
- "cache", "cached_key", jnp.zeros, key.shape, key.dtype
114
- )
115
- cached_value = self.variable(
116
- "cache", "cached_value", jnp.zeros, value.shape, value.dtype
117
- )
118
- cache_index = self.variable(
119
- "cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)
120
- )
121
-
122
- if is_initialized:
123
- *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
124
- # update key, value caches with our new 1d spatial slices
125
- cur_index = cache_index.value
126
- indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
127
- key = lax.dynamic_update_slice(cached_key.value, key, indices)
128
- value = lax.dynamic_update_slice(cached_value.value, value, indices)
129
- cached_key.value = key
130
- cached_value.value = value
131
- num_updated_cache_vectors = query.shape[1]
132
- cache_index.value = cache_index.value + num_updated_cache_vectors
133
- # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
134
- pad_mask = jnp.broadcast_to(
135
- jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
136
- tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
137
- )
138
- attention_mask = combine_masks(pad_mask, attention_mask)
139
- return key, value, attention_mask
140
-
141
- def __call__(
142
- self,
143
- hidden_states: jnp.ndarray,
144
- attention_mask: jnp.ndarray,
145
- key_value_states: Optional[jnp.ndarray] = None,
146
- init_cache: bool = False,
147
- deterministic: bool = True,
148
- ) -> Tuple[jnp.ndarray]:
149
- """Input shape: Batch x Time x Channel"""
150
-
151
- # if key_value_states are provided this layer is used as a cross-attention layer
152
- # for the decoder
153
- is_cross_attention = key_value_states is not None
154
- batch_size = hidden_states.shape[0]
155
-
156
- # get query proj
157
- query_states = self.q_proj(hidden_states)
158
- # get key, value proj
159
- if is_cross_attention:
160
- # cross_attentions
161
- key_states = self.k_proj(key_value_states)
162
- value_states = self.v_proj(key_value_states)
163
- else:
164
- # self_attention
165
- key_states = self.k_proj(hidden_states)
166
- value_states = self.v_proj(hidden_states)
167
-
168
- query_states = self._split_heads(query_states)
169
- key_states = self._split_heads(key_states)
170
- value_states = self._split_heads(value_states)
171
-
172
- # handle cache prepare causal attention mask
173
- if self.causal:
174
- query_length, key_length = query_states.shape[1], key_states.shape[1]
175
- if self.has_variable("cache", "cached_key"):
176
- mask_shift = self.variables["cache"]["cache_index"]
177
- max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
178
- causal_mask = lax.dynamic_slice(
179
- self.causal_mask,
180
- (0, 0, mask_shift, 0),
181
- (1, 1, query_length, max_decoder_length),
182
- )
183
- else:
184
- causal_mask = self.causal_mask[:, :, :query_length, :key_length]
185
- causal_mask = jnp.broadcast_to(
186
- causal_mask, (batch_size,) + causal_mask.shape[1:]
187
- )
188
-
189
- # combine masks if needed
190
- if self.causal:
191
- attention_mask = jnp.broadcast_to(
192
- jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
193
- )
194
- attention_mask = combine_masks(attention_mask, causal_mask)
195
- else:
196
- attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
197
-
198
- # During fast autoregressive decoding, we feed one position at a time,
199
- # and cache the keys and values step by step.
200
- if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
201
- key_states, value_states, attention_mask = self._concatenate_to_cache(
202
- key_states, value_states, query_states, attention_mask
203
- )
204
-
205
- # Convert the boolean attention mask to an attention bias.
206
- # attention mask in the form of attention bias
207
- attention_bias = lax.select(
208
- attention_mask > 0,
209
- jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
210
- jnp.full(attention_mask.shape, float("-inf")).astype(self.dtype),
211
- )
212
-
213
- dropout_rng = None
214
- if not deterministic and self.dropout > 0.0:
215
- dropout_rng = self.make_rng("dropout")
216
-
217
- attn_weights = dot_product_attention_weights(
218
- query_states,
219
- key_states,
220
- bias=attention_bias,
221
- dropout_rng=dropout_rng,
222
- dropout_rate=self.dropout,
223
- broadcast_dropout=True,
224
- deterministic=deterministic,
225
- dtype=self.dtype,
226
- precision=None,
227
- )
228
-
229
- attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
230
- attn_output = self._merge_heads(attn_output)
231
- attn_output = self.out_proj(attn_output)
232
-
233
- return attn_output
234
-
235
 
236
- class FlaxBartEncoderLayer(nn.Module):
237
- config: DalleBartConfig
238
- dtype: jnp.dtype = jnp.float32
 
 
 
239
 
240
  def setup(self) -> None:
241
  self.embed_dim = self.config.d_model
@@ -244,9 +96,10 @@ class FlaxBartEncoderLayer(nn.Module):
244
  embed_dim=self.embed_dim,
245
  num_heads=self.config.encoder_attention_heads,
246
  dropout=self.config.attention_dropout,
 
247
  dtype=self.dtype,
248
  )
249
- self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
250
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
251
  self.activation_fn = ACT2FN[self.config.activation_function]
252
  self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
@@ -262,39 +115,15 @@ class FlaxBartEncoderLayer(nn.Module):
262
  use_bias=False,
263
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
264
  )
265
- self.final_layer_norm = nn.LayerNorm(dtype=self.dtype)
266
-
267
- def __call__(
268
- self,
269
- hidden_states: jnp.ndarray,
270
- attention_mask: jnp.ndarray,
271
- deterministic: bool = True,
272
- ) -> Tuple[jnp.ndarray]:
273
- residual = hidden_states
274
- hidden_states = self.self_attn(
275
- hidden_states=hidden_states, attention_mask=attention_mask
276
- )
277
-
278
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
279
- hidden_states = residual + hidden_states
280
- hidden_states = self.self_attn_layer_norm(hidden_states)
281
-
282
- residual = hidden_states
283
- hidden_states = self.activation_fn(self.fc1(hidden_states))
284
- hidden_states = self.activation_dropout_layer(
285
- hidden_states, deterministic=deterministic
286
- )
287
- hidden_states = self.fc2(hidden_states)
288
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
289
- hidden_states = residual + hidden_states
290
- hidden_states = self.final_layer_norm(hidden_states)
291
-
292
- return hidden_states
293
 
294
 
295
- class FlaxBartEncoderLayerCollection(nn.Module):
296
- config: DalleBartConfig
297
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
 
 
 
298
 
299
  def setup(self):
300
  layer_module = (
@@ -306,27 +135,15 @@ class FlaxBartEncoderLayerCollection(nn.Module):
306
  layer_module(self.config, name=str(i), dtype=self.dtype)
307
  for i in range(self.config.encoder_layers)
308
  ]
 
309
 
310
- def __call__(
311
- self,
312
- hidden_states,
313
- attention_mask,
314
- deterministic: bool = True,
315
- ):
316
-
317
- for encoder_layer in self.layers:
318
- hidden_states = encoder_layer(
319
- hidden_states,
320
- attention_mask,
321
- deterministic,
322
- )
323
 
324
- return FlaxBaseModelOutput(last_hidden_state=hidden_states)
325
-
326
-
327
- class FlaxBartDecoderLayer(nn.Module):
328
- config: DalleBartConfig
329
- dtype: jnp.dtype = jnp.float32
330
 
331
  def setup(self) -> None:
332
  self.embed_dim = self.config.d_model
@@ -336,21 +153,23 @@ class FlaxBartDecoderLayer(nn.Module):
336
  num_heads=self.config.decoder_attention_heads,
337
  dropout=self.config.attention_dropout,
338
  causal=True,
 
339
  dtype=self.dtype,
340
  )
341
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
342
  self.activation_fn = ACT2FN[self.config.activation_function]
343
  self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
344
 
345
- self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
346
  self.encoder_attn = FlaxBartAttention(
347
  config=self.config,
348
  embed_dim=self.embed_dim,
349
  num_heads=self.config.decoder_attention_heads,
350
  dropout=self.config.attention_dropout,
 
351
  dtype=self.dtype,
352
  )
353
- self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype)
354
  self.fc1 = nn.Dense(
355
  self.config.encoder_ffn_dim,
356
  dtype=self.dtype,
@@ -363,58 +182,15 @@ class FlaxBartDecoderLayer(nn.Module):
363
  use_bias=False,
364
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
365
  )
366
- self.final_layer_norm = nn.LayerNorm(dtype=self.dtype)
367
-
368
- def __call__(
369
- self,
370
- hidden_states: jnp.ndarray,
371
- attention_mask: jnp.ndarray,
372
- encoder_hidden_states: jnp.ndarray,
373
- encoder_attention_mask: Optional[jnp.ndarray] = None,
374
- init_cache: bool = False,
375
- deterministic: bool = True,
376
- ) -> Tuple[jnp.ndarray]:
377
- residual = hidden_states
378
-
379
- # Self Attention
380
- hidden_states = self.self_attn(
381
- hidden_states=hidden_states,
382
- attention_mask=attention_mask,
383
- init_cache=init_cache,
384
- )
385
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
386
- hidden_states = residual + hidden_states
387
- hidden_states = self.self_attn_layer_norm(hidden_states)
388
-
389
- # Cross-Attention Block
390
- residual = hidden_states
391
-
392
- hidden_states = self.encoder_attn(
393
- hidden_states=hidden_states,
394
- key_value_states=encoder_hidden_states,
395
- attention_mask=encoder_attention_mask,
396
- )
397
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
398
- hidden_states = residual + hidden_states
399
- hidden_states = self.encoder_attn_layer_norm(hidden_states)
400
-
401
- # Fully Connected
402
- residual = hidden_states
403
- hidden_states = self.activation_fn(self.fc1(hidden_states))
404
- hidden_states = self.activation_dropout_layer(
405
- hidden_states, deterministic=deterministic
406
- )
407
- hidden_states = self.fc2(hidden_states)
408
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
409
- hidden_states = residual + hidden_states
410
- hidden_states = self.final_layer_norm(hidden_states)
411
 
412
- return hidden_states
413
 
414
-
415
- class FlaxBartDecoderLayerCollection(nn.Module):
416
- config: DalleBartConfig
417
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
 
 
418
 
419
  def setup(self):
420
  layer_module = (
@@ -426,35 +202,17 @@ class FlaxBartDecoderLayerCollection(nn.Module):
426
  layer_module(self.config, name=str(i), dtype=self.dtype)
427
  for i in range(self.config.decoder_layers)
428
  ]
429
-
430
- def __call__(
431
- self,
432
- hidden_states,
433
- attention_mask,
434
- encoder_hidden_states: Optional[jnp.ndarray] = None,
435
- encoder_attention_mask: Optional[jnp.ndarray] = None,
436
- deterministic: bool = True,
437
- init_cache: bool = False,
438
- ):
439
- # decoder layers
440
- for decoder_layer in self.layers:
441
- hidden_states = decoder_layer(
442
- hidden_states,
443
- attention_mask=attention_mask,
444
- encoder_hidden_states=encoder_hidden_states,
445
- encoder_attention_mask=encoder_attention_mask,
446
- init_cache=init_cache,
447
- deterministic=deterministic,
448
- )
449
-
450
- return FlaxBaseModelOutputWithPastAndCrossAttentions(
451
- last_hidden_state=hidden_states
452
- )
453
 
454
 
455
- class DalleBartEncoder(nn.Module):
456
- config: DalleBartConfig
457
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
458
 
459
  def setup(self):
460
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
@@ -463,12 +221,6 @@ class DalleBartEncoder(nn.Module):
463
  self.padding_idx = self.config.pad_token_id
464
  self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
465
 
466
- self.embed_tokens = nn.Embed(
467
- self.config.encoder_vocab_size,
468
- embed_dim,
469
- embedding_init=jax.nn.initializers.normal(self.config.init_std),
470
- )
471
-
472
  # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
473
  # and adjust num_embeddings appropriately. Other models don't have this hack
474
  self.offset = 0
@@ -478,42 +230,17 @@ class DalleBartEncoder(nn.Module):
478
  embedding_init=jax.nn.initializers.normal(self.config.init_std),
479
  )
480
  self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
481
- self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
482
-
483
- def __call__(
484
- self,
485
- input_ids,
486
- attention_mask,
487
- position_ids,
488
- deterministic: bool = True,
489
- ):
490
- input_shape = input_ids.shape
491
- input_ids = input_ids.reshape(-1, input_shape[-1])
492
-
493
- inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
494
- inputs_embeds = inputs_embeds.astype(self.dtype)
495
-
496
- embed_pos = self.embed_positions(position_ids + self.offset)
497
- embed_pos = embed_pos.astype(self.dtype)
498
-
499
- hidden_states = inputs_embeds + embed_pos
500
- hidden_states = self.layernorm_embedding(hidden_states)
501
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
502
-
503
- outputs = self.layers(
504
- hidden_states, attention_mask, deterministic=deterministic
505
- )
506
-
507
- return FlaxBaseModelOutput(
508
- last_hidden_state=outputs.last_hidden_state,
509
- hidden_states=outputs.hidden_states,
510
- attentions=outputs.attentions,
511
- )
512
 
513
 
514
- class DalleBartDecoder(nn.Module):
515
- config: DalleBartConfig
516
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
517
 
518
  def setup(self):
519
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
@@ -524,12 +251,6 @@ class DalleBartDecoder(nn.Module):
524
  math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
525
  )
526
 
527
- self.embed_tokens = nn.Embed(
528
- self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
529
- embed_dim,
530
- embedding_init=jax.nn.initializers.normal(self.config.init_std),
531
- )
532
-
533
  # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
534
  # and adjust num_embeddings appropriately. Other models don't have this hack
535
  self.offset = 0
@@ -540,122 +261,41 @@ class DalleBartDecoder(nn.Module):
540
  )
541
 
542
  self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
543
- self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype)
544
-
545
- def __call__(
546
- self,
547
- input_ids,
548
- attention_mask,
549
- position_ids,
550
- encoder_hidden_states: Optional[jnp.ndarray] = None,
551
- encoder_attention_mask: Optional[jnp.ndarray] = None,
552
- init_cache: bool = False,
553
- deterministic: bool = True,
554
- ):
555
- input_shape = input_ids.shape
556
- input_ids = input_ids.reshape(-1, input_shape[-1])
557
 
558
- inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
559
- inputs_embeds = inputs_embeds.astype(self.dtype)
560
 
561
- # embed positions
562
- positions = self.embed_positions(position_ids + self.offset)
563
- positions = positions.astype(self.dtype)
564
-
565
- hidden_states = inputs_embeds + positions
566
- hidden_states = self.layernorm_embedding(hidden_states)
567
-
568
- hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
569
-
570
- outputs = self.layers(
571
- hidden_states,
572
- attention_mask,
573
- encoder_hidden_states,
574
- encoder_attention_mask,
575
- deterministic=deterministic,
576
- init_cache=init_cache,
577
- )
578
-
579
- return FlaxBaseModelOutputWithPastAndCrossAttentions(
580
- last_hidden_state=outputs.last_hidden_state,
581
- hidden_states=outputs.hidden_states,
582
- attentions=outputs.attentions,
583
- cross_attentions=outputs.cross_attentions,
584
- )
585
-
586
-
587
- class DalleBartModule(nn.Module):
588
- config: DalleBartConfig
589
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
590
 
591
  def setup(self):
592
- self.encoder = DalleBartEncoder(self.config, dtype=self.dtype)
593
- self.decoder = DalleBartDecoder(self.config, dtype=self.dtype)
594
-
595
- def _get_encoder_module(self):
596
- return self.encoder
597
-
598
- def _get_decoder_module(self):
599
- return self.decoder
600
-
601
- def __call__(
602
- self,
603
- input_ids,
604
- attention_mask,
605
- decoder_input_ids,
606
- decoder_attention_mask,
607
- position_ids,
608
- decoder_position_ids,
609
- return_dict: bool = True,
610
- deterministic: bool = True,
611
- ):
612
- encoder_outputs = self.encoder(
613
- input_ids=input_ids,
614
- attention_mask=attention_mask,
615
- position_ids=position_ids,
616
- deterministic=deterministic,
617
  )
618
-
619
- decoder_outputs = self.decoder(
620
- input_ids=decoder_input_ids,
621
- attention_mask=decoder_attention_mask,
622
- position_ids=decoder_position_ids,
623
- encoder_hidden_states=encoder_outputs[0],
624
- encoder_attention_mask=attention_mask,
625
- deterministic=deterministic,
626
  )
627
 
628
- if not return_dict:
629
- return decoder_outputs + encoder_outputs
630
-
631
- return FlaxSeq2SeqModelOutput(
632
- last_hidden_state=decoder_outputs.last_hidden_state,
633
- decoder_hidden_states=decoder_outputs.hidden_states,
634
- decoder_attentions=decoder_outputs.attentions,
635
- cross_attentions=decoder_outputs.cross_attentions,
636
- encoder_last_hidden_state=encoder_outputs.last_hidden_state,
637
- encoder_hidden_states=encoder_outputs.hidden_states,
638
- encoder_attentions=encoder_outputs.attentions,
639
  )
640
 
641
 
642
- class DalleBartPreTrainedModel(FlaxPreTrainedModel):
643
- config_class = DalleBartConfig
644
- base_model_prefix: str = "dallebart"
645
- module_class: nn.Module = None
646
-
647
- def __init__(
648
- self,
649
- config: DalleBartConfig,
650
- input_shape: Tuple[int] = (1, 1),
651
- seed: int = 0,
652
- dtype: jnp.dtype = jnp.float32,
653
- **kwargs,
654
- ):
655
- module = self.module_class(config=config, dtype=dtype)
656
- super().__init__(
657
- config, module, input_shape=input_shape, seed=seed, dtype=dtype, **kwargs
658
- )
659
 
660
  @property
661
  def num_params(self):
@@ -664,213 +304,23 @@ class DalleBartPreTrainedModel(FlaxPreTrainedModel):
664
  ).values()
665
  return sum(list(num_params))
666
 
667
- def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple) -> FrozenDict:
668
- # init input tensors
669
- input_ids = jnp.zeros(input_shape, dtype="i4")
670
- # make sure initialization pass will work for FlaxBartForSequenceClassificationModule
671
- input_ids = jax.ops.index_update(input_ids, (..., -1), self.config.eos_token_id)
672
- attention_mask = jnp.ones_like(input_ids)
673
- decoder_input_ids = input_ids
674
- decoder_attention_mask = jnp.ones_like(input_ids)
675
-
676
- batch_size, sequence_length = input_ids.shape
677
- position_ids = jnp.broadcast_to(
678
- jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
679
- )
680
- decoder_position_ids = jnp.broadcast_to(
681
- jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
682
- )
683
-
684
- params_rng, dropout_rng = jax.random.split(rng)
685
- rngs = {"params": params_rng, "dropout": dropout_rng}
686
-
687
- return self.module.init(
688
- rngs,
689
- input_ids,
690
- attention_mask,
691
- decoder_input_ids,
692
- decoder_attention_mask,
693
- position_ids,
694
- decoder_position_ids,
695
- )["params"]
696
-
697
- def init_cache(self, batch_size, max_length, encoder_outputs):
698
- r"""
699
- Args:
700
- batch_size (:obj:`int`):
701
- batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
702
- max_length (:obj:`int`):
703
- maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
704
- cache.
705
- encoder_outputs (:obj:`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray)]`):
706
- ``encoder_outputs`` consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`,
707
- `optional`: :obj:`attentions`). :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length,
708
- hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the
709
- encoder. Used in the cross-attention of the decoder.
710
- """
711
- # init input variables to retrieve cache
712
- decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
713
- decoder_attention_mask = jnp.ones_like(decoder_input_ids)
714
- decoder_position_ids = jnp.broadcast_to(
715
- jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]),
716
- decoder_input_ids.shape,
717
- )
718
-
719
- def _decoder_forward(
720
- module,
721
- decoder_input_ids,
722
- decoder_attention_mask,
723
- decoder_position_ids,
724
- **kwargs,
725
- ):
726
- decoder_module = module._get_decoder_module()
727
- return decoder_module(
728
- decoder_input_ids,
729
- decoder_attention_mask,
730
- decoder_position_ids,
731
- **kwargs,
732
- )
733
-
734
- init_variables = self.module.init(
735
- jax.random.PRNGKey(0),
736
- decoder_input_ids=decoder_input_ids,
737
- decoder_attention_mask=decoder_attention_mask,
738
- decoder_position_ids=decoder_position_ids,
739
- encoder_hidden_states=encoder_outputs[0],
740
- init_cache=True,
741
- method=_decoder_forward, # we only need to call the decoder to init the cache
742
- )
743
- return unfreeze(init_variables["cache"])
744
-
745
- def encode(
746
- self,
747
- input_ids: jnp.ndarray,
748
- attention_mask: Optional[jnp.ndarray] = None,
749
- position_ids: Optional[jnp.ndarray] = None,
750
- train: bool = False,
751
- params: dict = None,
752
- dropout_rng: PRNGKey = None,
753
- ):
754
- r"""
755
- Returns:
756
-
757
- Example::
758
-
759
- >>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
760
-
761
- >>> model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
762
- >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
763
-
764
- >>> text = "My friends are cool but they eat too many carbs."
765
- >>> inputs = tokenizer(text, max_length=1024, return_tensors='jax')
766
- >>> encoder_outputs = model.encode(**inputs)
767
- """
768
- if attention_mask is None:
769
- attention_mask = jnp.ones_like(input_ids)
770
- if position_ids is None:
771
- batch_size, sequence_length = input_ids.shape
772
- position_ids = jnp.broadcast_to(
773
- jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
774
- )
775
 
776
- # Handle any PRNG if needed
777
- rngs = {}
778
- if dropout_rng is not None:
779
- rngs["dropout"] = dropout_rng
780
-
781
- def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
782
- encode_module = module._get_encoder_module()
783
- return encode_module(input_ids, attention_mask, position_ids, **kwargs)
784
-
785
- return self.module.apply(
786
- {"params": params or self.params},
787
- input_ids=jnp.array(input_ids, dtype="i4"),
788
- attention_mask=jnp.array(attention_mask, dtype="i4"),
789
- position_ids=jnp.array(position_ids, dtype="i4"),
790
- deterministic=not train,
791
- rngs=rngs,
792
- method=_encoder_forward,
793
- )
794
-
795
- def __call__(
796
- self,
797
- input_ids: jnp.ndarray,
798
- attention_mask: Optional[jnp.ndarray] = None,
799
- decoder_input_ids: Optional[jnp.ndarray] = None,
800
- decoder_attention_mask: Optional[jnp.ndarray] = None,
801
- position_ids: Optional[jnp.ndarray] = None,
802
- decoder_position_ids: Optional[jnp.ndarray] = None,
803
- return_dict: Optional[bool] = None,
804
- train: bool = False,
805
- params: dict = None,
806
- dropout_rng: PRNGKey = None,
807
- ):
808
- return_dict = (
809
- return_dict if return_dict is not None else self.config.return_dict
810
- )
811
-
812
- # prepare encoder inputs
813
- if attention_mask is None:
814
- attention_mask = jnp.ones_like(input_ids)
815
- if position_ids is None:
816
- batch_size, sequence_length = input_ids.shape
817
- position_ids = jnp.broadcast_to(
818
- jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
819
- )
820
-
821
- # prepare decoder inputs
822
- if decoder_input_ids is None:
823
- decoder_input_ids = shift_tokens_right(
824
- input_ids,
825
- self.config.pad_token_id,
826
- decoder_start_token_id=self.config.decoder_start_token_id,
827
- )
828
- if decoder_attention_mask is None:
829
- decoder_attention_mask = jnp.ones_like(decoder_input_ids)
830
- if decoder_position_ids is None:
831
- batch_size, sequence_length = decoder_input_ids.shape
832
- decoder_position_ids = jnp.broadcast_to(
833
- jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
834
- )
835
-
836
- # Handle any PRNG if needed
837
- rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
838
-
839
- return self.module.apply(
840
- {"params": params or self.params},
841
- input_ids=jnp.array(input_ids, dtype="i4"),
842
- attention_mask=jnp.array(attention_mask, dtype="i4"),
843
- position_ids=jnp.array(position_ids, dtype="i4"),
844
- decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
845
- decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
846
- decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
847
- deterministic=not train,
848
- rngs=rngs,
849
- )
850
-
851
-
852
- class DalleBartForConditionalGenerationModule(nn.Module):
853
- config: DalleBartConfig
854
- dtype: jnp.dtype = jnp.float32
855
- bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
856
 
857
  def setup(self):
858
- self.model = DalleBartModule(config=self.config, dtype=self.dtype)
859
  self.lm_head = nn.Dense(
860
  self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
861
  use_bias=False,
862
  dtype=self.dtype,
863
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
864
  )
865
- self.final_logits_bias = self.param(
866
- "final_logits_bias", self.bias_init, (1, self.config.image_vocab_size + 1)
867
- )
868
-
869
- def _get_encoder_module(self):
870
- return self.model.encoder
871
-
872
- def _get_decoder_module(self):
873
- return self.model.decoder
874
 
875
  def __call__(
876
  self,
@@ -880,6 +330,9 @@ class DalleBartForConditionalGenerationModule(nn.Module):
880
  decoder_attention_mask,
881
  position_ids,
882
  decoder_position_ids,
 
 
 
883
  deterministic: bool = True,
884
  ):
885
  outputs = self.model(
@@ -889,6 +342,9 @@ class DalleBartForConditionalGenerationModule(nn.Module):
889
  decoder_attention_mask=decoder_attention_mask,
890
  position_ids=position_ids,
891
  decoder_position_ids=decoder_position_ids,
 
 
 
892
  deterministic=deterministic,
893
  )
894
 
@@ -902,6 +358,10 @@ class DalleBartForConditionalGenerationModule(nn.Module):
902
  else:
903
  lm_logits = self.lm_head(hidden_states)
904
 
 
 
 
 
905
  return FlaxSeq2SeqLMOutput(
906
  logits=lm_logits,
907
  decoder_hidden_states=outputs.decoder_hidden_states,
@@ -913,9 +373,16 @@ class DalleBartForConditionalGenerationModule(nn.Module):
913
  )
914
 
915
 
916
- class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
917
- module_class = DalleBartForConditionalGenerationModule
918
- dtype: jnp.dtype = jnp.float32
919
 
920
  def decode(
921
  self,
@@ -925,30 +392,27 @@ class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
925
  decoder_attention_mask: Optional[jnp.ndarray] = None,
926
  decoder_position_ids: Optional[jnp.ndarray] = None,
927
  past_key_values: dict = None,
 
 
 
928
  train: bool = False,
929
  params: dict = None,
930
  dropout_rng: PRNGKey = None,
931
  ):
932
- r"""
933
- Returns:
934
-
935
- Example::
936
-
937
- >>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
938
-
939
- >>> model = FlaxBartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
940
- >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')
941
-
942
- >>> text = "My friends are cool but they eat too many carbs."
943
- >>> inputs = tokenizer(text, max_length=1024, return_tensors='jax')
944
- >>> encoder_outputs = model.encode(**inputs)
945
-
946
- >>> decoder_start_token_id = model.config.decoder_start_token_id
947
- >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
948
 
949
- >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
950
- >>> logits = outputs.logits
951
- """
952
  encoder_hidden_states = encoder_outputs[0]
953
  if encoder_attention_mask is None:
954
  batch_size, sequence_length = encoder_hidden_states.shape[:2]
@@ -1010,7 +474,6 @@ class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
1010
  else:
1011
  lm_logits = module.lm_head(hidden_states)
1012
 
1013
- lm_logits += module.final_logits_bias
1014
  return lm_logits, outputs
1015
 
1016
  outputs = self.module.apply(
@@ -1020,6 +483,9 @@ class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
1020
  decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
1021
  encoder_hidden_states=encoder_hidden_states,
1022
  encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
 
 
 
1023
  deterministic=not train,
1024
  rngs=rngs,
1025
  mutable=mutable,
@@ -1031,58 +497,21 @@ class DalleBartForConditionalGeneration(DalleBartPreTrainedModel):
1031
  else:
1032
  (lm_logits, decoder_outputs), past = outputs
1033
 
1034
- outputs = FlaxCausalLMOutputWithCrossAttentions(
1035
- logits=lm_logits,
1036
- hidden_states=decoder_outputs.hidden_states,
1037
- attentions=decoder_outputs.attentions,
1038
- cross_attentions=decoder_outputs.cross_attentions,
1039
- )
 
 
 
1040
 
1041
  # add updated cache to model output
1042
- if past_key_values is not None:
1043
  outputs["past_key_values"] = unfreeze(past["cache"])
1044
  return outputs
 
 
1045
 
1046
  return outputs
1047
-
1048
- def prepare_inputs_for_generation(
1049
- self,
1050
- decoder_input_ids,
1051
- max_length,
1052
- attention_mask: Optional[jnp.DeviceArray] = None,
1053
- decoder_attention_mask: Optional[jnp.DeviceArray] = None,
1054
- encoder_outputs=None,
1055
- **kwargs,
1056
- ):
1057
- # initializing the cache
1058
- batch_size, seq_length = decoder_input_ids.shape
1059
-
1060
- past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
1061
- # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
1062
- # But since the decoder uses a causal mask, those positions are masked anyways.
1063
- # Thus we can create a single static attention_mask here, which is more efficient for compilation
1064
- extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
1065
- if decoder_attention_mask is not None:
1066
- position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
1067
- extended_attention_mask = lax.dynamic_update_slice(
1068
- extended_attention_mask, decoder_attention_mask, (0, 0)
1069
- )
1070
- else:
1071
- position_ids = jnp.broadcast_to(
1072
- jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)
1073
- )
1074
-
1075
- return {
1076
- "past_key_values": past_key_values,
1077
- "encoder_outputs": encoder_outputs,
1078
- "encoder_attention_mask": attention_mask,
1079
- "decoder_attention_mask": extended_attention_mask,
1080
- "decoder_position_ids": position_ids,
1081
- }
1082
-
1083
- def update_inputs_for_generation(self, model_outputs, model_kwargs):
1084
- model_kwargs["past_key_values"] = model_outputs.past_key_values
1085
- model_kwargs["decoder_position_ids"] = (
1086
- model_kwargs["decoder_position_ids"][:, -1:] + 1
1087
- )
1088
- return model_kwargs
 
1
  # coding=utf-8
2
+ # Copyright 2021 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team and the DalleBart team. All rights reserved.
3
  #
4
  # Licensed under the Apache License, Version 2.0 (the "License");
5
  # you may not use this file except in compliance with the License.
 
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
+ """ DalleBart model. """
16
 
17
  import math
18
  from functools import partial
19
+ from typing import Optional
20
 
21
  import flax.linen as nn
22
  import jax
23
  import jax.numpy as jnp
24
+ from flax.core.frozen_dict import unfreeze
25
+ from flax.linen import make_causal_mask
 
 
26
  from flax.traverse_util import flatten_dict
 
27
  from jax.random import PRNGKey
28
  from transformers.modeling_flax_outputs import (
 
 
29
  FlaxCausalLMOutputWithCrossAttentions,
30
  FlaxSeq2SeqLMOutput,
 
31
  )
32
+ from transformers.modeling_flax_utils import ACT2FN
33
  from transformers.utils import logging
34
 
35
+ from transformers.models.bart.modeling_flax_bart import (
36
+ FlaxBartAttention,
37
+ FlaxBartEncoderLayer,
38
+ FlaxBartDecoderLayer,
39
+ FlaxBartEncoderLayerCollection,
40
+ FlaxBartDecoderLayerCollection,
41
+ FlaxBartEncoder,
42
+ FlaxBartDecoder,
43
+ FlaxBartModule,
44
+ FlaxBartForConditionalGenerationModule,
45
+ FlaxBartPreTrainedModel,
46
+ FlaxBartForConditionalGeneration,
47
+ )
48
 
49
  logger = logging.get_logger(__name__)
50
 
51
 
52
+ class FlaxBartAttention(FlaxBartAttention):
 
 
53
  """
54
+ Edits:
55
+ - causal mask considers embed_dim instead of max_position_embeddings
56
  """
57
 
58
  def setup(self) -> None:
59
  self.head_dim = self.embed_dim // self.num_heads
60
+ if self.head_dim * self.num_heads != self.embed_dim:
61
+ raise ValueError(
62
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
63
+ f" and `num_heads`: {self.num_heads})."
64
+ )
65
 
66
  dense = partial(
67
  nn.Dense,
68
  self.embed_dim,
69
+ use_bias=self.bias,
70
  dtype=self.dtype,
71
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
72
  )
 
81
  jnp.ones((1, self.embed_dim), dtype="bool"), dtype="bool"
82
  )
83
84
 
85
+ class FlaxBartEncoderLayer(FlaxBartEncoderLayer):
86
+ """
87
+ Edits:
88
+ - no bias
89
+ - use custom FlaxBartAttention
90
+ """
91
 
92
  def setup(self) -> None:
93
  self.embed_dim = self.config.d_model
 
96
  embed_dim=self.embed_dim,
97
  num_heads=self.config.encoder_attention_heads,
98
  dropout=self.config.attention_dropout,
99
+ bias=False,
100
  dtype=self.dtype,
101
  )
102
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
103
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
104
  self.activation_fn = ACT2FN[self.config.activation_function]
105
  self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
 
115
  use_bias=False,
116
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
117
  )
118
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
 
119
 
120
 
121
+ class FlaxBartEncoderLayerCollection(FlaxBartEncoderLayerCollection):
122
+ """
123
+ Edits:
124
+ - use custom FlaxBartEncoderLayer
125
+ - allow Gradient Checkpointing (nn.remat)
126
+ """
127
 
128
  def setup(self):
129
  layer_module = (
 
135
  layer_module(self.config, name=str(i), dtype=self.dtype)
136
  for i in range(self.config.encoder_layers)
137
  ]
138
+ self.layerdrop = self.config.encoder_layerdrop
139
 
140
 
141
+ class FlaxBartDecoderLayer(FlaxBartDecoderLayer):
142
+ """
143
+ Edits:
144
+ - no bias
145
+ - uses custom FlaxBartAttention
146
+ """
147
 
148
  def setup(self) -> None:
149
  self.embed_dim = self.config.d_model
 
153
  num_heads=self.config.decoder_attention_heads,
154
  dropout=self.config.attention_dropout,
155
  causal=True,
156
+ bias=False,
157
  dtype=self.dtype,
158
  )
159
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
160
  self.activation_fn = ACT2FN[self.config.activation_function]
161
  self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
162
 
163
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
164
  self.encoder_attn = FlaxBartAttention(
165
  config=self.config,
166
  embed_dim=self.embed_dim,
167
  num_heads=self.config.decoder_attention_heads,
168
  dropout=self.config.attention_dropout,
169
+ bias=False,
170
  dtype=self.dtype,
171
  )
172
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
173
  self.fc1 = nn.Dense(
174
  self.config.encoder_ffn_dim,
175
  dtype=self.dtype,
 
182
  use_bias=False,
183
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
184
  )
185
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
186
 
 
187
 
188
+ class FlaxBartDecoderLayerCollection(FlaxBartDecoderLayerCollection):
189
+ """
190
+ Edits:
191
+ - use custom FlaxBartDecoderLayer
192
+ - allow Gradient Checkpointing (nn.remat)
193
+ """
194
 
195
  def setup(self):
196
  layer_module = (
 
202
  layer_module(self.config, name=str(i), dtype=self.dtype)
203
  for i in range(self.config.decoder_layers)
204
  ]
205
+ self.layerdrop = self.config.decoder_layerdrop
206
 
207
 
208
+ class FlaxBartEncoder(FlaxBartEncoder):
209
+ """
210
+ Edits:
211
+ - offset set to 0 (no padding token)
212
+ - use max_text_length instead of max_position_embeddings
213
+ - use custom FlaxBartEncoderLayerCollection
214
+ - embed_tokens cannot be None (issue at compile time)
215
+ """
216
 
217
  def setup(self):
218
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
 
221
  self.padding_idx = self.config.pad_token_id
222
  self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
223
 
224
  # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
225
  # and adjust num_embeddings appropriately. Other models don't have this hack
226
  self.offset = 0
 
230
  embedding_init=jax.nn.initializers.normal(self.config.init_std),
231
  )
232
  self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
233
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
234
 
235
 
236
+ class FlaxBartDecoder(FlaxBartDecoder):
237
+ """
238
+ Edits:
239
+ - offset set to 0 (no padding token)
240
+ - use image_length + 1 (for BOS) instead of max_position_embeddings
241
+ - use custom FlaxBartDecoderLayerCollection
242
+ - embed_tokens cannot be None (issue at compile time)
243
+ """
244
 
245
  def setup(self):
246
  self.dropout_layer = nn.Dropout(rate=self.config.dropout)
 
251
  math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
252
  )
253
 
254
  # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
255
  # and adjust num_embeddings appropriately. Other models don't have this hack
256
  self.offset = 0
 
261
  )
262
 
263
  self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype)
264
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
265
 
 
 
266
 
267
+ class FlaxBartModule(FlaxBartModule):
268
+ """
269
+ Edits:
270
+ - use custom FlaxBartEncoder & FlaxBartDecoder
271
+ - use separate embeddings for Encoder & Decoder
272
+ """
273
 
274
  def setup(self):
275
+ encoder_embed_tokens = nn.Embed(
276
+ self.config.encoder_vocab_size,
277
+ self.config.d_model,
278
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
279
  )
280
+ decoder_embed_tokens = nn.Embed(
281
+ self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
282
+ self.config.d_model,
283
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
284
  )
285
 
286
+ self.encoder = FlaxBartEncoder(
287
+ self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens
288
+ )
289
+ self.decoder = FlaxBartDecoder(
290
+ self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens
291
  )
292
 
293
 
294
+ class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
295
+ """
296
+ Edits:
297
+ - added num_params property
298
+ """
299
 
300
  @property
301
  def num_params(self):
 
304
  ).values()
305
  return sum(list(num_params))
306
 
307
 
308
+ class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule):
309
+ """
310
+ Edits:
311
+ - no bias
312
+ - lm_head set to image_vocab_size + 1 (for BOS)
313
+ - uses custom FlaxBartModule
314
+ """
315
 
316
  def setup(self):
317
+ self.model = FlaxBartModule(config=self.config, dtype=self.dtype)
318
  self.lm_head = nn.Dense(
319
  self.config.image_vocab_size + 1, # image vocab size + 1 for BOS
320
  use_bias=False,
321
  dtype=self.dtype,
322
  kernel_init=jax.nn.initializers.normal(self.config.init_std),
323
  )
324
 
325
  def __call__(
326
  self,
 
330
  decoder_attention_mask,
331
  position_ids,
332
  decoder_position_ids,
333
+ output_attentions: bool = False,
334
+ output_hidden_states: bool = False,
335
+ return_dict: bool = True,
336
  deterministic: bool = True,
337
  ):
338
  outputs = self.model(
 
342
  decoder_attention_mask=decoder_attention_mask,
343
  position_ids=position_ids,
344
  decoder_position_ids=decoder_position_ids,
345
+ output_attentions=output_attentions,
346
+ output_hidden_states=output_hidden_states,
347
+ return_dict=return_dict,
348
  deterministic=deterministic,
349
  )
350
 
 
358
  else:
359
  lm_logits = self.lm_head(hidden_states)
360
 
361
+ if not return_dict:
362
+ output = (lm_logits,) + outputs[1:]
363
+ return output
364
+
365
  return FlaxSeq2SeqLMOutput(
366
  logits=lm_logits,
367
  decoder_hidden_states=outputs.decoder_hidden_states,
 
373
  )
374
 
375
 
376
+ class DalleBart(FlaxBartPreTrainedModel, FlaxBartForConditionalGeneration):
377
+ """
378
+ Edits:
379
+ - renamed from FlaxBartForConditionalGeneration
380
+ - uses custom FlaxBartPreTrainedModel
381
+ - uses custom FlaxBartForConditionalGenerationModule
382
+ - no bias in decode method
383
+ """
384
+
385
+ module_class = FlaxBartForConditionalGenerationModule
386
 
387
  def decode(
388
  self,
 
392
  decoder_attention_mask: Optional[jnp.ndarray] = None,
393
  decoder_position_ids: Optional[jnp.ndarray] = None,
394
  past_key_values: dict = None,
395
+ output_attentions: Optional[bool] = None,
396
+ output_hidden_states: Optional[bool] = None,
397
+ return_dict: Optional[bool] = None,
398
  train: bool = False,
399
  params: dict = None,
400
  dropout_rng: PRNGKey = None,
401
  ):
402
+ output_attentions = (
403
+ output_attentions
404
+ if output_attentions is not None
405
+ else self.config.output_attentions
406
+ )
407
+ output_hidden_states = (
408
+ output_hidden_states
409
+ if output_hidden_states is not None
410
+ else self.config.output_hidden_states
411
+ )
412
+ return_dict = (
413
+ return_dict if return_dict is not None else self.config.return_dict
414
+ )
 
 
 
415
 
 
 
 
416
  encoder_hidden_states = encoder_outputs[0]
417
  if encoder_attention_mask is None:
418
  batch_size, sequence_length = encoder_hidden_states.shape[:2]
 
474
  else:
475
  lm_logits = module.lm_head(hidden_states)
476
 
 
477
  return lm_logits, outputs
478
 
479
  outputs = self.module.apply(
 
483
  decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
484
  encoder_hidden_states=encoder_hidden_states,
485
  encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
486
+ output_attentions=output_attentions,
487
+ output_hidden_states=output_hidden_states,
488
+ return_dict=return_dict,
489
  deterministic=not train,
490
  rngs=rngs,
491
  mutable=mutable,
 
497
  else:
498
  (lm_logits, decoder_outputs), past = outputs
499
 
500
+ if return_dict:
501
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
502
+ logits=lm_logits,
503
+ hidden_states=decoder_outputs.hidden_states,
504
+ attentions=decoder_outputs.attentions,
505
+ cross_attentions=decoder_outputs.cross_attentions,
506
+ )
507
+ else:
508
+ outputs = (lm_logits,) + decoder_outputs[1:]
509
 
510
  # add updated cache to model output
511
+ if past_key_values is not None and return_dict:
512
  outputs["past_key_values"] = unfreeze(past["cache"])
513
  return outputs
514
+ elif past_key_values is not None and not return_dict:
515
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
516
 
517
  return outputs
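
A hedged usage sketch of the refactored class, assuming a placeholder checkpoint id and the Flax generation API of transformers at the time of this commit (keyword names may differ in other versions):

    import jax
    import jax.numpy as jnp
    from dalle_mini.model import DalleBart

    model = DalleBart.from_pretrained("dalle-mini/dalle-mini")  # hypothetical checkpoint id
    input_ids = jnp.zeros((1, 64), dtype="i4")                  # stand-in for BART-tokenized text
    # Flax generation is functional, so the RNG key is passed explicitly when sampling
    out = model.generate(input_ids, prng_key=jax.random.PRNGKey(0), do_sample=True)
    # out.sequences has shape (1, config.max_length), i.e. (1, image_length + 1): BOS + image codes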