Text Generation · Transformers · PyTorch · English · llama · custom_code · text-generation-inference · Inference Endpoints
omkarthawakar committed
Commit 07a3f4b (1 parent: 3939a19)

Delete modeling_mobillama.py

Files changed (1)
  1. modeling_mobillama.py +0 -898
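
This repo is tagged `custom_code`, so before this commit the file below is what `transformers` executed when the model was loaded with `trust_remote_code=True`. A minimal, hypothetical loading sketch; the repo id is an assumption for illustration, not taken from this page:

```python
# Hypothetical usage of this repo's custom modeling code prior to this commit.
# The repo id below is an assumption; trust_remote_code=True is what makes
# transformers run the repo's own modeling_mobillama.py / configuration_llama.py.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "MBZUAI/MobiLlama-05B"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("I believe the meaning of life is", return_tensors="pt")
outputs = model.generate(**inputs, max_length=30)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```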
modeling_mobillama.py DELETED
@@ -1,898 +0,0 @@
- # coding=utf-8
- # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
- #
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
- # and OPT implementations in this library. It has been modified from its
- # original forms to accommodate minor architectural differences compared
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ PyTorch LLaMA model."""
- import math
- from typing import List, Optional, Tuple, Union
-
- import torch
- import torch.utils.checkpoint
- from torch import nn
- from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-
- from transformers.activations import ACT2FN
- from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
- from transformers.modeling_utils import PreTrainedModel
- from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
- # from transformers.models.llama.configuration_llama import LlamaConfig
- from .configuration_llama import LlamaConfig
-
- from flash_attn import flash_attn_func
-
-
- logger = logging.get_logger(__name__)
-
- _CONFIG_FOR_DOC = "LlamaConfig"
-
-
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
- def _make_causal_mask(
-     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
- ):
-     """
-     Make causal mask used for bi-directional self-attention.
-     """
-     bsz, tgt_len = input_ids_shape
-     mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
-     mask_cond = torch.arange(mask.size(-1), device=device)
-     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
-     mask = mask.to(dtype)
-
-     if past_key_values_length > 0:
-         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
-     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
-
-
- # Copied from transformers.models.bart.modeling_bart._expand_mask
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
-     """
-     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
-     """
-     bsz, src_len = mask.size()
-     tgt_len = tgt_len if tgt_len is not None else src_len
-
-     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
-     inverted_mask = 1.0 - expanded_mask
-
-     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-
- class LlamaRMSNorm(nn.Module):
-     def __init__(self, hidden_size, eps=1e-6):
-         """
-         LlamaRMSNorm is equivalent to T5LayerNorm
-         """
-         super().__init__()
-         self.weight = nn.Parameter(torch.ones(hidden_size))
-         self.variance_epsilon = eps
-
-     def forward(self, hidden_states):
-         input_dtype = hidden_states.dtype
-         # compute the variance in float32 for numerical stability, then cast back
-         variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
-         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
-
-         return (self.weight * hidden_states).to(input_dtype)
-
-
- class LlamaRotaryEmbedding(torch.nn.Module):
-     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
-         super().__init__()
-         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
-         self.register_buffer("inv_freq", inv_freq)
-
-         # Build here to make `torch.jit.trace` work.
-         self.max_seq_len_cached = max_position_embeddings
-         t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
-         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-         # Different from paper, but it uses a different permutation in order to obtain the same calculation
-         emb = torch.cat((freqs, freqs), dim=-1)
-         self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
-         self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-
-     def forward(self, x, seq_len=None):
-         # x: [bs, num_attention_heads, seq_len, head_size]
-         # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
-         if seq_len > self.max_seq_len_cached:
-             self.max_seq_len_cached = seq_len
-             t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
-             freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-             # Different from paper, but it uses a different permutation in order to obtain the same calculation
-             emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
-             self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
-             self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-         return (
-             self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-             self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-         )
-
-
- def rotate_half(x):
-     """Rotates half the hidden dims of the input."""
-     x1 = x[..., : x.shape[-1] // 2]
-     x2 = x[..., x.shape[-1] // 2 :]
-     return torch.cat((-x2, x1), dim=-1)
-
-
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
-     # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
-     cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
-     sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
-     cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
-     sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
-     q_embed = (q * cos) + (rotate_half(q) * sin)
-     k_embed = (k * cos) + (rotate_half(k) * sin)
-     return q_embed, k_embed
-
-
- class LlamaMLP(nn.Module):
-     def __init__(
-         self,
-         hidden_size: int,
-         intermediate_size: int,
-         hidden_act: str,
-     ):
-         super().__init__()
-         self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
-         self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
-         self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
-         self.act_fn = ACT2FN[hidden_act]
-
-     def forward(self, x):
-         return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
-
-
- class LlamaAttention(nn.Module):
-     """Multi-headed attention from 'Attention Is All You Need' paper"""
-
-     def __init__(self, config: LlamaConfig):
-         super().__init__()
-         self.config = config
-         self.hidden_size = config.hidden_size
-         self.num_heads = config.num_attention_heads
-         self.head_dim = self.hidden_size // self.num_heads
-         self.max_position_embeddings = config.max_position_embeddings
-
-         if (self.head_dim * self.num_heads) != self.hidden_size:
-             raise ValueError(
-                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
-                 f" and `num_heads`: {self.num_heads})."
-             )
-         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
-         self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
-
-     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
-         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: bool = False,
-         use_cache: bool = False,
-     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-         bsz, q_len, _ = hidden_states.size()
-
-         query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-         key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-         value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value[0].shape[-2]
-         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
-         # [bsz, nh, t, hd]
-
-         if past_key_value is not None:
-             # reuse k, v, self_attention
-             key_states = torch.cat([past_key_value[0], key_states], dim=2)
-             value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
-         past_key_value = (key_states, value_states) if use_cache else None
-
-         # The eager attention path below was replaced by the FlashAttention call
-         # that follows; it is kept (commented out) for reference.
-         # attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-         #
-         # if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
-         #     raise ValueError(
-         #         f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
-         #         f" {attn_weights.size()}"
-         #     )
-         #
-         # if attention_mask is not None:
-         #     if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
-         #         raise ValueError(
-         #             f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
-         #         )
-         #     attn_weights = attn_weights + attention_mask
-         #     attn_weights = torch.max(
-         #         attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min, device=attn_weights.device)
-         #     )
-         #
-         # # upcast attention to fp32
-         # attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
-         # attn_output = torch.matmul(attn_weights, value_states)
-         #
-         # if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
-         #     raise ValueError(
-         #         f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
-         #         f" {attn_output.size()}"
-         #     )
-         #
-         # attn_output = attn_output.transpose(1, 2)
-
-         # flash_attn_func expects (bsz, seq_len, num_heads, head_dim) fp16/bf16 tensors
-         # and applies the causal mask internally, so `attention_mask` is not used here.
-         attn_output = flash_attn_func(
-             q=query_states.transpose(1, 2).to(torch.bfloat16),
-             k=key_states.transpose(1, 2).to(torch.bfloat16),
-             v=value_states.transpose(1, 2).to(torch.bfloat16),
-             causal=True)
-         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-         attn_output = attn_output.to(query_states.dtype)
-
-         attn_output = self.o_proj(attn_output)
-
-         # if not output_attentions:
-         #     attn_weights = None
-         # attention weights are never materialized on the FlashAttention path
-         assert not output_attentions
-         attn_weights = None
-
-         return attn_output, attn_weights, past_key_value
-
-
- class LlamaDecoderLayer(nn.Module):
-     def __init__(self, config: LlamaConfig, mlp):
-         super().__init__()
-         self.hidden_size = config.hidden_size
-         self.self_attn = LlamaAttention(config=config)
-         # MobiLlama: a single LlamaMLP instance (built once in LlamaModel.__init__)
-         # is shared by every decoder layer, instead of a per-layer
-         # LlamaMLP(hidden_size=..., intermediate_size=..., hidden_act=...).
-         self.mlp = mlp
-         self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-         self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: Optional[bool] = False,
-         use_cache: Optional[bool] = False,
-     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
-         """
-         Args:
-             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
-             attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
-                 `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
-             output_attentions (`bool`, *optional*):
-                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
-                 returned tensors for more detail.
-             use_cache (`bool`, *optional*):
-                 If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
-                 (see `past_key_values`).
-             past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
-         """
-
-         residual = hidden_states
-
-         hidden_states = self.input_layernorm(hidden_states)
-
-         # Self Attention
-         hidden_states, self_attn_weights, present_key_value = self.self_attn(
-             hidden_states=hidden_states,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_value=past_key_value,
-             output_attentions=output_attentions,
-             use_cache=use_cache,
-         )
-         hidden_states = residual + hidden_states
-
-         # Fully Connected
-         residual = hidden_states
-         hidden_states = self.post_attention_layernorm(hidden_states)
-         hidden_states = self.mlp(hidden_states)
-         hidden_states = residual + hidden_states
-
-         outputs = (hidden_states,)
-
-         if output_attentions:
-             outputs += (self_attn_weights,)
-
-         if use_cache:
-             outputs += (present_key_value,)
-
-         return outputs
-
-
- LLAMA_START_DOCSTRING = r"""
-     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
-     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
-     etc.)
-
-     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
-     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
-     and behavior.
-
-     Parameters:
-         config ([`LlamaConfig`]):
-             Model configuration class with all the parameters of the model. Initializing with a config file does not
-             load the weights associated with the model, only the configuration. Check out the
-             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
- """
-
-
- @add_start_docstrings(
-     "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
-     LLAMA_START_DOCSTRING,
- )
- class LlamaPreTrainedModel(PreTrainedModel):
-     config_class = LlamaConfig
-     base_model_prefix = "model"
-     supports_gradient_checkpointing = True
-     _no_split_modules = ["LlamaDecoderLayer"]
-     _skip_keys_device_placement = "past_key_values"
-     _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
-
-     def _init_weights(self, module):
-         std = self.config.initializer_range
-         if isinstance(module, nn.Linear):
-             module.weight.data.normal_(mean=0.0, std=std)
-             if module.bias is not None:
-                 module.bias.data.zero_()
-         elif isinstance(module, nn.Embedding):
-             module.weight.data.normal_(mean=0.0, std=std)
-             if module.padding_idx is not None:
-                 module.weight.data[module.padding_idx].zero_()
-
-     def _set_gradient_checkpointing(self, module, value=False):
-         if isinstance(module, LlamaModel):
-             module.gradient_checkpointing = value
-
-
- LLAMA_INPUTS_DOCSTRING = r"""
-     Args:
-         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
-             Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
-             it.
-
-             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-             [`PreTrainedTokenizer.__call__`] for details.
-
-             [What are input IDs?](../glossary#input-ids)
-         attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
-             - 1 for tokens that are **not masked**,
-             - 0 for tokens that are **masked**.
-
-             [What are attention masks?](../glossary#attention-mask)
-
-             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-             [`PreTrainedTokenizer.__call__`] for details.
-
-             If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
-             `past_key_values`).
-
-             If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
-             and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
-             information on the default strategy.
-         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
-             config.n_positions - 1]`.
-
-             [What are position IDs?](../glossary#position-ids)
-         past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
-             Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
-             `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
-
-             Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
-             `past_key_values` input) to speed up sequential decoding.
-
-             If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
-             don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
-             `decoder_input_ids` of shape `(batch_size, sequence_length)`.
-         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
-             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
-             is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
-             model's internal embedding lookup matrix.
-         use_cache (`bool`, *optional*):
-             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
-             `past_key_values`).
-         output_attentions (`bool`, *optional*):
-             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-             tensors for more detail.
-         output_hidden_states (`bool`, *optional*):
-             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-             more detail.
-         return_dict (`bool`, *optional*):
-             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
- """
-
-
- @add_start_docstrings(
-     "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
-     LLAMA_START_DOCSTRING,
- )
- class LlamaModel(LlamaPreTrainedModel):
-     """
-     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
-
-     Args:
-         config: LlamaConfig
-     """
-
-     def __init__(self, config: LlamaConfig):
-         super().__init__(config)
-         self.padding_idx = config.pad_token_id
-         self.vocab_size = config.vocab_size
-
-         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
-         # MobiLlama: build one MLP and share the same instance across all decoder layers
-         mlp = LlamaMLP(
-             hidden_size=config.hidden_size,
-             intermediate_size=config.intermediate_size,
-             hidden_act=config.hidden_act,
-         )
-         self.layers = nn.ModuleList([LlamaDecoderLayer(config, mlp) for _ in range(config.num_hidden_layers)])
-         self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
-         self.gradient_checkpointing = False
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.embed_tokens = value
-
-     # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
-     def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
-         # create causal mask
-         # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-         combined_attention_mask = None
-         if input_shape[-1] > 1:
-             combined_attention_mask = _make_causal_mask(
-                 input_shape,
-                 inputs_embeds.dtype,
-                 device=inputs_embeds.device,
-                 past_key_values_length=past_key_values_length,
-             )
-
-         if attention_mask is not None:
-             # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
-             expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
-                 inputs_embeds.device
-             )
-             combined_attention_mask = (
-                 expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
-             )
-
-         return combined_attention_mask
-
-
-     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, BaseModelOutputWithPast]:
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         use_cache = use_cache if use_cache is not None else self.config.use_cache
-
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         # retrieve input_ids and inputs_embeds
-         if input_ids is not None and inputs_embeds is not None:
-             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
-         elif input_ids is not None:
-             batch_size, seq_length = input_ids.shape
-         elif inputs_embeds is not None:
-             batch_size, seq_length, _ = inputs_embeds.shape
-         else:
-             raise ValueError("You have to specify either input_ids or inputs_embeds")
-
-         seq_length_with_past = seq_length
-         past_key_values_length = 0
-
-         if past_key_values is not None:
-             past_key_values_length = past_key_values[0][0].shape[2]
-             seq_length_with_past = seq_length_with_past + past_key_values_length
-
-         if position_ids is None:
-             device = input_ids.device if input_ids is not None else inputs_embeds.device
-             position_ids = torch.arange(
-                 past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
-             )
-             position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
-         else:
-             position_ids = position_ids.view(-1, seq_length).long()
-
-         if inputs_embeds is None:
-             inputs_embeds = self.embed_tokens(input_ids)
-         # embed positions
-         if attention_mask is None:
-             attention_mask = torch.ones(
-                 (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
-             )
-         attention_mask = self._prepare_decoder_attention_mask(
-             attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
-         )
-
-         hidden_states = inputs_embeds
-
-         if self.gradient_checkpointing and self.training:
-             if use_cache:
-                 logger.warning_once(
-                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                 )
-                 use_cache = False
-
-         # decoder layers
-         all_hidden_states = () if output_hidden_states else None
-         all_self_attns = () if output_attentions else None
-         next_decoder_cache = () if use_cache else None
-
-         for idx, decoder_layer in enumerate(self.layers):
-             if output_hidden_states:
-                 all_hidden_states += (hidden_states,)
-
-             past_key_value = past_key_values[idx] if past_key_values is not None else None
-
-             if self.gradient_checkpointing and self.training:
-
-                 def create_custom_forward(module):
-                     def custom_forward(*inputs):
-                         # None for past_key_value
-                         return module(*inputs, output_attentions, None)
-
-                     return custom_forward
-
-                 layer_outputs = torch.utils.checkpoint.checkpoint(
-                     create_custom_forward(decoder_layer),
-                     hidden_states,
-                     attention_mask,
-                     position_ids,
-                     None,
-                 )
-             else:
-                 layer_outputs = decoder_layer(
-                     hidden_states,
-                     attention_mask=attention_mask,
-                     position_ids=position_ids,
-                     past_key_value=past_key_value,
-                     output_attentions=output_attentions,
-                     use_cache=use_cache,
-                 )
-
-             hidden_states = layer_outputs[0]
-
-             if use_cache:
-                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
-
-             if output_attentions:
-                 all_self_attns += (layer_outputs[1],)
-
-         hidden_states = self.norm(hidden_states)
-
-         # add hidden states from the last decoder layer
-         if output_hidden_states:
-             all_hidden_states += (hidden_states,)
-
-         next_cache = next_decoder_cache if use_cache else None
-         if not return_dict:
-             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
-         return BaseModelOutputWithPast(
-             last_hidden_state=hidden_states,
-             past_key_values=next_cache,
-             hidden_states=all_hidden_states,
-             attentions=all_self_attns,
-         )
-
-
- class LlamaForCausalLM(LlamaPreTrainedModel):
-     def __init__(self, config):
-         super().__init__(config)
-         self.model = LlamaModel(config)
-
-         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.model.embed_tokens = value
-
-     def get_output_embeddings(self):
-         return self.lm_head
-
-     def set_output_embeddings(self, new_embeddings):
-         self.lm_head = new_embeddings
-
-     def set_decoder(self, decoder):
-         self.model = decoder
-
-     def get_decoder(self):
-         return self.model
-
-     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, CausalLMOutputWithPast]:
-         r"""
-         Args:
-             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-                 Labels for computing the language modeling loss. Indices should either be in `[0, ...,
-                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
-         Returns:
-
-         Example:
-
-         ```python
-         >>> from transformers import AutoTokenizer, LlamaForCausalLM
-
-         >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
-         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
-
-         >>> prompt = "Hey, are you conscious? Can you talk to me?"
-         >>> inputs = tokenizer(prompt, return_tensors="pt")
-
-         >>> # Generate
-         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
-         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
-         ```"""
-
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
-         outputs = self.model(
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-
-         hidden_states = outputs[0]
-         logits = self.lm_head(hidden_states)
-
-         loss = None
-         if labels is not None:
-             # Shift so that tokens < n predict n
-             shift_logits = logits[..., :-1, :].contiguous()
-             shift_labels = labels[..., 1:].contiguous()
-             # Flatten the tokens
-             loss_fct = CrossEntropyLoss()
-             shift_logits = shift_logits.view(-1, self.config.vocab_size)
-             shift_labels = shift_labels.view(-1)
-             # Enable model parallelism
-             shift_labels = shift_labels.to(shift_logits.device)
-             loss = loss_fct(shift_logits, shift_labels)
-
-         if not return_dict:
-             output = (logits,) + outputs[1:]
-             return (loss,) + output if loss is not None else output
-
-         return CausalLMOutputWithPast(
-             loss=loss,
-             logits=logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-         )
-
-     def prepare_inputs_for_generation(
-         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
-     ):
-         if past_key_values:
-             input_ids = input_ids[:, -1:]
-
-         position_ids = kwargs.get("position_ids", None)
-         if attention_mask is not None and position_ids is None:
-             # create position_ids on the fly for batch generation
-             position_ids = attention_mask.long().cumsum(-1) - 1
-             position_ids.masked_fill_(attention_mask == 0, 1)
-             if past_key_values:
-                 position_ids = position_ids[:, -1].unsqueeze(-1)
-
-         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
-         if inputs_embeds is not None and past_key_values is None:
-             model_inputs = {"inputs_embeds": inputs_embeds}
-         else:
-             model_inputs = {"input_ids": input_ids}
-
-         model_inputs.update(
-             {
-                 "position_ids": position_ids,
-                 "past_key_values": past_key_values,
-                 "use_cache": kwargs.get("use_cache"),
-                 "attention_mask": attention_mask,
-             }
-         )
-         return model_inputs
-
-     @staticmethod
-     def _reorder_cache(past_key_values, beam_idx):
-         reordered_past = ()
-         for layer_past in past_key_values:
-             reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
-         return reordered_past
-
-
- @add_start_docstrings(
-     """
-     The LLaMa Model transformer with a sequence classification head on top (linear layer).
-
-     [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
-     (e.g. GPT-2) do.
-
-     Since it does classification on the last token, it requires to know the position of the last token. If a
-     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
-     no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
-     padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
-     each row of the batch).
-     """,
-     LLAMA_START_DOCSTRING,
- )
- class LlamaForSequenceClassification(LlamaPreTrainedModel):
-     _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
-
-     def __init__(self, config):
-         super().__init__(config)
-         self.num_labels = config.num_labels
-         self.model = LlamaModel(config)
-         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.model.embed_tokens = value
-
-     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
-         r"""
-         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
-             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
-             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
-         """
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         transformer_outputs = self.model(
-             input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-         hidden_states = transformer_outputs[0]
-         logits = self.score(hidden_states)
-
-         if input_ids is not None:
-             batch_size = input_ids.shape[0]
-         else:
-             batch_size = inputs_embeds.shape[0]
-
-         if self.config.pad_token_id is None and batch_size != 1:
-             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
-         if self.config.pad_token_id is None:
-             sequence_lengths = -1
-         else:
-             if input_ids is not None:
-                 sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
-             else:
-                 sequence_lengths = -1
-
-         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
-
-         loss = None
-         if labels is not None:
-             labels = labels.to(logits.device)
-             if self.config.problem_type is None:
-                 if self.num_labels == 1:
-                     self.config.problem_type = "regression"
-                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
-                     self.config.problem_type = "single_label_classification"
-                 else:
-                     self.config.problem_type = "multi_label_classification"
-
-             if self.config.problem_type == "regression":
-                 loss_fct = MSELoss()
-                 if self.num_labels == 1:
-                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
-                 else:
-                     loss = loss_fct(pooled_logits, labels)
-             elif self.config.problem_type == "single_label_classification":
-                 loss_fct = CrossEntropyLoss()
-                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
-             elif self.config.problem_type == "multi_label_classification":
-                 loss_fct = BCEWithLogitsLoss()
-                 loss = loss_fct(pooled_logits, labels)
-         if not return_dict:
-             output = (pooled_logits,) + transformer_outputs[1:]
-             return ((loss,) + output) if loss is not None else output
-
-         return SequenceClassifierOutputWithPast(
-             loss=loss,
-             logits=pooled_logits,
-             past_key_values=transformer_outputs.past_key_values,
-             hidden_states=transformer_outputs.hidden_states,
-             attentions=transformer_outputs.attentions,
-         )
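
The distinctive change in the deleted file relative to stock `modeling_llama.py` is that `LlamaModel.__init__` builds a single `LlamaMLP` and hands the same instance to every `LlamaDecoderLayer`, so all layers share one set of FFN weights. A back-of-the-envelope sketch of what that sharing saves; the dimensions below are illustrative stand-ins, not MobiLlama's actual configuration:

```python
# Illustrative parameter count for the shared-FFN design in the deleted file.
# hidden_size / intermediate_size / num_hidden_layers are made-up values.
hidden_size = 2048
intermediate_size = 5632
num_hidden_layers = 22

# LlamaMLP holds gate_proj and up_proj (hidden -> intermediate) plus
# down_proj (intermediate -> hidden), all bias-free linear layers:
ffn_params = 3 * hidden_size * intermediate_size

print(f"FFN params per layer:           {ffn_params / 1e6:.1f}M")
print(f"unshared, {num_hidden_layers} layers:           {num_hidden_layers * ffn_params / 1e6:.1f}M")
print(f"shared (one MLP for all layers): {ffn_params / 1e6:.1f}M")
```

Everything else in the forward pass is unchanged by the sharing: each layer still applies its own attention and layer norms, and the shared MLP is simply invoked once per layer on that layer's hidden states.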