Yukang committed
Commit
3f0fc83
1 Parent(s): 2960532

Delete modeling_llama_6_onlylocal_flashattn.py

modeling_llama_6_onlylocal_flashattn.py DELETED
@@ -1,1029 +0,0 @@
- # coding=utf-8
- # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
- #
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
- # and OPT implementations in this library. It has been modified from its
- # original forms to accommodate minor architectural differences compared
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """ PyTorch LLaMA model."""
- import math
- import warnings  # used by LlamaAttention.forward(); was missing in the original file
- from typing import List, Optional, Tuple, Union
-
- import torch
- import torch.utils.checkpoint
- from torch import nn
- from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
-
- from transformers.activations import ACT2FN
- from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
- from transformers.modeling_utils import PreTrainedModel
- from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
- from configuration_llama import LlamaConfig
- from matmul_mask_2 import matmul_with_mask  # local helper; not referenced anywhere else in this file
-
- from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func
- from flash_attn.bert_padding import unpad_input, pad_input
- from einops import rearrange
-
- logger = logging.get_logger(__name__)
-
- _CONFIG_FOR_DOC = "LlamaConfig"
-
-
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
- def _make_causal_mask(
-     input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
- ):
-     """
-     Make causal mask used for bi-directional self-attention.
-     """
-     bsz, tgt_len = input_ids_shape
-     mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
-     mask_cond = torch.arange(mask.size(-1), device=device)
-     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
-     mask = mask.to(dtype)
-
-     if past_key_values_length > 0:
-         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
-     return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
-
-
- # Copied from transformers.models.bart.modeling_bart._expand_mask
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
-     """
-     Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
-     """
-     bsz, src_len = mask.size()
-     tgt_len = tgt_len if tgt_len is not None else src_len
-
-     expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
-
-     inverted_mask = 1.0 - expanded_mask
-
-     return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
-
-
- class LlamaRMSNorm(nn.Module):
-     def __init__(self, hidden_size, eps=1e-6):
-         """
-         LlamaRMSNorm is equivalent to T5LayerNorm
-         """
-         super().__init__()
-         self.weight = nn.Parameter(torch.ones(hidden_size))
-         self.variance_epsilon = eps
-
-     def forward(self, hidden_states):
-         variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
-         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
-
-         # convert into half-precision if necessary
-         if self.weight.dtype in [torch.float16, torch.bfloat16]:
-             hidden_states = hidden_states.to(self.weight.dtype)
-
-         return self.weight * hidden_states
-
-
- class LlamaRotaryEmbedding(torch.nn.Module):
-     def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
-         super().__init__()
-
-         max_position_embeddings = 8192 * 2  # hardcoded override of the argument: cache is built for 16384 positions
-         #a = 16 #9 #Alpha value
-         #base = base * a ** (dim / (dim-2)) #Base change formula
-
-         inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
-         self.register_buffer("inv_freq", inv_freq)
-
-         #max_position_embeddings = 8192 #4096
-
-         # Build here to make `torch.jit.trace` work.
-         self.max_seq_len_cached = max_position_embeddings
-         t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
-         self.scale = 1 / 8  # linear position interpolation: positions are compressed by 8x
-         t *= self.scale
-
-         freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-         # Different from paper, but it uses a different permutation in order to obtain the same calculation
-         emb = torch.cat((freqs, freqs), dim=-1)
-         self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
-         self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-
-     def forward(self, x, seq_len=None):
-         # x: [bs, num_attention_heads, seq_len, head_size]
-         # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
-         if seq_len > self.max_seq_len_cached:
-             self.max_seq_len_cached = seq_len
-             t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
-             t = t * self.scale  # apply the same interpolation scale as in __init__ (the original rebuild omitted this)
-             freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-             # Different from paper, but it uses a different permutation in order to obtain the same calculation
-             emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
-             self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
-             self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
-         return (
-             self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-             self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-         )
-
-
- def rotate_half(x):
-     """Rotates half the hidden dims of the input."""
-     x1 = x[..., : x.shape[-1] // 2]
-     x2 = x[..., x.shape[-1] // 2 :]
-     return torch.cat((-x2, x1), dim=-1)
-
-
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
-     # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
-     cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
-     sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
-     cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
-     sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
-     q_embed = (q * cos) + (rotate_half(q) * sin)
-     k_embed = (k * cos) + (rotate_half(k) * sin)
-     return q_embed, k_embed
-
-
- class LlamaMLP(nn.Module):
-     def __init__(
-         self,
-         hidden_size: int,
-         intermediate_size: int,
-         hidden_act: str,
-     ):
-         super().__init__()
-         self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
-         self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
-         self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
-         self.act_fn = ACT2FN[hidden_act]
-
-     def forward(self, x):
-         return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
-
-
- class LlamaAttention(nn.Module):
-     """Multi-headed attention from 'Attention Is All You Need' paper"""
-
-     def __init__(self, config: LlamaConfig, idx):
-         super().__init__()
-         self.config = config
-         self.hidden_size = config.hidden_size
-         self.num_heads = config.num_attention_heads
-         self.head_dim = self.hidden_size // self.num_heads
-         self.max_position_embeddings = config.max_position_embeddings
-
-         if (self.head_dim * self.num_heads) != self.hidden_size:
-             raise ValueError(
-                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
-                 f" and `num_heads`: {self.num_heads})."
-             )
-         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
-         self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
-
-         self.idx = idx
-         # chunk length for blockwise local attention; alternates 2048/1536 across layers
-         self.split_size = [2048, 1536][idx % 2]
-         # The attributes below appear to be remnants of a global/top-k attention variant;
-         # none of them are used by the local-only forward() in this file.
-         self.topk = 2048 #128 #[256, 128][idx % 2] #20 #10
-         self.global_size = 128 #256
-         rank = 1
-         self.lora_proj_aux2ori_q_proj_small = nn.Linear(self.head_dim, rank, bias=False)
-         self.lora_proj_aux2ori_k_proj_small = nn.Linear(self.head_dim, rank, bias=False)
-         self.loss_ce = nn.CrossEntropyLoss()
-         self.loss_mse = nn.MSELoss() #(reduction='sum')
-         self.loss_bse = nn.BCEWithLogitsLoss()
-
-     def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
-         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.Tensor] = None,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: bool = False,
-         use_cache: bool = False,
-     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-         """Input shape: Batch x Time x Channel
-
-         attention_mask: [bsz, q_len]
-         """
-         if output_attentions:
-             warnings.warn(
-                 "Output attentions is not supported for patched `LlamaAttention`, returning `None` instead."
-             )
-
-         bsz, q_len, _ = hidden_states.size()
-
-         query_states = (
-             self.q_proj(hidden_states)
-             .view(bsz, q_len, self.num_heads, self.head_dim)
-             .transpose(1, 2)
-         )
-         key_states = (
-             self.k_proj(hidden_states)
-             .view(bsz, q_len, self.num_heads, self.head_dim)
-             .transpose(1, 2)
-         )
-         value_states = (
-             self.v_proj(hidden_states)
-             .view(bsz, q_len, self.num_heads, self.head_dim)
-             .transpose(1, 2)
-         )
-         # [bsz, q_len, nh, hd]
-         # [bsz, nh, q_len, hd]
-
-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value[0].shape[-2]
-         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-         query_states, key_states = apply_rotary_pos_emb(
-             query_states, key_states, cos, sin, position_ids
-         )
-
-         # Past Key value support
-         if past_key_value is not None:
-             # reuse k, v, self_attention
-             key_states = torch.cat([past_key_value[0], key_states], dim=2)
-             value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
-         past_key_value = (key_states, value_states) if use_cache else None
-
-         # Flash attention codes from
-         # https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/flash_attention.py
-
-         # Blockwise local attention: q/k/v are split into chunks of self.split_size,
-         # and chunk i's queries attend (causally) only to chunk i's keys/values.
-         q = query_states.split(self.split_size, dim=2)
-         k = key_states.split(self.split_size, dim=2)
-         v = value_states.split(self.split_size, dim=2)
-         _attention_mask = attention_mask.split(self.split_size, dim=1)
-         output_list = []
-
-         for i in range(len(q)):
-             # transform the data into the format required by flash attention
-             qkv = torch.stack(
-                 [q[i], k[i], v[i]], dim=2
-             )  # [bsz, nh, 3, q_len, hd]
-             qkv = qkv.transpose(1, 3)  # [bsz, q_len, 3, nh, hd]
-             # We have disabled _prepare_decoder_attention_mask in LlamaModel
-             # the attention_mask should be the same as the key_padding_mask
-             q_len_split = q[i].shape[2]
-             #kv_seq_len_split = k[i].shape[2]
-             key_padding_mask = _attention_mask[i]
-
-             if key_padding_mask is None:
-                 qkv = rearrange(qkv, "b s ... -> (b s) ...")
-                 max_s = q_len_split
-                 cu_q_lens = torch.arange(
-                     0, (bsz + 1) * q_len_split, step=q_len_split, dtype=torch.int32, device=qkv.device
-                 )
-                 output = flash_attn_varlen_qkvpacked_func(
-                     qkv, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
-                 )
-                 output = rearrange(output, "(b s) ... -> b s ...", b=bsz)
-             else:
-                 nheads = qkv.shape[-2]
-                 x = rearrange(qkv, "b s three h d -> b s (three h d)")
-                 x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
-                 x_unpad = rearrange(
-                     x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
-                 )
-                 output_unpad = flash_attn_varlen_qkvpacked_func(
-                     x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
-                 )
-                 output = rearrange(
-                     pad_input(
-                         rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len_split
-                     ),
-                     "b s (h d) -> b s h d",
-                     h=nheads,
-                 )
-             output_list.append(output)
-         output = torch.cat(output_list, dim=1)
-         return self.o_proj(rearrange(output, "b s h d -> b s (h d)")), None, past_key_value
-
-
-     def forward_ori(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: bool = False,
-         use_cache: bool = False,
-     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-         # Legacy dense-attention path; not called by LlamaDecoderLayer (which uses forward()
-         # above). Note it returns a 4-tuple with an extra `loss` element.
-         bsz, q_len, _ = hidden_states.size()
-
-         query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-         key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-         value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-
-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value[0].shape[-2]
-
-         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
-         # [bsz, nh, t, hd]
-
-         if past_key_value is not None:
-             # reuse k, v, self_attention
-             key_states = torch.cat([past_key_value[0], key_states], dim=2)
-             value_states = torch.cat([past_key_value[1], value_states], dim=2)
-
-         past_key_value = (key_states, value_states) if use_cache else None
-
-         topk = min(self.topk, kv_seq_len)
-
-         loss = 0
-         if q_len > 1:
-             q = query_states.split(self.split_size, dim=2)
-             k = key_states.split(self.split_size, dim=2)
-             v = value_states.split(self.split_size, dim=2)
-
-             attn_w = [torch.matmul(q[i], k[i].transpose(2, 3)) / math.sqrt(self.head_dim) for i in range(len(q))]
-
-             attn_output = []
-             for i in range(len(q)):
-
-                 _, _, q_len_split, kv_seq_len_split = attn_w[i].shape
-                 _attention_mask = attention_mask[:, :, i * self.split_size: i * self.split_size + q_len_split, i * self.split_size: i * self.split_size + kv_seq_len_split]
-                 attn_w[i][_attention_mask.repeat(1, self.num_heads, 1, 1) < 0] = torch.tensor(torch.finfo(query_states.dtype).min)
-
-                 # The two lines below belonged to the removed global/top-k attention path;
-                 # `attn_w_global` and `k_topk` are undefined in this file, so they are disabled.
-                 #_inds = torch.arange(i * self.split_size, i * self.split_size + q_len_split, device=query_states.device).reshape(1, 1, -1, 1).repeat(bsz, self.num_heads, 1, 1)
-                 #attn_w_global[:, :, i * self.split_size: i * self.split_size + q_len_split][_inds < k_topk[1].unsqueeze(2)] = torch.tensor(torch.finfo(query_states.dtype).min)
-                 attn_w_split = attn_w[i] #torch.cat([attn_w[i], attn_w_global[:, :, i * self.split_size: i * self.split_size + q_len_split]], dim=-1)
-
-                 attn_w_split = nn.functional.softmax(attn_w_split, dim=-1, dtype=torch.float32).to(query_states.dtype)
-                 attn_o = torch.matmul(attn_w_split, v[i]) #torch.cat([v[i], v_global], dim=2))
-                 attn_output.append(attn_o)
-             attn_output = torch.cat(attn_output, dim=2)
-         else:
-             attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
-             if attention_mask is not None:
-                 if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
-                     raise ValueError(
-                         f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
-                     )
-                 attn_weights = attn_weights + attention_mask
-                 attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
-
-             if not attn_weights.size() in [(bsz, self.num_heads, q_len, kv_seq_len), (bsz * self.num_heads, q_len, kv_seq_len)]:
-                 raise ValueError(
-                     f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
-                     f" {attn_weights.size()}"
-                 )
-             # upcast attention to fp32
-             attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
-             attn_output = torch.matmul(attn_weights, value_states)
-
-         if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
-             raise ValueError(
-                 f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
-                 f" {attn_output.size()}"
-             )
-
-         attn_output = attn_output.transpose(1, 2)
-         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-
-         attn_output = self.o_proj(attn_output)
-
-         if not output_attentions:
-             attn_weights = None
-
-         return attn_output, attn_weights, past_key_value, loss
-
-
- class LlamaDecoderLayer(nn.Module):
-     def __init__(self, config: LlamaConfig, idx):
-         super().__init__()
-         self.hidden_size = config.hidden_size
-         self.self_attn = LlamaAttention(config=config, idx=idx)
-         self.mlp = LlamaMLP(
-             hidden_size=self.hidden_size,
-             intermediate_size=config.intermediate_size,
-             hidden_act=config.hidden_act,
-         )
-         self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-         self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: Optional[bool] = False,
-         use_cache: Optional[bool] = False,
-     ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
-         """
-         Args:
-             hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
-             attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
-                 `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
-             output_attentions (`bool`, *optional*):
-                 Whether or not to return the attentions tensors of all attention layers. See `attentions` under
-                 returned tensors for more detail.
-             use_cache (`bool`, *optional*):
-                 If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
-                 (see `past_key_values`).
-             past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
-         """
-
-         residual = hidden_states
-
-         hidden_states = self.input_layernorm(hidden_states)
-
-         # Self Attention
-         hidden_states, self_attn_weights, present_key_value = self.self_attn(
-             hidden_states=hidden_states,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_value=past_key_value,
-             output_attentions=output_attentions,
-             use_cache=use_cache,
-         )
-         hidden_states = residual + hidden_states
-
-         # Fully Connected
-         residual = hidden_states
-         hidden_states = self.post_attention_layernorm(hidden_states)
-         hidden_states = self.mlp(hidden_states)
-         hidden_states = residual + hidden_states
-
-         outputs = (hidden_states,)
-
-         if output_attentions:
-             outputs += (self_attn_weights,)
-
-         if use_cache:
-             outputs += (present_key_value,)
-
-         return outputs
-
-
- LLAMA_START_DOCSTRING = r"""
-     This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
-     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
-     etc.)
-
-     This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
-     Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
-     and behavior.
-
-     Parameters:
-         config ([`LlamaConfig`]):
-             Model configuration class with all the parameters of the model. Initializing with a config file does not
-             load the weights associated with the model, only the configuration. Check out the
-             [`~PreTrainedModel.from_pretrained`] method to load the model weights.
- """
-
-
- @add_start_docstrings(
-     "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
-     LLAMA_START_DOCSTRING,
- )
- class LlamaPreTrainedModel(PreTrainedModel):
-     config_class = LlamaConfig
-     base_model_prefix = "model"
-     supports_gradient_checkpointing = True
-     _no_split_modules = ["LlamaDecoderLayer"]
-     _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
-
-     def _init_weights(self, module):
-         std = self.config.initializer_range
-         if isinstance(module, nn.Linear):
-             module.weight.data.normal_(mean=0.0, std=std)
-             if module.bias is not None:
-                 module.bias.data.zero_()
-         elif isinstance(module, nn.Embedding):
-             module.weight.data.normal_(mean=0.0, std=std)
-             if module.padding_idx is not None:
-                 module.weight.data[module.padding_idx].zero_()
-
-     def _set_gradient_checkpointing(self, module, value=False):
-         if isinstance(module, LlamaModel):
-             module.gradient_checkpointing = value
-
-
- LLAMA_INPUTS_DOCSTRING = r"""
-     Args:
-         input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
-             Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
-             it.
-
-             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-             [`PreTrainedTokenizer.__call__`] for details.
-
-             [What are input IDs?](../glossary#input-ids)
-         attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
-
-             - 1 for tokens that are **not masked**,
-             - 0 for tokens that are **masked**.
-
-             [What are attention masks?](../glossary#attention-mask)
-
-             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
-             [`PreTrainedTokenizer.__call__`] for details.
-
-             If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
-             `past_key_values`).
-
-             If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
-             and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
-             information on the default strategy.
-
-             - 1 indicates the head is **not masked**,
-             - 0 indicates the head is **masked**.
-         position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
-             config.n_positions - 1]`.
-
-             [What are position IDs?](../glossary#position-ids)
-         past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
-             Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
-             `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
-             `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
-
-             Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
-             blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
-
-             If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
-             don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
-             `decoder_input_ids` of shape `(batch_size, sequence_length)`.
-         inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
-             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
-             is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
-             model's internal embedding lookup matrix.
-         use_cache (`bool`, *optional*):
-             If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
-             `past_key_values`).
-         output_attentions (`bool`, *optional*):
-             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
-             tensors for more detail.
-         output_hidden_states (`bool`, *optional*):
-             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
-             more detail.
-         return_dict (`bool`, *optional*):
-             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
- """
-
-
- @add_start_docstrings(
-     "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
-     LLAMA_START_DOCSTRING,
- )
- class LlamaModel(LlamaPreTrainedModel):
-     """
-     Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
-
-     Args:
-         config: LlamaConfig
-     """
-
-     def __init__(self, config: LlamaConfig):
-         super().__init__(config)
-         self.padding_idx = config.pad_token_id
-         self.vocab_size = config.vocab_size
-
-         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
-         self.layers = nn.ModuleList([LlamaDecoderLayer(config, i) for i in range(config.num_hidden_layers)])
-         self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
-         self.num_hidden_layers = config.num_hidden_layers
-
-         self.gradient_checkpointing = False
-         # Initialize weights and apply final processing
-         self.post_init()
-         self.adapter = nn.Linear(config.hidden_size, 1, bias=False)
-
-     def get_input_embeddings(self):
-         return self.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.embed_tokens = value
-
-     def _prepare_decoder_attention_mask(
-         self, attention_mask, input_shape, inputs_embeds, past_key_values_length
-     ):
-         # Disabled on purpose: the [bsz, seq_len] padding mask is passed through unchanged
-         # so that LlamaAttention.forward() can use it as FlashAttention's key_padding_mask.
-         # [bsz, seq_len]
-         return attention_mask
-
-     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, BaseModelOutputWithPast]:
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         use_cache = use_cache if use_cache is not None else self.config.use_cache
-
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         # retrieve input_ids and inputs_embeds
-         if input_ids is not None and inputs_embeds is not None:
-             raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
-         elif input_ids is not None:
-             batch_size, seq_length = input_ids.shape
-         elif inputs_embeds is not None:
-             batch_size, seq_length, _ = inputs_embeds.shape
-         else:
-             raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
-
-         seq_length_with_past = seq_length
-         past_key_values_length = 0
-
-         if past_key_values is not None:
-             past_key_values_length = past_key_values[0][0].shape[2]
-             seq_length_with_past = seq_length_with_past + past_key_values_length
-
-         if position_ids is None:
-             device = input_ids.device if input_ids is not None else inputs_embeds.device
-             position_ids = torch.arange(
-                 past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
-             )
-             position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
-         else:
-             position_ids = position_ids.view(-1, seq_length).long()
-
-         if inputs_embeds is None:
-             inputs_embeds = self.embed_tokens(input_ids)
-         # embed positions
-         if attention_mask is None:
-             attention_mask = torch.ones(
-                 (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
-             )
-         attention_mask = self._prepare_decoder_attention_mask(
-             attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
-         )
-
-         hidden_states = inputs_embeds
-
-         if self.gradient_checkpointing and self.training:
-             if use_cache:
-                 logger.warning_once(
-                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                 )
-                 use_cache = False
-
-         # decoder layers
-         all_hidden_states = () if output_hidden_states else None
-         all_self_attns = () if output_attentions else None
-         next_decoder_cache = () if use_cache else None
-
-         loss = 0
-         for idx, decoder_layer in enumerate(self.layers):
-             if output_hidden_states:
-                 all_hidden_states += (hidden_states,)
-
-             past_key_value = past_key_values[idx] if past_key_values is not None else None
-
-             if self.gradient_checkpointing and self.training:
-
-                 def create_custom_forward(module):
-                     def custom_forward(*inputs):
-                         # None for past_key_value
-                         return module(*inputs, output_attentions, None)
-
-                     return custom_forward
-
-                 layer_outputs = torch.utils.checkpoint.checkpoint(
-                     create_custom_forward(decoder_layer),
-                     hidden_states,
-                     attention_mask,
-                     position_ids,
-                     None,
-                 )
-             else:
-                 layer_outputs = decoder_layer(
-                     hidden_states,
-                     attention_mask=attention_mask,
-                     position_ids=position_ids,
-                     past_key_value=past_key_value,
-                     output_attentions=output_attentions,
-                     use_cache=use_cache,
-                 )
-
-             hidden_states = layer_outputs[0]
-
-             if use_cache:
-                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
-
-             if output_attentions:
-                 all_self_attns += (layer_outputs[1],)
-
-         hidden_states = self.norm(hidden_states)
-
-         # add hidden states from the last decoder layer
-         if output_hidden_states:
-             all_hidden_states += (hidden_states,)
-
-         next_cache = next_decoder_cache if use_cache else None
-         if not return_dict:
-             return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
-         return BaseModelOutputWithPast(
-             last_hidden_state=hidden_states,
-             past_key_values=next_cache,
-             hidden_states=all_hidden_states,
-             attentions=all_self_attns,
-         )
-
-
- class LlamaForCausalLM(LlamaPreTrainedModel):
-     def __init__(self, config):
-         super().__init__(config)
-         self.model = LlamaModel(config)
-
-         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.model.embed_tokens = value
-
-     def get_output_embeddings(self):
-         return self.lm_head
-
-     def set_output_embeddings(self, new_embeddings):
-         self.lm_head = new_embeddings
-
-     def set_decoder(self, decoder):
-         self.model = decoder
-
-     def get_decoder(self):
-         return self.model
-
-     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-     @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, CausalLMOutputWithPast]:
-         r"""
-         Args:
-             labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
-                 Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
-                 config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
-                 (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
-
-         Returns:
-
-         Example:
-
-         ```python
-         >>> from transformers import AutoTokenizer, LlamaForCausalLM
-
-         >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
-         >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
-
-         >>> prompt = "Hey, are you conscious? Can you talk to me?"
-         >>> inputs = tokenizer(prompt, return_tensors="pt")
-
-         >>> # Generate
-         >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
-         >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
-         "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
-         ```"""
-
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = (
-             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         )
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-         outputs = self.model(
-             input_ids=input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-
-         hidden_states = outputs[0]
-         logits = self.lm_head(hidden_states)
-
-         loss = None
-         if labels is not None:
-             # Shift so that tokens < n predict n
-             shift_logits = logits[..., :-1, :].contiguous()
-             shift_labels = labels[..., 1:].contiguous()
-             # Flatten the tokens
-             loss_fct = CrossEntropyLoss()
-             shift_logits = shift_logits.view(-1, self.config.vocab_size)
-             shift_labels = shift_labels.view(-1)
-             # Enable model parallelism
-             shift_labels = shift_labels.to(shift_logits.device)
-             loss = loss_fct(shift_logits, shift_labels)
-             #loss += outputs[-1]
-
-         if not return_dict:
-             output = (logits,) + outputs[1:]
-             return (loss,) + output if loss is not None else output
-
-         return CausalLMOutputWithPast(
-             loss=loss,
-             logits=logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-         )
-
-     def prepare_inputs_for_generation(
-         self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
-     ):
-         if past_key_values:
-             input_ids = input_ids[:, -1:]
-
-         position_ids = kwargs.get("position_ids", None)
-         if attention_mask is not None and position_ids is None:
-             # create position_ids on the fly for batch generation
-             position_ids = attention_mask.long().cumsum(-1) - 1
-             position_ids.masked_fill_(attention_mask == 0, 1)
-             if past_key_values:
-                 position_ids = position_ids[:, -1].unsqueeze(-1)
-
-         # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
-         if inputs_embeds is not None and past_key_values is None:
-             model_inputs = {"inputs_embeds": inputs_embeds}
-         else:
-             model_inputs = {"input_ids": input_ids}
-
-         model_inputs.update(
-             {
-                 "position_ids": position_ids,
-                 "past_key_values": past_key_values,
-                 "use_cache": kwargs.get("use_cache"),
-                 "attention_mask": attention_mask,
-             }
-         )
-         return model_inputs
-
-     @staticmethod
-     def _reorder_cache(past_key_values, beam_idx):
-         reordered_past = ()
-         for layer_past in past_key_values:
-             reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
-         return reordered_past
-
-
- @add_start_docstrings(
-     """
-     The LLaMa Model transformer with a sequence classification head on top (linear layer).
-
-     [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
-     (e.g. GPT-2) do.
-
-     Since it does classification on the last token, it requires to know the position of the last token. If a
-     `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
-     no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
-     padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
-     each row of the batch).
-     """,
-     LLAMA_START_DOCSTRING,
- )
- class LlamaForSequenceClassification(LlamaPreTrainedModel):
-     _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
-
-     def __init__(self, config):
-         super().__init__(config)
-         self.num_labels = config.num_labels
-         self.model = LlamaModel(config)
-         self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.model.embed_tokens = value
-
-     @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
-     def forward(
-         self,
-         input_ids: torch.LongTensor = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[List[torch.FloatTensor]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
-         r"""
-         labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
-             Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
-             config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
-             `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
-         """
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         transformer_outputs = self.model(
-             input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-         hidden_states = transformer_outputs[0]
-         logits = self.score(hidden_states)
-
-         if input_ids is not None:
-             batch_size = input_ids.shape[0]
-         else:
-             batch_size = inputs_embeds.shape[0]
-
-         if self.config.pad_token_id is None and batch_size != 1:
-             raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
-         if self.config.pad_token_id is None:
-             sequence_lengths = -1
-         else:
-             if input_ids is not None:
-                 sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
-             else:
-                 sequence_lengths = -1
-
-         pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
-
-         loss = None
-         if labels is not None:
-             labels = labels.to(logits.device)
-             if self.config.problem_type is None:
-                 if self.num_labels == 1:
-                     self.config.problem_type = "regression"
-                 elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
-                     self.config.problem_type = "single_label_classification"
-                 else:
-                     self.config.problem_type = "multi_label_classification"
-
-             if self.config.problem_type == "regression":
-                 loss_fct = MSELoss()
-                 if self.num_labels == 1:
-                     loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
-                 else:
-                     loss = loss_fct(pooled_logits, labels)
-             elif self.config.problem_type == "single_label_classification":
-                 loss_fct = CrossEntropyLoss()
-                 loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
-             elif self.config.problem_type == "multi_label_classification":
-                 loss_fct = BCEWithLogitsLoss()
-                 loss = loss_fct(pooled_logits, labels)
-         if not return_dict:
-             output = (pooled_logits,) + transformer_outputs[1:]
-             return ((loss,) + output) if loss is not None else output
-
-         return SequenceClassifierOutputWithPast(
-             loss=loss,
-             logits=pooled_logits,
-             past_key_values=transformer_outputs.past_key_values,
-             hidden_states=transformer_outputs.hidden_states,
-             attentions=transformer_outputs.attentions,
-         )
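
Two notes on the deleted implementation. First, its LlamaRotaryEmbedding hardcodes linear position interpolation: positions are multiplied by scale = 1/8 and the cos/sin cache is built for 8192 * 2 = 16384 positions, mapping a 16k context into the positional range of a 2k-pretrained model. A minimal self-contained sketch of that idea follows; the class and parameter names are illustrative, not taken from the file:

import torch

class InterpolatedRotaryEmbedding(torch.nn.Module):
    """Rotary cos/sin cache with linearly compressed positions (position interpolation)."""

    def __init__(self, dim, max_positions=16384, base=10000, scale=1 / 8):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        # Compressing positions by `scale` keeps every index inside the range the
        # base model was pretrained on, at reduced angular resolution.
        t = torch.arange(max_positions, dtype=torch.float32) * scale
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer("cos_cached", emb.cos()[None, None], persistent=False)
        self.register_buffer("sin_cached", emb.sin()[None, None], persistent=False)

    def forward(self, x, seq_len):
        # Returns [1, 1, seq_len, dim] cos/sin tables in the dtype of `x`.
        return (
            self.cos_cached[:, :, :seq_len].to(x.dtype),
            self.sin_cached[:, :, :seq_len].to(x.dtype),
        )

Second, the patched LlamaAttention.forward() implements blockwise local attention: queries, keys, and values are split into fixed-size chunks (2048 tokens on even layers, 1536 on odd ones) and each chunk attends causally only within itself. The chunking logic, sketched with plain scaled-dot-product attention standing in for the FlashAttention kernel (assumes PyTorch >= 2.0):

import torch
import torch.nn.functional as F

def blockwise_local_attention(q, k, v, split_size=2048):
    """q, k, v: [bsz, n_heads, seq_len, head_dim]; each chunk attends only to itself."""
    outputs = []
    for qc, kc, vc in zip(q.split(split_size, dim=2),
                          k.split(split_size, dim=2),
                          v.split(split_size, dim=2)):
        # causal attention restricted to one chunk
        outputs.append(F.scaled_dot_product_attention(qc, kc, vc, is_causal=True))
    return torch.cat(outputs, dim=2)

The alternating chunk sizes shift block boundaries from layer to layer, which presumably lets information cross chunk boundaries as depth grows even though each individual layer is strictly local.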