luffyevil114 committed
Commit a2b4b65
Parent(s): 8930df8

Update modeling_mpt.py

Files changed (1): modeling_mpt.py (+519, -0)
"""A simple, flexible implementation of a GPT model.

Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
from __future__ import annotations
import math
import warnings
from typing import Any, Dict, List, Mapping, MutableMapping, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from .attention import is_flash_v1_installed, is_flash_v2_installed
if is_flash_v2_installed():
    try:
        from flash_attn import bert_padding
        from flash_attn.layers.rotary import RotaryEmbedding as DAILRotaryEmbedding
    except Exception as e:
        raise e
if is_flash_v1_installed():
    try:
        from flash_attn import bert_padding
    except Exception as e:
        raise e
from transformers import PreTrainedModel, PreTrainedTokenizerBase
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import LlamaDynamicNTKScalingRotaryEmbedding as HFDynamicNTKScalingRotaryEmbedding
from transformers.models.llama.modeling_llama import LlamaLinearScalingRotaryEmbedding as HFLinearScalingRotaryEmbedding
from transformers.models.llama.modeling_llama import LlamaRotaryEmbedding as HFRotaryEmbedding
from .attention import ATTN_CLASS_REGISTRY, attn_bias_shape, build_attn_bias, gen_slopes
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .fc import FC_CLASS_REGISTRY as FC_CLASS_REGISTRY
from .ffn import FFN_CLASS_REGISTRY as FFN_CLASS_REGISTRY
from .ffn import MPTMLP as MPTMLP
from .ffn import build_ffn as build_ffn
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import generic_param_init_fn_, MODEL_INIT_REGISTRY
try:
    from .flash_attn_triton import flash_attn_func as flash_attn_func
except:
    pass
import logging
log = logging.getLogger(__name__)

def gen_rotary_embedding(rope_head_dim: int, rope_impl: str, rope_theta: int, rope_dail_config: dict, rope_hf_config: dict, max_seq_len: int):
    if rope_impl == 'dail':
        return DAILRotaryEmbedding(dim=rope_head_dim, base=rope_theta, interleaved=False, scale_base=rope_dail_config['xpos_scale_base'] if rope_dail_config['type'] == 'xpos' else None, pos_idx_in_fp32=rope_dail_config['pos_idx_in_fp32'], device='cpu')
    elif rope_impl == 'hf':
        if rope_hf_config['type'] == 'no_scaling':
            return HFRotaryEmbedding(rope_head_dim, max_position_embeddings=max_seq_len, base=rope_theta, device='cpu')
        elif rope_hf_config['type'] == 'linear':
            return HFLinearScalingRotaryEmbedding(rope_head_dim, max_position_embeddings=max_seq_len, base=rope_theta, scaling_factor=rope_hf_config['factor'], device='cpu')
        elif rope_hf_config['type'] == 'dynamic':
            return HFDynamicNTKScalingRotaryEmbedding(rope_head_dim, max_position_embeddings=max_seq_len, base=rope_theta, scaling_factor=rope_hf_config['factor'], device='cpu')
    raise ValueError('rope_impl needs to be either dail or hf')

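# Illustrative call (not part of the original file): an MPT-7B-like config with
# d_model=4096 and n_heads=32 using unscaled HF rope would build its module as
#   gen_rotary_embedding(128, 'hf', 10000, {}, {'type': 'no_scaling'}, 2048)
# returning a LlamaRotaryEmbedding over head_dim = d_model // n_heads = 128.
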
def gen_attention_mask_in_length(sequence_id: Union[None, torch.Tensor], S: int, attn_uses_sequence_id: bool, attn_impl: str, attention_mask: Union[torch.Tensor, None]):
    """Generates the attention mask used for sequence masking in FA v2.

    Only supports sequence-id-based sparse attention with no attention mask, or with an attention mask corresponding to right padding.
    In case of left padding:
        1. Training with left padding is not supported in MPT (see https://github.com/mosaicml/llm-foundry/blob/1eecd4cb8e734499f77f6a35f657b8b20c0adfcb/llmfoundry/models/mpt/modeling_mpt.py#L407).
        2. For generation with left padding, we only have a single sequence id per sample, so we don't need sequence id based sparse attention.

    Args:
        sequence_id (Union[None, torch.Tensor]): Tensor containing the sequence id for each token. Shape (batch_size, seq_len).
        S (int): Sequence length
        attn_uses_sequence_id (bool): Whether the attention uses sequence id based masking.
        attn_impl (str): Attention implementation. This function only creates attention_mask_in_length for flash attention.
        attention_mask (Union[torch.Tensor, None]): Attention mask tensor of shape (batch_size, seq_len)

    Returns:
        attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none. For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
        ```
        [
            [2, 3, 0, 0, 0, 0],
            [3, 2, 0, 0, 0, 0],
            [6, 0, 0, 0, 0, 0]
        ]
        ```
        , which refers to the 3D-attention mask:
        ```
        [
            [
                [1, 0, 0, 0, 0, 0],
                [1, 1, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0],
                [0, 0, 1, 1, 0, 0],
                [0, 0, 1, 1, 1, 0],
                [0, 0, 0, 0, 0, 1]
            ],
            [
                [1, 0, 0, 0, 0, 0],
                [1, 1, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0],
                [0, 0, 0, 1, 0, 0],
                [0, 0, 0, 1, 1, 0],
                [0, 0, 0, 0, 0, 1]
            ],
            [
                [1, 0, 0, 0, 0, 0],
                [1, 1, 0, 0, 0, 0],
                [1, 1, 1, 0, 0, 0],
                [1, 1, 1, 1, 0, 0],
                [1, 1, 1, 1, 1, 0],
                [1, 1, 1, 1, 1, 1]
            ]
        ]
        ```.
        (The description above is taken verbatim from https://github.com/Dao-AILab/flash-attention/blob/9356a1c0389660d7e231ff3163c1ac17d9e3824a/flash_attn/bert_padding.py#L125 .)
    """
    attention_mask_in_length = None
    if sequence_id is not None and attn_uses_sequence_id and (attn_impl == 'flash'):
        if attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0]:
            raise NotImplementedError('Left padding is not supported with flash attention when attn_uses_sequence_id is set to True.')
        if S != sequence_id.shape[-1]:
            raise ValueError(f'Sequence length ({S}) does not match length of sequences in sequence_id ({sequence_id.shape[-1]}).')
        if attention_mask is not None:
            sequence_id = sequence_id.masked_fill(~attention_mask, 0)
        attention_mask_in_length = torch.nn.functional.one_hot(sequence_id)
        if attention_mask is not None:
            attention_mask_in_length = attention_mask_in_length.masked_fill(~attention_mask.unsqueeze(-1), 0)
        attention_mask_in_length = attention_mask_in_length.sum(dim=1)
        attention_mask_in_length = torch.nn.functional.pad(attention_mask_in_length, (0, S - attention_mask_in_length.shape[-1]), mode='constant', value=0)
    return attention_mask_in_length

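# Worked example (illustrative, not in the original file): one batch row packing
# a length-2 and a length-3 sequence followed by one pad token (S=6):
#   sequence_id    = torch.tensor([[0, 0, 1, 1, 1, 1]])
#   attention_mask = torch.tensor([[1, 1, 1, 1, 1, 0]], dtype=torch.bool)
#   gen_attention_mask_in_length(sequence_id, 6, True, 'flash', attention_mask)
# returns tensor([[2, 3, 0, 0, 0, 0]]), matching the first row of the docstring example.
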
def gen_flash_attn_padding_info(bsz: int, S: int, past_key_len: int, device: torch.device, attention_mask_in_length: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None):
    flash_attn_padding_info = {}
    if attention_mask_in_length is None:
        key_padding_mask = attention_mask
        if key_padding_mask is None:
            key_padding_mask = torch.ones((bsz, past_key_len + S), dtype=torch.bool, device=device)
        query_padding_mask = key_padding_mask[:, -S:]
        unpadding_function = bert_padding.unpad_input
    else:
        key_padding_mask = attention_mask_in_length
        query_padding_mask = attention_mask_in_length
        unpadding_function = bert_padding.unpad_input_for_concatenated_sequences
    (_, indices_q, cu_seqlens_q, max_seqlen_q) = unpadding_function(torch.empty(bsz, S, 1, device=device), query_padding_mask)
    (_, indices_k, cu_seqlens_k, max_seqlen_k) = unpadding_function(torch.empty(bsz, past_key_len + S, 1, device=device), key_padding_mask)
    (_, indices_v, _, _) = unpadding_function(torch.empty(bsz, past_key_len + S, 1, device=device), key_padding_mask)
    flash_attn_padding_info['indices_q'] = indices_q
    flash_attn_padding_info['indices_k'] = indices_k
    flash_attn_padding_info['indices_v'] = indices_v
    flash_attn_padding_info['cu_seqlens_q'] = cu_seqlens_q
    flash_attn_padding_info['cu_seqlens_k'] = cu_seqlens_k
    flash_attn_padding_info['max_seqlen_q'] = max_seqlen_q
    flash_attn_padding_info['max_seqlen_k'] = max_seqlen_k
    return flash_attn_padding_info

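# For a fully unpadded batch (e.g. bsz=2, S=4, past_key_len=0) this reduces to
# cu_seqlens_q == cu_seqlens_k == tensor([0, 4, 8]) with max_seqlen 4, i.e. the
# "cumulative sequence lengths" format consumed by flash-attn's varlen kernels.
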
def apply_sequence_id(attn_bias: torch.Tensor, sequence_id: torch.LongTensor, max_seq_len: int) -> torch.Tensor:
    seq_len = sequence_id.shape[-1]
    if seq_len > max_seq_len:
        raise ValueError(f'sequence_id sequence length cannot exceed max_seq_len={max_seq_len}')
    attn_bias = attn_bias[..., :seq_len, :seq_len]
    cannot_attend = torch.logical_not(torch.eq(sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len))).unsqueeze(1)
    min_val = torch.finfo(attn_bias.dtype).min
    attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
    return attn_bias

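# apply_sequence_id adds sequence masking to the dense (non-flash) bias path:
# positions whose sequence ids differ are filled with the dtype minimum, so e.g.
# sequence_id [[0, 0, 1]] blocks token 2 from attending to tokens 0 and 1 on top
# of whatever causal/alibi bias is already present.
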
class MPTPreTrainedModel(PreTrainedModel):
    config_class = MPTConfig
    base_model_prefix = 'model'
    _no_split_modules = ['MPTBlock']

def _fsdp_wrap_fn(self: Union[MPTModel, MPTForCausalLM], module: nn.Module) -> bool:
    return isinstance(module, MPTBlock)

class MPTModel(MPTPreTrainedModel):

    def __init__(self, config: MPTConfig):
        config._validate_config()
        super().__init__(config)
        self.attn_impl = config.attn_config['attn_impl']
        self.prefix_lm = config.attn_config['prefix_lm']
        self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
        self.alibi = config.attn_config['alibi']
        self.alibi_bias_max = config.attn_config['alibi_bias_max']
        self.learned_pos_emb = config.learned_pos_emb
        if config.init_device == 'mixed':
            # NOTE: `dist` was undefined in the original file; composer.utils.dist
            # (an assumed dependency, as in llm-foundry) provides get_local_rank().
            from composer.utils import dist
            if dist.get_local_rank() == 0:
                config.init_device = 'cpu'
            else:
                config.init_device = 'meta'
        if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
            norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
            raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
        norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
        self.embedding_fraction = config.embedding_fraction
        self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
        if self.learned_pos_emb:
            self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
        self.emb_drop = nn.Dropout(config.emb_pdrop)
        self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
        self.norm_f = norm_class(config.d_model, device=config.init_device)
        self.rope = config.attn_config['rope']
        self.rope_impl = None
        if self.rope:
            self.rope_impl = config.attn_config['rope_impl']
            self.rotary_embedding = gen_rotary_embedding(rope_head_dim=config.d_model // config.n_heads, rope_impl=self.rope_impl, rope_theta=config.attn_config['rope_theta'], rope_dail_config=config.attn_config['rope_dail_config'], rope_hf_config=config.attn_config['rope_hf_config'], max_seq_len=self.config.max_seq_len)
        if config.init_device != 'meta':
            log.info('We recommend using config.init_device="meta" with Composer + FSDP for faster initialization.')
            self.apply(self.param_init_fn)
        self.is_causal = not self.prefix_lm
        self._attn_bias_initialized = False
        self.attn_bias = None
        self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
        if config.no_bias:
            for module in self.modules():
                if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
                    log.info(f'Removing bias from module={module!r}.')
                    module.register_parameter('bias', None)
                if hasattr(module, 'use_bias'):
                    log.info(f'Setting use_bias=False for module={module!r}.')
                    module.use_bias = False
        log.debug(self)
        log.debug(f"Using {self.config.init_config['name']} initialization.")

    def get_input_embeddings(self) -> Union[SharedEmbedding, nn.Embedding]:
        return self.wte

    def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]) -> None:
        self.wte = value

    @torch.no_grad()
    def _attn_bias(self, device: torch.device, dtype: torch.dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None) -> Tuple[Optional[torch.Tensor], Optional[torch.ByteTensor]]:
        if not self._attn_bias_initialized:
            if self.attn_bias_shape:
                self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
                self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max)
            self._attn_bias_initialized = True
        if self.attn_impl == 'flash':
            return (self.attn_bias, attention_mask)
        if self.attn_bias is not None:
            self.attn_bias = self.attn_bias.to(dtype=dtype, device=device)
        attn_bias = self.attn_bias
        if self.prefix_lm:
            assert isinstance(attn_bias, torch.Tensor)
            assert isinstance(prefix_mask, torch.Tensor)
            attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask)
        if self.attn_uses_sequence_id and sequence_id is not None:
            assert isinstance(attn_bias, torch.Tensor)
            attn_bias = apply_sequence_id(attn_bias, sequence_id, self.config.max_seq_len)
        if attention_mask is not None:
            s_k = attention_mask.shape[-1]
            if attn_bias is None:
                attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype)
            else:
                _s_k = max(0, attn_bias.size(-1) - s_k)
                attn_bias = attn_bias[:, :, :, _s_k:]
            if prefix_mask is not None and attention_mask.shape != prefix_mask.shape:
                raise ValueError(f'attention_mask shape={attention_mask.shape} and prefix_mask shape={prefix_mask.shape} are not equal.')
            min_val = torch.finfo(attn_bias.dtype).min
            attn_bias = attn_bias.masked_fill(~attention_mask.view(-1, 1, 1, s_k), min_val)
        return (attn_bias, attention_mask)

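    # The bias above is built lazily on first use, cached on the module, and
    # sliced per call; with attn_impl='flash' no additive bias is materialized
    # and padding is instead handled via gen_flash_attn_padding_info downstream.
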
    def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor) -> torch.Tensor:
        (s_k, s_q) = attn_bias.shape[-2:]
        if s_k != self.config.max_seq_len or s_q != self.config.max_seq_len:
            raise ValueError(f'attn_bias does not match the expected shape. The last two dimensions should both be {self.config.max_seq_len} but are {s_k} and {s_q}.')
        seq_len = prefix_mask.shape[-1]
        if seq_len > self.config.max_seq_len:
            raise ValueError(f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}')
        attn_bias = attn_bias[..., :seq_len, :seq_len]
        causal = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len)
        prefix = prefix_mask.view(-1, 1, 1, seq_len)
        cannot_attend = ~torch.logical_or(causal, prefix.bool())
        min_val = torch.finfo(attn_bias.dtype).min
        attn_bias = attn_bias.masked_fill(cannot_attend, min_val)
        return attn_bias

    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.Tensor]=None) -> BaseModelOutputWithPast:
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        if attention_mask is not None:
            attention_mask = attention_mask.bool()
        if prefix_mask is not None:
            prefix_mask = prefix_mask.bool()
        if not return_dict:
            raise NotImplementedError('return_dict False is not implemented yet for MPT')
        if output_attentions:
            if self.attn_impl != 'torch':
                raise NotImplementedError('output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.')
        if self.training and attention_mask is not None and (attention_mask[:, 0].sum() != attention_mask.shape[0]):
            raise NotImplementedError('MPT does not support training with left padding.')
        if self.prefix_lm and prefix_mask is None:
            raise ValueError('prefix_mask is a required argument when MPT is configured with prefix_lm=True.')
        if self.training:
            if self.attn_uses_sequence_id and sequence_id is None:
                raise ValueError('sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True and the model is in train mode.')
            elif self.attn_uses_sequence_id is False and sequence_id is not None:
                warnings.warn('MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.')
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds.')
        elif input_ids is not None:
            bsz = input_ids.size(0)
            S = input_ids.size(1)
            x = self.wte(input_ids)
            input_device = input_ids.device
        elif inputs_embeds is not None:
            bsz = inputs_embeds.size(0)
            S = inputs_embeds.size(1)
            x = inputs_embeds
            input_device = inputs_embeds.device
        else:
            raise ValueError('You must specify input_ids or inputs_embeds')
        assert S <= self.config.max_seq_len, f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}'
        rotary_emb_w_meta_info = None
        past_position = 0
        if past_key_values is not None:
            if len(past_key_values) != self.config.n_layers:
                raise ValueError(f'past_key_values must provide a past_key_value for each attention layer in the network (len(past_key_values)={len(past_key_values)!r}; self.config.n_layers={self.config.n_layers!r}).')
            past_position = past_key_values[0][0].size(1)
            if self.attn_impl == 'torch':
                past_position = past_key_values[0][0].size(3)
        if self.learned_pos_emb or self.rope:
            if self.learned_pos_emb and S + past_position > self.config.max_seq_len:
                raise ValueError(f'Cannot forward input with past sequence length {past_position} and current sequence length {S}, this model only supports total sequence length <= {self.config.max_seq_len}.')
            if self.learned_pos_emb or (self.rope and self.rope_impl == 'hf'):
                pos = torch.arange(past_position, S + past_position, dtype=torch.long, device=input_device).unsqueeze(0)
                if attention_mask is not None:
                    pos = torch.clamp(pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0)
                if self.learned_pos_emb:
                    x = x + self.wpe(pos)
                elif self.rope and self.rope_impl == 'hf':
                    rotary_emb_w_meta_info = {'impl': self.rope_impl, 'rotary_emb': self.rotary_embedding, 'offset_info': pos, 'seq_len': S + past_position}
            elif self.rope and self.rope_impl == 'dail':
                rotary_emb_w_meta_info = {'impl': self.rope_impl, 'rotary_emb': self.rotary_embedding, 'offset_info': past_position, 'seq_len': S + past_position}
        if self.embedding_fraction == 1:
            x = self.emb_drop(x)
        else:
            x_shrunk = x * self.embedding_fraction + x.detach() * (1 - self.embedding_fraction)
            assert isinstance(self.emb_drop, nn.Module)
            x = self.emb_drop(x_shrunk)
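        # (embedding_fraction is a straight-through gradient scale: the forward
        # value is unchanged, since f*x + (1-f)*x.detach() == x numerically, but
        # only a fraction f of the gradient flows back into the embeddings.)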
        (attn_bias, attention_mask) = self._attn_bias(device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id)
        attention_mask_in_length = gen_attention_mask_in_length(sequence_id=sequence_id, S=S, attn_uses_sequence_id=self.attn_uses_sequence_id, attn_impl=self.attn_impl, attention_mask=attention_mask)
        alibi_slopes = None
        if self.alibi and self.attn_impl == 'flash':
            alibi_slopes = gen_slopes(n_heads=self.config.n_heads, alibi_bias_max=self.alibi_bias_max, device=x.device, return_1d=True)
        presents = () if use_cache else None
        if use_cache and past_key_values is None:
            past_key_values = [() for _ in range(self.config.n_layers)]
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        flash_attn_padding_info = {}
        if self.attn_impl == 'flash':
            flash_attn_padding_info = gen_flash_attn_padding_info(bsz, S, past_position, x.device, attention_mask_in_length, attention_mask)
        for (b_idx, block) in enumerate(self.blocks):
            if output_hidden_states:
                assert all_hidden_states is not None
                all_hidden_states = all_hidden_states + (x,)
            past_key_value = past_key_values[b_idx] if past_key_values is not None else None
            (x, attn_weights, present) = block(x, past_key_value=past_key_value, attn_bias=attn_bias, rotary_emb_w_meta_info=rotary_emb_w_meta_info, attention_mask=attention_mask, is_causal=self.is_causal, output_attentions=bool(output_attentions), alibi_slopes=alibi_slopes, flash_attn_padding_info=flash_attn_padding_info)
            if presents is not None:
                presents += (present,)
            if output_attentions:
                assert all_self_attns is not None
                all_self_attns = all_self_attns + (attn_weights,)
        x = self.norm_f(x)
        if output_hidden_states:
            assert all_hidden_states is not None
            all_hidden_states = all_hidden_states + (x,)
        return BaseModelOutputWithPast(last_hidden_state=x, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attns)

    def param_init_fn(self, module: nn.Module) -> None:
        init_fn_name = self.config.init_config['name']
        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)

    def fsdp_wrap_fn(self, module: nn.Module) -> bool:
        return _fsdp_wrap_fn(self, module)

    def activation_checkpointing_fn(self, module: nn.Module) -> bool:
        return isinstance(module, MPTBlock)

class MPTForCausalLM(MPTPreTrainedModel):

    def __init__(self, config: MPTConfig):
        super().__init__(config)
        log.info(f'Instantiating an MPTForCausalLM model from {__file__}')
        self.transformer: MPTModel = MPTModel(config)
        self.lm_head = None
        if not config.tie_word_embeddings:
            self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False, device=config.init_device)
            self.lm_head._fsdp_wrap = True
        for child in self.transformer.children():
            if isinstance(child, torch.nn.ModuleList):
                continue
            if isinstance(child, torch.nn.Module):
                child._fsdp_wrap = True
        self.logit_scale = None
        if config.logit_scale is not None:
            logit_scale = config.logit_scale
            if isinstance(logit_scale, str):
                if logit_scale == 'inv_sqrt_d_model':
                    logit_scale = 1 / math.sqrt(config.d_model)
                else:
                    raise ValueError(f"logit_scale={logit_scale!r} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'.")
            self.logit_scale = logit_scale

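    # ('inv_sqrt_d_model' yields a 1/sqrt(d_model) multiplier on the logits,
    # e.g. roughly 0.0221 for d_model=2048; see forward() below for where it
    # is applied.)
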
    def get_input_embeddings(self) -> Union[SharedEmbedding, nn.Embedding]:
        return self.transformer.get_input_embeddings()

    def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]) -> None:
        self.transformer.set_input_embeddings(value)

    def get_output_embeddings(self) -> Union[SharedEmbedding, nn.Embedding, nn.Linear]:
        if self.lm_head is not None:
            return self.lm_head
        return self.transformer.get_input_embeddings()

    def set_output_embeddings(self, new_embeddings: Union[SharedEmbedding, nn.Embedding, nn.Linear]) -> None:
        if self.lm_head is not None:
            self.lm_head = new_embeddings
        else:
            if not isinstance(new_embeddings, (SharedEmbedding, nn.Embedding)):
                raise ValueError(f'new_embeddings must be an instance of SharedEmbedding or nn.Embedding, but got {type(new_embeddings)}.')
            warnings.warn('Using `set_output_embeddings` to set the embedding layer of MPTForCausalLM with tied weights. Given weights are tied, using `set_input_embeddings` is recommended over using `set_output_embeddings`.')
            self.transformer.set_input_embeddings(new_embeddings)

    def tie_weights(self) -> None:
        self.lm_head = None

    def set_decoder(self, decoder: MPTModel) -> None:
        self.transformer = decoder

    def get_decoder(self) -> MPTModel:
        return self.transformer

    def forward(self, input_ids: Optional[torch.LongTensor]=None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]]=None, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None, labels: Optional[torch.LongTensor]=None, return_dict: Optional[bool]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, use_cache: Optional[bool]=None, inputs_embeds: Optional[torch.FloatTensor]=None) -> CausalLMOutputWithPast:
        return_dict = return_dict if return_dict is not None else self.config.return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        outputs = self.transformer(input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, inputs_embeds=inputs_embeds)
        if self.lm_head is not None:
            logits = self.lm_head(outputs.last_hidden_state)
        else:
            out = outputs.last_hidden_state
            out = out.to(self.transformer.wte.weight.device)
            logits = self.transformer.wte(out, True)
        if self.logit_scale is not None:
            if self.logit_scale == 0:
                warnings.warn(f'Multiplying logits by self.logit_scale={self.logit_scale!r}. This will produce uniform (uninformative) outputs.')
            logits *= self.logit_scale
        loss = None
        if labels is not None:
            _labels = torch.roll(labels, shifts=-1)
            _labels[:, -1] = -100
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), _labels.to(logits.device).view(-1))
        return CausalLMOutputWithPast(loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions)

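    # (The loss uses the standard next-token shift: torch.roll moves label t+1
    # under logit t, and the wrapped-around final position is set to -100, which
    # F.cross_entropy ignores via its default ignore_index.)
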
    def param_init_fn(self, module: nn.Module) -> None:
        init_fn_name = self.config.init_config['name']
        MODEL_INIT_REGISTRY[init_fn_name](module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config)

    def fsdp_wrap_fn(self, module: nn.Module) -> bool:
        return _fsdp_wrap_fn(self, module)

    def activation_checkpointing_fn(self, module: nn.Module) -> bool:
        act_ckpt_list = getattr(self.config, 'activation_checkpointing_target', None) or ['MPTBlock']
        if isinstance(act_ckpt_list, str):
            act_ckpt_list = [act_ckpt_list]
        elif not isinstance(act_ckpt_list, list):
            raise ValueError(f'activation_checkpointing_target must be either a single string or a list, but got {type(act_ckpt_list)}')
        if 'MPTBlock' in act_ckpt_list or 'mptblock' in act_ckpt_list:
            if len(act_ckpt_list) > 1:
                log.info('Activation checkpointing MPTBlock only (ignoring other sub-block modules specified in activation_checkpointing_target).')
            return isinstance(module, MPTBlock)
        mod_types = ()
        for mod_name in act_ckpt_list:
            if mod_name.lower() == 'mptblock':
                mod_types += (MPTBlock,)
            elif mod_name in ATTN_CLASS_REGISTRY:
                mod_types += (ATTN_CLASS_REGISTRY[mod_name],)
            elif mod_name in FFN_CLASS_REGISTRY:
                mod_types += (FFN_CLASS_REGISTRY[mod_name],)
            elif mod_name in NORM_CLASS_REGISTRY:
                mod_types += (NORM_CLASS_REGISTRY[mod_name],)
            else:
                msg = ', '.join(list(ATTN_CLASS_REGISTRY.keys()) + list(FFN_CLASS_REGISTRY.keys()) + list(NORM_CLASS_REGISTRY.keys()) + ['MPTBlock'])
                raise ValueError(f'{mod_name} (specified in activation_checkpointing_target) is not a recognized option out of available options: {msg}.')
        return isinstance(module, mod_types)

    def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]]=None, inputs_embeds: Optional[torch.Tensor]=None, **kwargs: Any) -> Dict[str, Any]:
        attention_mask = kwargs['attention_mask'].bool()
        if attention_mask[:, -1].sum() != attention_mask.shape[0]:
            raise NotImplementedError('MPT does not support generation with right padding.')
        if self.transformer.attn_uses_sequence_id and self.training:
            sequence_id = torch.zeros_like(input_ids[:1])
        else:
            sequence_id = None
        if past_key_values is not None:
            input_ids = input_ids[:, -1].unsqueeze(-1)
        if self.transformer.prefix_lm:
            prefix_mask = torch.ones_like(attention_mask)
            if kwargs.get('use_cache') is False:
                raise NotImplementedError('MPT with prefix_lm=True does not support use_cache=False.')
        else:
            prefix_mask = None
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}
        model_inputs.update({'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True)})
        return model_inputs

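    # (With an active kv-cache only the newest token is fed back in each step;
    # forward() recovers the absolute position from the cached keys' sequence
    # dimension, so no explicit position ids are passed here.)
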
    @staticmethod
    def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor) -> List[Tuple[torch.Tensor, ...]]:
        """Used by HuggingFace generate when using beam search with kv-caching.

        See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133
        for an example in transformers.
        """
        reordered_past = []
        for layer_past in past_key_values:
            reordered_past += [tuple((past_state.index_select(0, beam_idx) for past_state in layer_past))]
        return reordered_past
+ return reordered_past