Agon H committed on
Commit
2a00a15
1 Parent(s): 89305f6

Upload attention.py

Files changed (1)
  1. attention.py +300 -0
attention.py ADDED
"""Attention layers."""
import math
import warnings
from typing import Optional

import torch
import torch.nn as nn
from einops import rearrange
from packaging import version

from .norm import LPLayerNorm

def _reset_is_causal(num_query_tokens: int, num_key_tokens: int, original_is_causal: bool):
    if original_is_causal and num_query_tokens != num_key_tokens:
        if num_query_tokens != 1:
            raise NotImplementedError('MPT does not support query and key with different number of tokens, unless number of query tokens is 1.')
        else:
            return False
    return original_is_causal

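# Shape conventions for the torch (`attn_impl: torch`) path below: query/key/value
# arrive as (batch, seq, heads * head_dim) tensors; q is reshaped to (b, h, s_q, d)
# and k to (b, h, d, s_k) (already transposed, so q.matmul(k) directly yields
# (b, h, s_q, s_k) scores), while v is (b, h, s_k, d). The KV cache keeps this
# layout, which is why cached keys are concatenated on dim 3 and cached values on
# dim 2. The function returns the output as (b, s_q, h * d), optionally the softmax
# weights, and the updated past_key_value.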
def scaled_multihead_dot_product_attention(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
    q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
    kv_n_heads = 1 if multiquery else n_heads
    k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
    v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
    if past_key_value is not None:
        if len(past_key_value) != 0:
            k = torch.cat([past_key_value[0], k], dim=3)
            v = torch.cat([past_key_value[1], v], dim=2)
        past_key_value = (k, v)
    (b, _, s_q, d) = q.shape
    s_k = k.size(-1)
    if softmax_scale is None:
        softmax_scale = 1 / math.sqrt(d)
    attn_weight = q.matmul(k) * softmax_scale
    if attn_bias is not None:
        _s_q = max(0, attn_bias.size(2) - s_q)
        _s_k = max(0, attn_bias.size(3) - s_k)
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
        if (attn_bias.size(-1) != 1 and attn_bias.size(-1) != s_k) or (attn_bias.size(-2) != 1 and attn_bias.size(-2) != s_q):
            raise RuntimeError(f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.')
        attn_weight = attn_weight + attn_bias
    min_val = torch.finfo(q.dtype).min
    if key_padding_mask is not None:
        if attn_bias is not None:
            warnings.warn('Propagating key_padding_mask to the attention module and applying it within the attention module can cause unnecessary computation/memory usage. Consider integrating into attn_bias once and passing that to each attention module instead.')
        attn_weight = attn_weight.masked_fill(~key_padding_mask.view((b, 1, 1, s_k)), min_val)
    if is_causal and (not q.size(2) == 1):
        s = max(s_q, s_k)
        causal_mask = attn_weight.new_ones(s, s, dtype=torch.float16)
        causal_mask = causal_mask.tril()
        causal_mask = causal_mask.to(torch.bool)
        causal_mask = ~causal_mask
        causal_mask = causal_mask[-s_q:, -s_k:]
        attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k), min_val)
    attn_weight = torch.softmax(attn_weight, dim=-1)
    if dropout_p:
        attn_weight = torch.nn.functional.dropout(attn_weight, p=dropout_p, training=training, inplace=True)
    out = attn_weight.to(v.dtype).matmul(v)
    out = rearrange(out, 'b h s d -> b s (h d)')
    if needs_weights:
        return (out, attn_weight, past_key_value)
    return (out, None, past_key_value)

def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
    for tensor in tensors:
        if tensor.dtype not in valid_dtypes:
            raise TypeError(f'tensor.dtype={tensor.dtype!r} must be in valid_dtypes={valid_dtypes!r}.')
        if not tensor.is_cuda:
            raise TypeError(f'Inputs must be cuda tensors (tensor.is_cuda={tensor.is_cuda!r}).')

def flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
    try:
        from flash_attn import bert_padding, flash_attn_interface
    except ImportError:
        raise RuntimeError('Please install flash-attn==1.0.3.post0')
    check_valid_inputs(query, key, value)
    if past_key_value is not None:
        if len(past_key_value) != 0:
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if attn_bias is not None:
        raise NotImplementedError('attn_bias not implemented for flash attn.')
    (batch_size, seqlen) = query.shape[:2]
    if key_padding_mask is None:
        key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
    query_padding_mask = key_padding_mask[:, -query.size(1):]
    (query_unpad, indices_q, cu_seqlens_q, max_seqlen_q) = bert_padding.unpad_input(query, query_padding_mask)
    query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
    (key_unpad, _, cu_seqlens_k, max_seqlen_k) = bert_padding.unpad_input(key, key_padding_mask)
    key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
    (value_unpad, _, _, _) = bert_padding.unpad_input(value, key_padding_mask)
    value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=1 if multiquery else n_heads)
    if multiquery:
        key_unpad = key_unpad.expand(key_unpad.size(0), n_heads, key_unpad.size(-1))
        value_unpad = value_unpad.expand(value_unpad.size(0), n_heads, value_unpad.size(-1))
    dropout_p = dropout_p if training else 0.0
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    output_unpad = flash_attn_interface.flash_attn_unpadded_func(query_unpad, key_unpad, value_unpad, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, dropout_p, softmax_scale=softmax_scale, causal=reset_is_causal, return_attn_probs=needs_weights)
    output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)
    return (output, None, past_key_value)

def triton_flash_attn_fn(query, key, value, n_heads, past_key_value=None, softmax_scale=None, attn_bias=None, key_padding_mask=None, is_causal=False, dropout_p=0.0, training=False, needs_weights=False, multiquery=False):
    try:
        from .flash_attn_triton import flash_attn_func
    except ImportError:
        _installed = False
        if version.parse(torch.__version__) < version.parse('2.0.0'):
            _installed = True
            try:
                from flash_attn.flash_attn_triton import flash_attn_func
            except ImportError:
                _installed = False
        if not _installed:
            raise RuntimeError('Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU and `pip install .[gpu]` if installing from llm-foundry source or `pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). Note: (1) requires you have CMake and PyTorch already installed.')
    check_valid_inputs(query, key, value)
    if past_key_value is not None:
        if len(past_key_value) != 0:
            key = torch.cat([past_key_value[0], key], dim=1)
            value = torch.cat([past_key_value[1], value], dim=1)
        past_key_value = (key, value)
    if attn_bias is not None:
        _s_q = max(0, attn_bias.size(2) - query.size(1))
        _s_k = max(0, attn_bias.size(3) - key.size(1))
        attn_bias = attn_bias[:, :, _s_q:, _s_k:]
    if dropout_p:
        raise NotImplementedError('Dropout not implemented for attn_impl: triton.')
    if needs_weights:
        raise NotImplementedError('attn_impl: triton cannot return attn weights.')
    if key_padding_mask is not None:
        warnings.warn('Propagating key_padding_mask to the attention module and applying it within the attention module can cause unnecessary computation/memory usage. Consider integrating into attn_bias once and passing that to each attention module instead.')
        (b_size, s_k) = key_padding_mask.shape[:2]
        if attn_bias is None:
            attn_bias = query.new_zeros(b_size, 1, 1, s_k)
        attn_bias = attn_bias.masked_fill(~key_padding_mask.view((b_size, 1, 1, s_k)), torch.finfo(query.dtype).min)
    query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
    key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
    value = rearrange(value, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
    if multiquery:
        key = key.expand(*key.shape[:2], n_heads, key.size(-1))
        value = value.expand(*value.shape[:2], n_heads, value.size(-1))
    reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
    attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal, softmax_scale)
    output = attn_output.view(*attn_output.shape[:2], -1)
    return (output, None, past_key_value)

class MultiheadAttention(nn.Module):
    """Multi-head self attention.

    Using the torch or triton attention implementation enables the user to also
    use additive bias.
    """

    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
        super().__init__()
        self.attn_impl = attn_impl
        self.clip_qkv = clip_qkv
        self.qk_ln = qk_ln
        self.d_model = d_model
        self.n_heads = n_heads
        self.softmax_scale = softmax_scale
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
        self.attn_dropout_p = attn_pdrop
        self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device)
        fuse_splits = (d_model, 2 * d_model)
        self.Wqkv._fused = (0, fuse_splits)
        if self.qk_ln:
            layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
            self.q_ln = layernorm_class(self.d_model, device=device)
            self.k_ln = layernorm_class(self.d_model, device=device)
        if self.attn_impl == 'flash':
            self.attn_fn = flash_attn_fn
        elif self.attn_impl == 'triton':
            self.attn_fn = triton_flash_attn_fn
            if verbose:
                warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` it uses more memory. When training larger models this can trigger alloc retries which hurts performance. If encountered, we recommend using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
        elif self.attn_impl == 'torch':
            self.attn_fn = scaled_multihead_dot_product_attention
            if torch.cuda.is_available() and verbose:
                warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or `prefix_lm` we recommend using `attn_impl: flash` otherwise we recommend using `attn_impl: triton`.')
        else:
            raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
        self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
        self.out_proj._is_residual = True

    def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
        qkv = self.Wqkv(x)
        if self.clip_qkv:
            qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
        (query, key, value) = qkv.chunk(3, dim=2)
        key_padding_mask = attention_mask
        if self.qk_ln:
            dtype = query.dtype
            query = self.q_ln(query).to(dtype)
            key = self.k_ln(key).to(dtype)
        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights)
        return (self.out_proj(context), attn_weights, past_key_value)

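# The multi-query variant below mirrors MultiheadAttention, but projects only a
# single shared key head and a single shared value head of size head_dim (the
# fused Wqkv output is d_model + 2 * head_dim instead of 3 * d_model). The shared
# key/value are broadcast across all n_heads query heads inside the attention
# functions via `multiquery=True`, which shrinks the KV cache and the K/V
# projections by roughly a factor of n_heads.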
class MultiQueryAttention(nn.Module):
    """Multi-Query self attention.

    Using the torch or triton attention implementation enables the user to also
    use additive bias.
    """

    def __init__(self, d_model: int, n_heads: int, attn_impl: str='triton', clip_qkv: Optional[float]=None, qk_ln: bool=False, softmax_scale: Optional[float]=None, attn_pdrop: float=0.0, low_precision_layernorm: bool=False, verbose: int=0, device: Optional[str]=None):
        super().__init__()
        self.attn_impl = attn_impl
        self.clip_qkv = clip_qkv
        self.qk_ln = qk_ln
        self.d_model = d_model
        self.n_heads = n_heads
        self.head_dim = d_model // n_heads
        self.softmax_scale = softmax_scale
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.head_dim)
        self.attn_dropout_p = attn_pdrop
        self.Wqkv = nn.Linear(d_model, d_model + 2 * self.head_dim, device=device)
        fuse_splits = (d_model, d_model + self.head_dim)
        self.Wqkv._fused = (0, fuse_splits)
        if self.qk_ln:
            layernorm_class = LPLayerNorm if low_precision_layernorm else nn.LayerNorm
            self.q_ln = layernorm_class(d_model, device=device)
            self.k_ln = layernorm_class(self.head_dim, device=device)
        if self.attn_impl == 'flash':
            self.attn_fn = flash_attn_fn
        elif self.attn_impl == 'triton':
            self.attn_fn = triton_flash_attn_fn
            if verbose:
                warnings.warn('While `attn_impl: triton` can be faster than `attn_impl: flash` it uses more memory. When training larger models this can trigger alloc retries which hurts performance. If encountered, we recommend using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.')
        elif self.attn_impl == 'torch':
            self.attn_fn = scaled_multihead_dot_product_attention
            if torch.cuda.is_available() and verbose:
                warnings.warn('Using `attn_impl: torch`. If your model does not use `alibi` or `prefix_lm` we recommend using `attn_impl: flash` otherwise we recommend using `attn_impl: triton`.')
        else:
            raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')
        self.out_proj = nn.Linear(self.d_model, self.d_model, device=device)
        self.out_proj._is_residual = True

    def forward(self, x, past_key_value=None, attn_bias=None, attention_mask=None, is_causal=True, needs_weights=False):
        qkv = self.Wqkv(x)
        if self.clip_qkv:
            qkv.clamp_(min=-self.clip_qkv, max=self.clip_qkv)
        (query, key, value) = qkv.split([self.d_model, self.head_dim, self.head_dim], dim=2)
        key_padding_mask = attention_mask
        if self.qk_ln:
            dtype = query.dtype
            query = self.q_ln(query).to(dtype)
            key = self.k_ln(key).to(dtype)
        (context, attn_weights, past_key_value) = self.attn_fn(query, key, value, self.n_heads, past_key_value=past_key_value, softmax_scale=self.softmax_scale, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, multiquery=True)
        return (self.out_proj(context), attn_weights, past_key_value)

def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):
    if attn_impl == 'flash':
        return None
    elif attn_impl in ['torch', 'triton']:
        if alibi:
            if (prefix_lm or not causal) or use_sequence_id:
                return (1, n_heads, seq_len, seq_len)
            return (1, n_heads, 1, seq_len)
        elif prefix_lm or use_sequence_id:
            return (1, 1, seq_len, seq_len)
        return None
    else:
        raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')

def build_attn_bias(attn_impl, attn_bias, n_heads, seq_len, causal=False, alibi=False, alibi_bias_max=8):
    if attn_impl == 'flash':
        return None
    elif attn_impl in ['torch', 'triton']:
        if alibi:
            (device, dtype) = (attn_bias.device, attn_bias.dtype)
            attn_bias = attn_bias.add(build_alibi_bias(n_heads, seq_len, full=not causal, alibi_bias_max=alibi_bias_max, device=device, dtype=dtype))
        return attn_bias
    else:
        raise ValueError(f'attn_impl={attn_impl!r} is an invalid setting.')

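# Worked example for gen_slopes: with n_heads=8 and alibi_bias_max=8, _n_heads=8
# and m = [1, 2, ..., 8], so the per-head slopes are 1/2, 1/4, ..., 1/256. For a
# head count that is not a power of two, slopes are computed for the next power
# of two and then interleaved and truncated, e.g. n_heads=6 yields
# 1/4, 1/16, 1/64, 1/256, 1/2, 1/8.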
def gen_slopes(n_heads, alibi_bias_max=8, device=None):
    _n_heads = 2 ** math.ceil(math.log2(n_heads))
    m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
    m = m.mul(alibi_bias_max / _n_heads)
    slopes = 1.0 / torch.pow(2, m)
    if _n_heads != n_heads:
        slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
    return slopes.view(1, n_heads, 1, 1)

def build_alibi_bias(n_heads, seq_len, full=False, alibi_bias_max=8, device=None, dtype=None):
    alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, 1, seq_len)
    if full:
        alibi_bias = alibi_bias - torch.arange(1 - seq_len, 1, dtype=torch.int32, device=device).view(1, 1, seq_len, 1)
        alibi_bias = alibi_bias.abs().mul(-1)
    slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
    alibi_bias = alibi_bias * slopes
    return alibi_bias.to(dtype=dtype)

ATTN_CLASS_REGISTRY = {'multihead_attention': MultiheadAttention, 'multiquery_attention': MultiQueryAttention}
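
For reference, a minimal usage sketch (illustrative only, not part of the uploaded file). It assumes MultiheadAttention, attn_bias_shape, and build_attn_bias are imported from this module, uses attn_impl='torch' so it runs without a GPU, and picks arbitrary small dimensions.

import torch

# Hypothetical example: a causal forward pass with an ALiBi additive bias.
d_model, n_heads, seq_len, batch = 64, 4, 16, 2
attn = MultiheadAttention(d_model=d_model, n_heads=n_heads, attn_impl='torch', attn_pdrop=0.0)

# Allocate the bias buffer at the shape the torch implementation expects,
# then fill it with the ALiBi distance penalties.
bias_shape = attn_bias_shape('torch', n_heads, seq_len, alibi=True, prefix_lm=False, causal=True, use_sequence_id=False)  # (1, n_heads, 1, seq_len)
attn_bias = torch.zeros(bias_shape)
attn_bias = build_attn_bias('torch', attn_bias, n_heads, seq_len, causal=True, alibi=True)

x = torch.randn(batch, seq_len, d_model)
out, _, _ = attn(x, attn_bias=attn_bias, is_causal=True)
print(out.shape)  # torch.Size([2, 16, 64])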