arshadshk committed on
Commit
ed561ef
1 Parent(s): 78bedee

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "_name_or_path": "notdiamond/notdiamond-0001",
+   "adaptive": true,
+   "architectures": [
+     "LSGBertForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "auto_map": {
+     "AutoConfig": "modeling_lsg_bert.LSGBertConfig",
+     "AutoModel": "modeling_lsg_bert.LSGBertModel",
+     "AutoModelForCausalLM": "modeling_lsg_bert.LSGBertLMHeadModel",
+     "AutoModelForMaskedLM": "modeling_lsg_bert.LSGBertForMaskedLM",
+     "AutoModelForMultipleChoice": "modeling_lsg_bert.LSGBertForMultipleChoice",
+     "AutoModelForPreTraining": "modeling_lsg_bert.LSGBertForPreTraining",
+     "AutoModelForQuestionAnswering": "modeling_lsg_bert.LSGBertForQuestionAnswering",
+     "AutoModelForSequenceClassification": "modeling_lsg_bert.LSGBertForSequenceClassification",
+     "AutoModelForTokenClassification": "modeling_lsg_bert.LSGBertForTokenClassification"
+   },
+   "base_model_prefix": "lsg",
+   "block_size": 128,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "gpt-3.5-turbo",
+     "1": "gpt-4"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "gpt-3.5-turbo": 0,
+     "gpt-4": 1
+   },
+   "layer_norm_eps": 1e-12,
+   "lsh_num_pre_rounds": 1,
+   "mask_first_token": false,
+   "max_position_embeddings": 4096,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_global_tokens": 1,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pool_with_global": true,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "pretraining_tp": 1,
+   "problem_type": "single_label_classification",
+   "sparse_block_size": 128,
+   "sparsity_factor": 2,
+   "sparsity_type": "norm",
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.2",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
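
For readers of this commit, a minimal usage sketch (not part of the upload): the `auto_map` above routes the Auto classes to `modeling_lsg_bert.py`, so loading needs `trust_remote_code=True`, and `id2label` shows the classifier decides between `gpt-3.5-turbo` and `gpt-4` for a given prompt. The repo id is taken from `_name_or_path`; the prompt string is purely illustrative.

```python
# Hedged sketch of loading this checkpoint via the Auto classes declared in auto_map.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "notdiamond/notdiamond-0001"  # from _name_or_path above
tokenizer = AutoTokenizer.from_pretrained(model_id)
# trust_remote_code=True pulls modeling_lsg_bert.py so LSGBertForSequenceClassification is used
model = AutoModelForSequenceClassification.from_pretrained(model_id, trust_remote_code=True)
model.eval()

prompt = "Explain the difference between a mutex and a semaphore."  # illustrative input
inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=4096)
with torch.no_grad():
    logits = model(**inputs).logits

# id2label in this config maps 0 -> "gpt-3.5-turbo" and 1 -> "gpt-4"
print(model.config.id2label[logits.argmax(dim=-1).item()])
```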
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64d15aae4474cd19f6f417a67fe7c83a0aa65eeec58a1274c6a5f8dba95b8292
+ size 724026488
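
Side note, not part of the commit itself: the Git LFS pointer above records the sha256 (`oid`) and byte size of `model.safetensors`, so a local download can be verified against it. A small sketch, assuming the file sits in the current directory:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

expected_oid = "64d15aae4474cd19f6f417a67fe7c83a0aa65eeec58a1274c6a5f8dba95b8292"
expected_size = 724026488

h = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:          # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == expected_size, "size mismatch with the LFS pointer"
assert h.hexdigest() == expected_oid, "sha256 mismatch with the LFS pointer"
print("model.safetensors matches the LFS pointer")
```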
modeling_lsg_bert.py ADDED
@@ -0,0 +1,1243 @@
1
+ from logging import warn
2
+ from transformers.models.bert.modeling_bert import *
3
+ import torch
4
+ import torch.nn as nn
5
+ from transformers.models.bert.configuration_bert import BertConfig
6
+ import sys
7
+
8
+ AUTO_MAP = {
9
+ "AutoModel": "modeling_lsg_bert.LSGBertModel",
10
+ "AutoModelForCausalLM": "modeling_lsg_bert.LSGBertLMHeadModel",
11
+ "AutoModelForMaskedLM": "modeling_lsg_bert.LSGBertForMaskedLM",
12
+ "AutoModelForPreTraining": "modeling_lsg_bert.LSGBertForPreTraining",
13
+ "AutoModelForMultipleChoice": "modeling_lsg_bert.LSGBertForMultipleChoice",
14
+ "AutoModelForQuestionAnswering": "modeling_lsg_bert.LSGBertForQuestionAnswering",
15
+ "AutoModelForSequenceClassification": "modeling_lsg_bert.LSGBertForSequenceClassification",
16
+ "AutoModelForTokenClassification": "modeling_lsg_bert.LSGBertForTokenClassification"
17
+ }
18
+
19
+ class LSGBertConfig(BertConfig):
20
+ """
21
+ This class overrides :class:`~transformers.BertConfig`. Please check the superclass for the appropriate
22
+ documentation alongside usage examples.
23
+ """
24
+
25
+ base_model_prefix = "lsg"
26
+ model_type = "bert"
27
+
28
+ def __init__(
29
+ self,
30
+ adaptive=True,
31
+ base_model_prefix="lsg",
32
+ block_size=128,
33
+ lsh_num_pre_rounds=1,
34
+ mask_first_token=False,
35
+ num_global_tokens=1,
36
+ pool_with_global=True,
37
+ sparse_block_size=128,
38
+ sparsity_factor=2,
39
+ sparsity_type="norm",
40
+ **kwargs
41
+ ):
42
+ """Constructs LSGBertConfig."""
43
+ super().__init__(**kwargs)
44
+
45
+ self.adaptive = adaptive
46
+ self.auto_map = AUTO_MAP
47
+ self.base_model_prefix = base_model_prefix
48
+ self.block_size = block_size
49
+ self.lsh_num_pre_rounds = lsh_num_pre_rounds
50
+ self.mask_first_token = mask_first_token
51
+ self.num_global_tokens = num_global_tokens
52
+ self.pool_with_global = pool_with_global
53
+ self.sparse_block_size = sparse_block_size
54
+ self.sparsity_factor = sparsity_factor
55
+ self.sparsity_type = sparsity_type
56
+
57
+ if sparsity_type not in [None, "none", "norm", "lsh", "pooling", "stride", "block_stride", "bos_pooling"]:
58
+ logger.warning(
59
+ "[WARNING CONFIG]: sparsity_mode not in [None, 'none', 'norm', 'lsh', 'pooling', 'stride', 'block_stride', 'bos_pooling'], \
60
+ setting sparsity_type=None, computation will skip sparse attention")
61
+ self.sparsity_type = None
62
+
63
+ if self.sparsity_type in ["stride", "block_stride"]:
64
+ if self.sparsity_factor > self.num_attention_heads:
65
+ logger.warning(
66
+ "[WARNING CONFIG]: sparsity_factor > num_attention_heads is not recommended for stride/block_stride sparsity"
67
+ )
68
+
69
+ if self.num_global_tokens < 1:
70
+ logger.warning(
71
+ "[WARNING CONFIG]: num_global_tokens < 1 is not compatible, setting num_global_tokens=1"
72
+ )
73
+ self.num_global_tokens = 1
74
+ elif self.num_global_tokens > 512:
75
+ logger.warning(
76
+ "[WARNING CONFIG]: num_global_tokens > 512 is not allowed, setting num_global_tokens=512"
77
+ )
78
+ self.num_global_tokens = 512
79
+
80
+ if self.sparsity_factor > 0:
81
+ assert self.block_size % self.sparsity_factor == 0, "[ERROR CONFIG]: block_size must be divisible by sparsity_factor"
82
+ assert self.block_size//self.sparsity_factor >= 1, "[ERROR CONFIG]: make sure block_size >= sparsity_factor"
83
+
84
+ if self.mask_first_token and not pool_with_global:
85
+ logger.warning(
86
+ "[WARNING CONFIG]: pool_with_global==False is not compatible with mask_first_token==True. Setting pool_with_global to True.")
87
+ self.pool_with_global = True
88
+
89
+ if hasattr(self, "position_embedding_type"):
90
+ if self.position_embedding_type != "absolute":
91
+ logger.warning(
92
+ "[WARNING CONFIG]: LSG Attention is not compatible with relative positional embedding and will skip its computation. Set position_embedding_type='absolute' to remove this warning.")
93
+
94
+
95
+ class BaseSelfAttention(nn.Module):
96
+
97
+ def init_modules(self, config):
98
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
99
+ config, "embedding_size"
100
+ ):
101
+ raise ValueError(
102
+ "The hidden size (%d) is not a multiple of the number of attention "
103
+ "heads (%d)" % (config.hidden_size, config.num_attention_heads)
104
+ )
105
+
106
+ self.num_attention_heads = config.num_attention_heads
107
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
108
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
109
+
110
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
111
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
112
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
113
+
114
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
115
+
116
+ def transpose_for_scores(self, x):
117
+ new_x_shape = x.size()[:-1] + (
118
+ self.num_attention_heads,
119
+ self.attention_head_size,
120
+ )
121
+ x = x.view(*new_x_shape)
122
+ return x.permute(0, 2, 1, 3)
123
+
124
+ def reshape_output(self, context_layer):
125
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
126
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
127
+ return context_layer.view(*new_context_layer_shape)
128
+
129
+ def project_QKV(self, hidden_states):
130
+
131
+ query_layer = self.transpose_for_scores(self.query(hidden_states))
132
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
133
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
134
+ return query_layer, key_layer, value_layer
135
+
136
+
137
+ class BaseAttentionProduct(nn.Module):
138
+
139
+ def __init__(self, config):
140
+ """
141
+ Compute attention: softmax(Q @ K.T) @ V
142
+ """
143
+ super().__init__()
144
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
145
+
146
+ def forward(self, query_layer, key_layer, value_layer, attention_mask=None):
147
+
148
+ d = query_layer.shape[-1]
149
+
150
+ # Take the dot product between "query" and "key" to get the raw attention scores.
151
+ attention_scores = query_layer @ key_layer.transpose(-1, -2) / math.sqrt(d)
152
+
153
+ del query_layer
154
+ del key_layer
155
+
156
+ if attention_mask is not None:
157
+ # Apply the attention mask is (precomputed for all layers in BertModel forward() function)
158
+ attention_scores = attention_scores + attention_mask
159
+ del attention_mask
160
+
161
+ # Normalize the attention scores to probabilities.
162
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
163
+
164
+ # This is actually dropping out entire tokens to attend to, which might
165
+ # seem a bit unusual, but is taken from the original Transformer paper.
166
+ context_layer = self.dropout(attention_probs) @ value_layer
167
+
168
+ return context_layer
169
+
170
+
171
+ class CausalAttentionProduct(nn.Module):
172
+
173
+ def __init__(self, config):
174
+ """
175
+ Compute attention: softmax(Q @ K.T) @ V
176
+ """
177
+ super().__init__()
178
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
179
+ self.block_size = config.block_size
180
+
181
+ def forward(self, query_layer, key_layer, value_layer, attention_mask=None, causal_shape=None):
182
+
183
+ d = query_layer.shape[-1]
184
+
185
+ # Take the dot product between "query" and "key" to get the raw attention scores.
186
+ attention_scores = query_layer @ key_layer.transpose(-1, -2) / math.sqrt(d)
187
+
188
+ del query_layer
189
+ del key_layer
190
+
191
+ if attention_mask is not None:
192
+ # Add causal mask
193
+ causal_shape = (self.block_size, self.block_size) if causal_shape is None else causal_shape
194
+ causal_mask = torch.tril(
195
+ torch.ones(*causal_shape, device=attention_mask.device, dtype=attention_scores.dtype),
196
+ diagonal=-1
197
+ )
198
+
199
+ # Min value
200
+ dtype_min = torch.tensor(
201
+ torch.finfo(attention_scores.dtype).min, device=attention_scores.device, dtype=attention_scores.dtype
202
+ )
203
+
204
+ # Build causal + attention_mask
205
+ causal_mask = torch.nn.functional.pad(causal_mask.T * dtype_min, (attention_mask.size()[-1] - self.block_size, 0), value=0)
206
+ attention_mask = torch.max(attention_mask + causal_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0), dtype_min)
207
+
208
+ attention_scores = attention_scores + attention_mask
209
+ del attention_mask
210
+ del causal_mask
211
+
212
+ # Normalize the attention scores to probabilities.
213
+ attention_probs = nn.Softmax(dim=-1)(attention_scores)
214
+
215
+ # This is actually dropping out entire tokens to attend to, which might
216
+ # seem a bit unusual, but is taken from the original Transformer paper.
217
+ context_layer = self.dropout(attention_probs) @ value_layer
218
+
219
+ return context_layer
220
+
221
+
222
+ class LSGAttentionProduct(nn.Module):
223
+
224
+ def __init__(self, config, block_size=None, sparse_block_size=None, sparsity_factor=4, is_causal=False):
225
+ """
226
+ Compute block or overlapping blocks attention products
227
+ """
228
+ super().__init__()
229
+
230
+ self.block_size = block_size
231
+ self.sparse_block_size = sparse_block_size
232
+ self.sparsity_factor = sparsity_factor
233
+ self.is_causal = is_causal
234
+
235
+ if self.block_size is None:
236
+ self.block_size = config.block_size
237
+
238
+ if self.sparse_block_size is None:
239
+ self.sparse_block_size = config.sparse_block_size
240
+
241
+ # Shape of blocks
242
+ self.local_shapes = (self.block_size*3, self.block_size)
243
+ if self.sparse_block_size and self.sparsity_factor > 0:
244
+ self.sparse_shapes = (self.sparse_block_size*3, self.block_size//self.sparsity_factor)
245
+
246
+ if is_causal:
247
+ self.attention = CausalAttentionProduct(config)
248
+ else:
249
+ self.attention = BaseAttentionProduct(config)
250
+
251
+ def build_lsg_inputs(self, hidden_states, sparse_hidden_states, global_hidden_states, is_attn_mask=False):
252
+
253
+ # Build local tokens
254
+ local_hidden_states = self.reshape_to_local_block(hidden_states, is_attn_mask)
255
+ del hidden_states
256
+
257
+ # Build sparse tokens
258
+ if sparse_hidden_states is not None:
259
+ sparse_hidden_states = self.reshape_to_sparse_block(sparse_hidden_states, is_attn_mask)
260
+
261
+ return self.cat_global_sparse_local_tokens(global_hidden_states, sparse_hidden_states, local_hidden_states)
262
+
263
+ def forward(
264
+ self,
265
+ query_layer,
266
+ key_layer,
267
+ value_layer,
268
+ attention_mask=None,
269
+ sparse_key=None,
270
+ sparse_value=None,
271
+ sparse_mask=None,
272
+ global_key=None,
273
+ global_value=None,
274
+ global_mask=None
275
+ ):
276
+
277
+ # Input batch, heads, length, hidden_size
278
+ n, h, t, d = query_layer.size()
279
+ n_blocks = t // self.block_size
280
+ assert t % self.block_size == 0
281
+
282
+ key_layer = self.build_lsg_inputs(
283
+ key_layer,
284
+ sparse_key,
285
+ global_key
286
+ )
287
+ del sparse_key
288
+ del global_key
289
+
290
+ value_layer = self.build_lsg_inputs(
291
+ value_layer,
292
+ sparse_value,
293
+ global_value
294
+ )
295
+ del sparse_value
296
+ del global_value
297
+
298
+ attention_mask = self.build_lsg_inputs(
299
+ attention_mask,
300
+ sparse_mask,
301
+ global_mask.transpose(-1, -2),
302
+ is_attn_mask=True
303
+ ).transpose(-1, -2)
304
+ del sparse_mask
305
+ del global_mask
306
+
307
+ # expect (..., t, d) shape
308
+ # Compute attention
309
+ context_layer = self.attention(
310
+ query_layer=self.chunk(query_layer, n_blocks),
311
+ key_layer=key_layer,
312
+ value_layer=value_layer,
313
+ attention_mask=attention_mask
314
+ )
315
+
316
+ return context_layer.reshape(n, h, -1, d)
317
+
318
+ def reshape_to_local_block(self, hidden_states, is_attn_mask=False):
319
+
320
+ size, step = self.local_shapes
321
+ s = (size - step) // 2
322
+
323
+ # Pad before block reshaping
324
+ if is_attn_mask:
325
+ pad_value = torch.finfo(hidden_states.dtype).min
326
+ hidden_states = hidden_states.transpose(-1, -2)
327
+ else:
328
+ pad_value = 0
329
+
330
+ hidden_states = torch.nn.functional.pad(
331
+ hidden_states.transpose(-1, -2),
332
+ pad=(s, s),
333
+ value=pad_value
334
+ ).transpose(-1, -2)
335
+
336
+ # Make blocks
337
+ hidden_states = hidden_states.unfold(-2, size=size, step=step).transpose(-1, -2)
338
+
339
+ # Skip third block if causal
340
+ if self.is_causal:
341
+ return hidden_states[..., :size*2//3, :]
342
+
343
+ return hidden_states
344
+
345
+ def reshape_to_sparse_block(self, hidden_states, is_attn_mask=False):
346
+
347
+ size, step = self.sparse_shapes
348
+
349
+ # In case of odd case
350
+ odd_offset = (step % 2)
351
+
352
+ # n, h, t, d*2 + 1
353
+ size = size*2
354
+ s = (size - step) // 2 + odd_offset
355
+
356
+ # Pad before block reshaping
357
+ if is_attn_mask:
358
+ pad_value = torch.finfo(hidden_states.dtype).min
359
+ hidden_states = hidden_states.transpose(-1, -2)
360
+ else:
361
+ pad_value = 0
362
+
363
+ hidden_states = torch.nn.functional.pad(
364
+ hidden_states.transpose(-1, -2),
365
+ pad=(s, s),
366
+ value=pad_value
367
+ ).transpose(-1, -2)
368
+
369
+ # Make blocks
370
+ hidden_states = hidden_states.unfold(-2, size=size, step=step).transpose(-1, -2)
371
+
372
+ # Fix case where block_size == sparsify_factor
373
+ if odd_offset:
374
+ hidden_states = hidden_states[..., :-1, :, :]
375
+
376
+ # Indexes for selection
377
+ u = (size - self.block_size * 3 // self.sparsity_factor) // 2 + odd_offset
378
+ s = self.sparse_block_size
379
+
380
+ # Skip right block if causal
381
+ if self.is_causal:
382
+ return hidden_states[..., u-s:u, :]
383
+
384
+ u_ = u + odd_offset
385
+ return torch.cat([hidden_states[..., u-s:u, :], hidden_states[..., -u_:-u_+s, :]], dim=-2)
386
+
387
+ def cat_global_sparse_local_tokens(self, x_global, x_sparse=None, x_local=None, dim=-2):
388
+
389
+ n, h, b, t, d = x_local.size()
390
+ x_global = x_global.unsqueeze(-3).expand(-1, -1, b, -1, -1)
391
+ if x_sparse is not None:
392
+ return torch.cat([x_global, x_sparse, x_local], dim=dim)
393
+ return torch.cat([x_global, x_local], dim=dim)
394
+
395
+ def chunk(self, x, n_blocks):
396
+
397
+ t, d = x.size()[-2:]
398
+ return x.reshape(*x.size()[:-2], n_blocks, -1, d)
399
+
400
+
401
+ class LSGBertEmbeddings(BertEmbeddings):
402
+
403
+ def __init__(self, config):
404
+ super().__init__(config)
405
+
406
+ self.num_global_tokens = config.num_global_tokens
407
+
408
+ # Hardcoded but partially trained
409
+ self.global_embeddings = nn.Embedding(512, embedding_dim=config.hidden_size, )
410
+
411
+ self.block_size = config.block_size
412
+
413
+ def forward(
414
+ self,
415
+ input_ids: Optional[torch.LongTensor] = None,
416
+ token_type_ids: Optional[torch.LongTensor] = None,
417
+ position_ids: Optional[torch.LongTensor] = None,
418
+ inputs_embeds: Optional[torch.FloatTensor] = None,
419
+ past_key_values_length: int = 0,
420
+ ) -> torch.Tensor:
421
+ if input_ids is not None:
422
+ input_shape = input_ids.size()
423
+ else:
424
+ input_shape = inputs_embeds.size()[:-1]
425
+
426
+ seq_length = input_shape[1]
427
+
428
+ if position_ids is None:
429
+ position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
430
+
431
+ # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs
432
+ # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves
433
+ # issue #5664
434
+ if token_type_ids is None:
435
+ if hasattr(self, "token_type_ids"):
436
+ buffered_token_type_ids = self.token_type_ids[:, :seq_length]
437
+ buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
438
+ token_type_ids = buffered_token_type_ids_expanded
439
+ else:
440
+ token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
441
+
442
+ if inputs_embeds is None:
443
+ inputs_embeds = self.word_embeddings(input_ids)
444
+ token_type_embeddings = self.token_type_embeddings(token_type_ids[:, :seq_length])
445
+
446
+ embeddings = inputs_embeds + token_type_embeddings
447
+ if self.position_embedding_type == "absolute":
448
+ position_embeddings = self.position_embeddings(position_ids[:, :seq_length])
449
+ embeddings += position_embeddings
450
+
451
+ #if self.num_global_tokens < 0:
452
+ n, t, d = embeddings.size()
453
+
454
+ # Add global_tokens
455
+ indexes = torch.arange(self.num_global_tokens, device=embeddings.device).reshape(1, -1)
456
+ global_embeddings = self.global_embeddings(indexes)
457
+ embeddings = torch.cat([global_embeddings.expand(n, -1, d), embeddings], dim=-2)
458
+
459
+ embeddings = self.LayerNorm(embeddings)
460
+ embeddings = self.dropout(embeddings)
461
+ return embeddings
462
+
463
+
464
+ class LSGSelfAttention(BaseSelfAttention):
465
+ '''
466
+ Compute local attention with overlapping blocs
467
+ Use global attention for tokens with highest norm
468
+ '''
469
+ def __init__(self, config):
470
+ super().__init__()
471
+
472
+ self.init_modules(config)
473
+
474
+ self.block_size = config.block_size
475
+ self.sparse_block_size = config.sparse_block_size
476
+ self.num_global_tokens = config.num_global_tokens
477
+ self.sparsity_factor = config.sparsity_factor
478
+ self.is_causal = config.is_decoder
479
+ self.is_decoder = config.is_decoder
480
+
481
+ self.attention = LSGAttentionProduct(
482
+ config,
483
+ block_size=config.block_size,
484
+ sparse_block_size=config.sparse_block_size,
485
+ sparsity_factor=self.sparsity_factor,
486
+ is_causal=self.is_causal
487
+ )
488
+
489
+ if self.is_causal:
490
+ self.causal_attention = CausalAttentionProduct(config)
491
+ self.full_attention = BaseAttentionProduct(config)
492
+
493
+ sparse_functions = {
494
+ "norm": self.get_sparse_tokens_with_norm,
495
+ "pooling": self.get_sparse_tokens_with_pooling,
496
+ "lsh": self.get_sparse_tokens_with_lsh,
497
+ "stride": self.get_sparse_tokens_with_stride,
498
+ "block_stride": self.get_sparse_tokens_with_block_stride,
499
+ "bos_pooling": self.get_sparse_tokens_with_bos_pooling
500
+ }
501
+
502
+ self.sparsity_type = config.sparsity_type
503
+ self.get_sparse_elements = sparse_functions.get(self.sparsity_type, lambda w, x, y, z: (None, None, None))
504
+
505
+ if config.sparsity_type == "lsh":
506
+ self.lsh_num_pre_rounds = config.lsh_num_pre_rounds
507
+
508
+ def get_sparse_tokens_with_norm(self, queries, keys, values, mask):
509
+
510
+ if self.sparsity_factor == 1:
511
+ return keys, values, mask.expand(-1, keys.size()[1], -1, -1)
512
+
513
+ with torch.no_grad():
514
+
515
+ block_size = min(self.block_size, self.sparse_block_size)
516
+ key_norm = keys.detach().norm(dim=-1, keepdim=True)
517
+ key_norm = key_norm * ~mask.transpose(-1, -2).bool()
518
+ key_norm = self.chunk(key_norm, block_size)
519
+
520
+ n, h, b, t, d = key_norm.size()
521
+
522
+ idx = key_norm.argsort(dim=-2)
523
+ del key_norm
524
+ idx += (torch.arange(b, device=keys.device)*t).reshape(1, 1, b, 1, 1)
525
+
526
+ split = (t - block_size // self.sparsity_factor, block_size // self.sparsity_factor)
527
+ sparse_idx = idx.split(split, -2)[-1].reshape(n, h, -1, 1)
528
+
529
+ d = keys.size()[-1]
530
+ keys = keys.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
531
+ values = values.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
532
+ mask = mask.expand(-1, h, -1, -1).transpose(-1, -2).gather(dim=-2, index=sparse_idx).transpose(-1, -2)
533
+
534
+ return keys, values, mask
535
+
536
+ def get_sparse_tokens_with_pooling(self, queries, keys, values, mask):
537
+
538
+ if self.sparsity_factor == 1:
539
+ return keys, values, mask.expand(-1, keys.size()[1], -1, -1)
540
+
541
+ keys = self.chunk(keys, self.sparsity_factor)
542
+ values = self.chunk(values, self.sparsity_factor)
543
+
544
+ n, h, b, t, d = keys.size()
545
+ mask = mask.reshape(n, 1, b, 1, t)
546
+ mask = ~mask.transpose(-1, -2).bool()
547
+
548
+ keys = keys * mask
549
+ values = values * mask
550
+
551
+ mask = mask.sum(dim=-2)
552
+ keys = keys.sum(dim=-2) / (mask + 1e-6)
553
+ values = values.sum(dim=-2) / (mask + 1e-6)
554
+
555
+ mask = (1. - mask.clamp(0, 1))
556
+ mask *= torch.finfo(mask.dtype).min
557
+ return keys.reshape(n, h, -1, d), values.reshape(n, h, -1, d), mask.expand(-1, h, -1, -1).transpose(-1, -2)
558
+
559
+ def get_sparse_tokens_with_stride(self, queries, keys, values, mask):
560
+
561
+ if self.sparsity_factor == 1:
562
+ return keys, values, mask.expand(-1, keys.size()[1], -1, -1)
563
+
564
+ n, h, t, d = keys.size()
565
+ sparse_idx = torch.arange(t // self.sparsity_factor, device=keys.device) * self.sparsity_factor
566
+ sparse_idx = sparse_idx.reshape(1, 1, -1, 1) + (torch.arange(h, device=keys.device) % self.sparsity_factor).reshape(1, h, 1, 1)
567
+ sparse_idx = sparse_idx.expand(n, h, -1, 1)
568
+
569
+ keys = keys.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
570
+ values = values.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
571
+ mask = mask.expand(-1, h, -1, -1).transpose(-1, -2).gather(dim=-2, index=sparse_idx).transpose(-1, -2)
572
+
573
+ return keys, values, mask
574
+
575
+ def get_sparse_tokens_with_block_stride(self, queries, keys, values, mask):
576
+
577
+ if self.sparsity_factor == 1:
578
+ return keys, values, mask.expand(-1, keys.size()[1], -1, -1)
579
+
580
+ n, h, t, d = keys.size()
581
+
582
+ t, b = self.block_size, t // self.block_size
583
+ sparse_idx = torch.arange(t // self.sparsity_factor, device=keys.device)
584
+ sparse_idx = sparse_idx.reshape(1, 1, 1, -1, 1) + torch.arange(h, device=keys.device).reshape(1, h, 1, 1, 1) * (t // self.sparsity_factor)
585
+ sparse_idx = (sparse_idx % t)
586
+ sparse_idx = sparse_idx + torch.arange(b, device=keys.device).reshape(1, 1, -1, 1, 1) * t
587
+ sparse_idx = sparse_idx.reshape(1, h, -1, 1).expand(n, h, -1, 1)
588
+
589
+ keys = keys.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
590
+ values = values.gather(dim=-2, index=sparse_idx.expand(-1, -1, -1, d))
591
+ mask = mask.expand(-1, h, -1, -1).transpose(-1, -2).gather(dim=-2, index=sparse_idx).transpose(-1, -2)
592
+
593
+ return keys, values, mask
594
+
595
+ def get_sparse_tokens_with_lsh(self, queries, keys, values, mask):
596
+
597
+ if self.sparsity_factor == 1:
598
+ return keys, values, mask.expand(-1, keys.size()[1], -1, -1)
599
+
600
+ if self.sparsity_factor == self.sparse_block_size:
601
+ return self.get_sparse_tokens_with_bos_pooling(queries, keys, values, mask)
602
+
603
+ block_size = min(self.block_size, self.sparse_block_size)
604
+ keys = self.chunk(keys, block_size)
605
+ values = self.chunk(values, block_size)
606
+
607
+ n, h, b, t, d = keys.size()
608
+ mask = mask.reshape(n, 1, b, 1, t)
609
+ mask = ~mask.transpose(-1, -2).bool()
610
+
611
+ keys = keys * mask
612
+ values = values * mask
613
+ mask = mask.expand(-1, h, -1, -1, -1).float()
614
+
615
+ extra_factor = 1
616
+
617
+ for _ in range(self.lsh_num_pre_rounds):
618
+ keys, values, mask = self.lsh_round(keys, values, mask, t*extra_factor)
619
+
620
+ keys, values, mask = self.lsh_round(keys, values, mask, t//self.sparsity_factor)
621
+ keys /= mask + 1e-8
622
+ values /= mask + 1e-8
623
+
624
+ mask = (1. - mask.clamp(0, 1))
625
+ mask *= torch.finfo(mask.dtype).min
626
+
627
+ return keys.reshape(n, h, -1, d), values.reshape(n, h, -1, d), mask.transpose(-1, -2).reshape(n, h, 1, -1)
628
+
629
+ def lsh_round(self, keys, values, mask, output_size):
630
+
631
+ with torch.no_grad():
632
+
633
+ n_hashes = output_size // 2
634
+ n, h, b, t, d = keys.size()
635
+ binary_mask = mask.clamp(0, 1)
636
+
637
+ indexes = (torch.nn.functional.normalize(keys, dim=-1) * binary_mask) @ torch.randn(1, h, 1, d, n_hashes, device=keys.device)
638
+ indexes = torch.cat([indexes, -indexes], dim=-1).argmax(dim=-1, keepdim=True)
639
+
640
+ n, h, b, t, d = keys.size()
641
+
642
+ x_ = torch.zeros(n, h, b, output_size, d, device=keys.device)
643
+ mask_ = torch.zeros(n, h, b, output_size, 1, device=keys.device)
644
+ keys = torch.scatter_add(x_, dim=-2, index=indexes.expand(-1, -1, -1, -1, d), src=keys)
645
+ values = torch.scatter_add(x_, dim=-2, index=indexes.expand(-1, -1, -1, -1, d), src=values)
646
+ mask = torch.scatter_add(mask_, dim=-2, index=indexes, src=mask)
647
+
648
+ return keys[..., :output_size, :], values[..., :output_size, :], mask[..., :output_size, :]
649
+
650
+ def get_sparse_tokens_with_bos_pooling(self, queries, keys, values, mask):
651
+
652
+ if self.sparsity_factor == 1:
653
+ return keys, values, mask.expand(-1, keys.size()[1], -1, -1)
654
+
655
+ queries = queries.unsqueeze(-3)
656
+ mask = self.chunk(mask.transpose(-1, -2), self.sparsity_factor).transpose(-1, -2)
657
+ keys = self.chunk(keys, self.sparsity_factor)
658
+ values = self.chunk(values, self.sparsity_factor)
659
+
660
+ n, h, b, t, d = keys.size()
661
+ scores = (queries[..., :1, :] @ keys.transpose(-1, -2)) / math.sqrt(d)
662
+ if mask is not None:
663
+ scores = scores + mask
664
+
665
+ scores = torch.softmax(scores, dim=-1)
666
+ keys = scores @ keys
667
+ values = scores @ values
668
+ mask = mask.mean(dim=-1)
669
+ mask[mask != torch.finfo(mask.dtype).min] = 0
670
+
671
+ return keys.reshape(n, h, -1, d), values.reshape(n, h, -1, d), mask.expand(-1, h, -1, -1).transpose(-1, -2)
672
+
673
+ def forward(
674
+ self,
675
+ hidden_states,
676
+ attention_mask=None,
677
+ head_mask=None,
678
+ encoder_hidden_states=None,
679
+ encoder_attention_mask=None,
680
+ past_key_value=None,
681
+ output_attentions=False,
682
+ ):
683
+
684
+ query_layer = self.query(hidden_states)
685
+
686
+ # If this is instantiated as a cross-attention module, the keys
687
+ # and values come from an encoder; the attention mask needs to be
688
+ # such that the encoder's padding tokens are not attended to.
689
+ is_cross_attention = encoder_hidden_states is not None
690
+
691
+ if is_cross_attention and past_key_value is not None:
692
+ # reuse k,v, cross_attentions
693
+ key_layer = past_key_value[0]
694
+ value_layer = past_key_value[1]
695
+ attention_mask = encoder_attention_mask
696
+ elif is_cross_attention:
697
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
698
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
699
+ attention_mask = encoder_attention_mask
700
+ elif past_key_value is not None:
701
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
702
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
703
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
704
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
705
+ else:
706
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
707
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
708
+
709
+ query_layer = self.transpose_for_scores(query_layer)
710
+
711
+ if self.is_decoder:
712
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
713
+ # Further calls to cross_attention layer can then reuse all cross-attention
714
+ # key/value_states (first "if" case)
715
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
716
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
717
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
718
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
719
+ past_key_value = (key_layer, value_layer)
720
+
721
+ if is_cross_attention:
722
+ outputs = self.cross_attention_forward(
723
+ query_layer=query_layer,
724
+ key_layer=key_layer,
725
+ value_layer=value_layer,
726
+ attention_mask=attention_mask,
727
+ output_attentions=output_attentions
728
+ )
729
+ else:
730
+ outputs = self.causal_forward(
731
+ query_layer,
732
+ key_layer,
733
+ value_layer,
734
+ attention_mask=attention_mask,
735
+ output_attentions=output_attentions,
736
+ )
737
+
738
+ outputs = outputs + ((key_layer, value_layer),)
739
+
740
+ else:
741
+ outputs = self.not_causal_forward(
742
+ query_layer,
743
+ key_layer,
744
+ value_layer,
745
+ attention_mask=attention_mask,
746
+ output_attentions=output_attentions
747
+ )
748
+
749
+ return outputs
750
+
751
+ def causal_forward(
752
+ self,
753
+ query_layer,
754
+ key_layer,
755
+ value_layer,
756
+ attention_mask=None,
757
+ output_attentions=False,
758
+ ):
759
+
760
+ n, h, t, d = key_layer.size()
761
+
762
+ # Cat global mask
763
+ attention_mask = torch.nn.functional.pad(attention_mask, (self.num_global_tokens, 0), value=0)
764
+
765
+ # Split input into global tokens and other tokens
766
+ split = (self.num_global_tokens, t - self.num_global_tokens)
767
+ global_query, query_layer = query_layer.split(split, dim=-2)
768
+
769
+ # Use normal causal attention if local attention covers every tokens
770
+ if t <= 2 * self.block_size + self.num_global_tokens:
771
+ context_layer = self.causal_attention(
772
+ query_layer=query_layer,
773
+ key_layer=key_layer,
774
+ value_layer=value_layer,
775
+ attention_mask=attention_mask,
776
+ causal_shape=(t - self.num_global_tokens, t - self.num_global_tokens)
777
+ )
778
+
779
+ context_layer = torch.cat([global_query, context_layer], dim=-2)
780
+ return (self.reshape_output(context_layer), )
781
+
782
+ # Split K Q M on global and non global
783
+ global_key, key_layer = key_layer.split(split, dim=-2)
784
+ global_value, value_layer = value_layer.split(split, dim=-2)
785
+ global_mask, attention_mask = attention_mask.split(split, dim=-1)
786
+
787
+ n, h, t, d = key_layer.size()
788
+
789
+ # Get sparse idx
790
+ sparse_key, sparse_value, sparse_mask = (None, None, None)
791
+ if self.sparse_block_size and self.sparsity_factor > 0:
792
+ sparse_key, sparse_value, sparse_mask = self.get_sparse_elements(query_layer, key_layer, value_layer, attention_mask)
793
+
794
+ # Expand masks on heads
795
+ attention_mask = attention_mask.expand(-1, h, -1, -1)
796
+ global_mask = global_mask.expand(-1, h, -1, -1)
797
+
798
+ # Compute dot product attention
799
+ context_layer = self.attention(
800
+ query_layer,
801
+ key_layer,
802
+ value_layer,
803
+ attention_mask,
804
+ sparse_key=sparse_key,
805
+ sparse_value=sparse_value,
806
+ sparse_mask=sparse_mask,
807
+ global_key=global_key,
808
+ global_value=global_value,
809
+ global_mask=global_mask
810
+ )
811
+
812
+ # Merge pseudo global (causal) and local-sparse tokens
813
+ context_layer = torch.cat([global_query, context_layer], dim=-2)
814
+ context_layer = self.reshape_output(context_layer)
815
+
816
+ return (context_layer,)
817
+
818
+ def not_causal_forward(
819
+ self,
820
+ query_layer,
821
+ key_layer,
822
+ value_layer,
823
+ attention_mask=None,
824
+ output_attentions=False,
825
+ ):
826
+
827
+ n, h, t, d = query_layer.size()
828
+
829
+ # Cat global mask
830
+ attention_mask = torch.nn.functional.pad(attention_mask, (self.num_global_tokens, 0), value=0)
831
+
832
+ # Use normal attention if local attention covers every tokens
833
+ if t <= 2 * self.block_size + self.num_global_tokens:
834
+ context_layer = self.full_attention(
835
+ query_layer=query_layer,
836
+ key_layer=key_layer,
837
+ value_layer=value_layer,
838
+ attention_mask=attention_mask
839
+ )
840
+ return (self.reshape_output(context_layer), )
841
+
842
+ # Split input into global tokens and other tokens
843
+ split = (self.num_global_tokens, t - self.num_global_tokens)
844
+ global_query, query_layer = query_layer.split(split, dim=-2)
845
+
846
+ # Get global_attention
847
+ bos = self.full_attention(
848
+ query_layer=global_query,
849
+ key_layer=key_layer,
850
+ value_layer=value_layer,
851
+ attention_mask=attention_mask
852
+ )
853
+
854
+ # Split K Q M on global and non global
855
+ global_key, key_layer = key_layer.split(split, dim=-2)
856
+ global_value, value_layer = value_layer.split(split, dim=-2)
857
+ global_mask, attention_mask = attention_mask.split(split, dim=-1)
858
+
859
+ n, h, t, d = key_layer.size()
860
+
861
+ # Get sparse idx
862
+ sparse_key, sparse_value, sparse_mask = (None, None, None)
863
+
864
+ if self.sparse_block_size and self.sparsity_factor > 0:
865
+ sparse_key, sparse_value, sparse_mask = self.get_sparse_elements(query_layer, key_layer, value_layer, attention_mask)
866
+
867
+ # Expand masks on heads
868
+ attention_mask = attention_mask.expand(-1, h, -1, -1)
869
+ global_mask = global_mask.expand(-1, h, -1, -1)
870
+
871
+ # Compute dot product attention
872
+ context_layer = self.attention(
873
+ query_layer,
874
+ key_layer,
875
+ value_layer,
876
+ attention_mask,
877
+ sparse_key=sparse_key,
878
+ sparse_value=sparse_value,
879
+ sparse_mask=sparse_mask,
880
+ global_key=global_key,
881
+ global_value=global_value,
882
+ global_mask=global_mask
883
+ )
884
+
885
+ # Merge global and local-sparse tokens
886
+ context_layer = torch.cat([bos, context_layer], dim=-2)
887
+ context_layer = self.reshape_output(context_layer)
888
+
889
+ return (context_layer,)
890
+
891
+ def cross_attention_forward(
892
+ self,
893
+ query_layer,
894
+ key_layer,
895
+ value_layer,
896
+ attention_mask=None,
897
+ output_attentions=False,
898
+ ):
899
+
900
+ context_layer = self.full_attention(
901
+ query_layer=query_layer,
902
+ key_layer=key_layer,
903
+ value_layer=value_layer,
904
+ attention_mask=attention_mask
905
+ )
906
+ return (self.reshape_output(context_layer), )
907
+
908
+ def chunk(self, x, chunk_size):
909
+
910
+ n, h, t, d = x.size()
911
+ return x.reshape(n, h, -1, chunk_size, d)
912
+
913
+
914
+ class LSGAttention(BertAttention):
915
+
916
+ def __init__(self, config):
917
+
918
+ nn.Module.__init__(self)
919
+
920
+ self.self = LSGSelfAttention(config)
921
+ self.output = BertSelfOutput(config)
922
+ self.pruned_heads = set()
923
+
924
+
925
+ class LSGBertLayer(BertLayer):
926
+
927
+ def __init__(self, config):
928
+
929
+ super().__init__(config)
930
+
931
+ self.attention = LSGAttention(config)
932
+ if self.add_cross_attention:
933
+ if not self.is_decoder:
934
+ assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
935
+ self.crossattention = LSGAttention(config)
936
+
937
+
938
+ class LSGBertEncoder(BertEncoder):
939
+
940
+ def __init__(self, config):
941
+
942
+ super().__init__(config)
943
+
944
+ self.layer = nn.ModuleList([LSGBertLayer(config) for _ in range(config.num_hidden_layers)])
945
+
946
+ assert hasattr(config, "num_global_tokens")
947
+ self.num_global_tokens = config.num_global_tokens
948
+ self.pad_idx = config.pad_token_id
949
+
950
+ assert hasattr(config, "block_size") and hasattr(config, "adaptive")
951
+ self.block_size = config.block_size
952
+ self.adaptive = config.adaptive
953
+ self.mask_first_token = config.mask_first_token
954
+ self.pool_with_global = config.pool_with_global
955
+
956
+ def forward(
957
+ self,
958
+ hidden_states: torch.Tensor,
959
+ attention_mask: Optional[torch.FloatTensor] = None,
960
+ head_mask: Optional[torch.FloatTensor] = None,
961
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
962
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
963
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
964
+ use_cache: Optional[bool] = None,
965
+ output_attentions: Optional[bool] = False,
966
+ output_hidden_states: Optional[bool] = False,
967
+ return_dict: Optional[bool] = True,
968
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
969
+
970
+ mask_value = torch.finfo(attention_mask.dtype).min
971
+ n, _, __, t = attention_mask.size()
972
+
973
+ if not (self.config.is_decoder and encoder_hidden_states is not None):
974
+
975
+ b = self.block_size * 2
976
+ pad = t % self.block_size
977
+
978
+ # Check if t is multiple of block_size and pad
979
+ if self.adaptive and t > b and pad > 0:
980
+ pad_length = self.block_size - pad
981
+ hidden_states = torch.nn.functional.pad(hidden_states.transpose(-1, -2), (0, pad_length), value=0.).transpose(-1, -2)
982
+ attention_mask = torch.nn.functional.pad(attention_mask, (0, pad_length), value=mask_value)
983
+
984
+ if self.mask_first_token:
985
+ attention_mask[..., 0] = mask_value
986
+
987
+ encoder_outputs = super().forward(
988
+ hidden_states=hidden_states,
989
+ attention_mask=attention_mask,
990
+ head_mask=head_mask,
991
+ encoder_hidden_states=encoder_hidden_states,
992
+ encoder_attention_mask=encoder_attention_mask,
993
+ past_key_values=past_key_values,
994
+ use_cache=use_cache,
995
+ output_attentions=output_attentions,
996
+ output_hidden_states=output_hidden_states,
997
+ return_dict=return_dict
998
+ )
999
+
1000
+ sequence_output = encoder_outputs[0]
1001
+ if self.pool_with_global:
1002
+ sequence_output[:, self.num_global_tokens] = sequence_output[:, 0]
1003
+
1004
+ # Adapt sequence to initial shape
1005
+ sequence_output = sequence_output[..., self.num_global_tokens: t + self.num_global_tokens, :]
1006
+
1007
+ if not return_dict:
1008
+ return (sequence_output, ) + encoder_outputs[1:]
1009
+
1010
+ encoder_outputs.last_hidden_state = sequence_output
1011
+ return encoder_outputs
1012
+
1013
+
1014
+ class LSGBertPreTrainedModel(BertPreTrainedModel):
1015
+ """
1016
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1017
+ models.
1018
+ """
1019
+
1020
+ config_class = LSGBertConfig
1021
+
1022
+ def _set_gradient_checkpointing(self, module, value=False):
1023
+ if isinstance(module, (BertEncoder, LSGBertEncoder)):
1024
+ module.gradient_checkpointing = value
1025
+
1026
+
1027
+ class LSGBertModel(LSGBertPreTrainedModel, BertModel):
1028
+ """
1029
+ This class overrides :class:`~transformers.BertModel`. Please check the superclass for the appropriate
1030
+ documentation alongside usage examples.
1031
+ """
1032
+
1033
+ def __init__(self, config, add_pooling_layer=True):
1034
+
1035
+ LSGBertPreTrainedModel.__init__(self, config)
1036
+
1037
+ self.config = config
1038
+
1039
+ self.embeddings = LSGBertEmbeddings(config)
1040
+ self.encoder = LSGBertEncoder(config)
1041
+ self.pooler = BertPooler(config) if add_pooling_layer else None
1042
+
1043
+ if config.add_cross_attention:
1044
+ logger.warning(
1045
+ "Cross attention is computed using full attention since it is not LSG compatible."
1046
+ )
1047
+
1048
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
1049
+ if self._use_flash_attention_2:
1050
+ logger.warning(
1051
+ "[WARNING flash-attention]: LSG doesnt support flash-attention currently"
1052
+ )
1053
+
1054
+ # Initialize weights and apply final processing
1055
+ self.post_init()
1056
+
1057
+ def get_extended_attention_mask(self, attention_mask, input_shape, device=None):
1058
+
1059
+ # Do not rely on original triangular mask from BERT/RoBERTa for causalLM
1060
+ if attention_mask.dim() == 3:
1061
+ extended_attention_mask = attention_mask[:, None, :, :]
1062
+ elif attention_mask.dim() == 2:
1063
+ extended_attention_mask = attention_mask[:, None, None, :]
1064
+ else:
1065
+ raise ValueError(
1066
+ f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
1067
+ )
1068
+
1069
+ extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
1070
+ extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(extended_attention_mask.dtype).min
1071
+
1072
+ return extended_attention_mask
1073
+
1074
+
1075
+ class LSGBertForPreTraining(LSGBertPreTrainedModel, BertForPreTraining):
1076
+
1077
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1078
+
1079
+ def __init__(self, config):
1080
+
1081
+ LSGBertPreTrainedModel.__init__(self, config)
1082
+
1083
+ self.bert = LSGBertModel(config)
1084
+ self.cls = BertPreTrainingHeads(config)
1085
+
1086
+ # Initialize weights and apply final processing
1087
+ self.post_init()
1088
+
1089
+
1090
+ class LSGBertLMHeadModel(LSGBertPreTrainedModel, BertLMHeadModel):
1091
+
1092
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1093
+
1094
+ def __init__(self, config):
1095
+
1096
+ LSGBertPreTrainedModel.__init__(self, config)
1097
+
1098
+ if not config.is_decoder:
1099
+ logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
1100
+
1101
+ self.bert = LSGBertModel(config, add_pooling_layer=False)
1102
+ self.cls = BertOnlyMLMHead(config)
1103
+
1104
+ # Initialize weights and apply final processing
1105
+ self.post_init()
1106
+
1107
+
1108
+ class LSGBertForMaskedLM(LSGBertPreTrainedModel, BertForMaskedLM):
1109
+ """
1110
+ This class overrides :class:`~transformers.BertForMaskedLM`. Please check the superclass for the appropriate
1111
+ documentation alongside usage examples.
1112
+ """
1113
+
1114
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1115
+
1116
+ def __init__(self, config):
1117
+
1118
+ LSGBertPreTrainedModel.__init__(self, config)
1119
+
1120
+ if config.is_decoder:
1121
+ logger.warning(
1122
+ "If you want to use `LSGBertForMaskedLM` make sure `config.is_decoder=False` for "
1123
+ "bi-directional self-attention."
1124
+ )
1125
+
1126
+ self.bert = LSGBertModel(config, add_pooling_layer=False)
1127
+ self.cls = BertOnlyMLMHead(config)
1128
+
1129
+ # Initialize weights and apply final processing
1130
+ self.post_init()
1131
+
1132
+
1133
+ class LSGBertForNextSentencePrediction(LSGBertPreTrainedModel, BertForNextSentencePrediction):
1134
+
1135
+ def __init__(self, config):
1136
+
1137
+ LSGBertPreTrainedModel.__init__(self, config)
1138
+
1139
+ self.bert = LSGBertModel(config)
1140
+ self.cls = BertOnlyNSPHead(config)
1141
+
1142
+ # Initialize weights and apply final processing
1143
+ self.post_init()
1144
+
1145
+
1146
+ class LSGBertForSequenceClassification(LSGBertPreTrainedModel, BertForSequenceClassification):
1147
+ """
1148
+ This class overrides :class:`~transformers.BertForSequenceClassification`. Please check the superclass for the
1149
+ appropriate documentation alongside usage examples.
1150
+ """
1151
+
1152
+ def __init__(self, config):
1153
+
1154
+ LSGBertPreTrainedModel.__init__(self, config)
1155
+
1156
+ self.num_labels = config.num_labels
1157
+ self.config = config
1158
+
1159
+ self.bert = LSGBertModel(config)
1160
+ classifier_dropout = (
1161
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1162
+ )
1163
+ self.dropout = nn.Dropout(classifier_dropout)
1164
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1165
+
1166
+ # Initialize weights and apply final processing
1167
+ self.post_init()
1168
+
1169
+
1170
+ class LSGBertForMultipleChoice(LSGBertPreTrainedModel, BertForMultipleChoice):
1171
+ """
1172
+ This class overrides :class:`~transformers.BertForMultipleChoice`. Please check the superclass for the
1173
+ appropriate documentation alongside usage examples.
1174
+ """
1175
+
1176
+ def __init__(self, config):
1177
+
1178
+ LSGBertPreTrainedModel.__init__(self, config)
1179
+
1180
+ self.bert = LSGBertModel(config)
1181
+ classifier_dropout = (
1182
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1183
+ )
1184
+ self.dropout = nn.Dropout(classifier_dropout)
1185
+ self.classifier = nn.Linear(config.hidden_size, 1)
1186
+
1187
+ # Initialize weights and apply final processing
1188
+ self.post_init()
1189
+
1190
+
1191
+ class LSGBertForTokenClassification(LSGBertPreTrainedModel, BertForTokenClassification):
1192
+ """
1193
+ This class overrides :class:`~transformers.BertForTokenClassification`. Please check the superclass for the
1194
+ appropriate documentation alongside usage examples.
1195
+ """
1196
+
1197
+ def __init__(self, config):
1198
+
1199
+ LSGBertPreTrainedModel.__init__(self, config)
1200
+
1201
+ self.num_labels = config.num_labels
1202
+
1203
+ self.bert = LSGBertModel(config, add_pooling_layer=False)
1204
+ classifier_dropout = (
1205
+ config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
1206
+ )
1207
+ self.dropout = nn.Dropout(classifier_dropout)
1208
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1209
+
1210
+ # Initialize weights and apply final processing
1211
+ self.post_init()
1212
+
1213
+
1214
+ class LSGBertForQuestionAnswering(LSGBertPreTrainedModel, BertForQuestionAnswering):
1215
+ """
1216
+ This class overrides :class:`~transformers.BertForQuestionAnswering`. Please check the superclass for the
1217
+ appropriate documentation alongside usage examples.
1218
+ """
1219
+
1220
+ def __init__(self, config):
1221
+
1222
+ LSGBertPreTrainedModel.__init__(self, config)
1223
+
1224
+ self.num_labels = config.num_labels
1225
+
1226
+ self.bert = LSGBertModel(config, add_pooling_layer=False)
1227
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
1228
+
1229
+ # Initialize weights and apply final processing
1230
+ self.post_init()
1231
+
1232
+
1233
+ def str_to_class(classname):
1234
+ return getattr(sys.modules[__name__], classname)
1235
+
1236
+ # Register model in Auto API
1237
+ try:
1238
+ LSGBertConfig.register_for_auto_class()
1239
+ for key, value in AUTO_MAP.items():
1240
+ str_to_class(value.split(".")[-1]).register_for_auto_class(key)
1241
+ except:
1242
+ warn("AutoRegister isn't available, you'll have to manually copy modeling.py after .save_pretrained(...).")
1243
+ warn("Update to transformers >= 4.36.1 to fix.")
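
To make the local-attention bookkeeping above easier to follow, here is a standalone sketch of the overlapping-block reshaping performed by `LSGAttentionProduct.reshape_to_local_block`: keys/values are padded by one block on each side and then windowed with `unfold`, so each block of `block_size` queries attends to `3 * block_size` keys (its own block plus one block of left and one of right context). The toy sizes below are assumptions; the checkpoint itself uses `block_size=128`.

```python
# Assumed toy shapes, mirroring reshape_to_local_block in modeling_lsg_bert.py.
import torch

block_size = 4
size, step = block_size * 3, block_size   # local_shapes = (block_size*3, block_size)
s = (size - step) // 2                    # padding added on each side (= block_size)

x = torch.randn(1, 2, 16, 8)              # (batch, heads, seq_len, head_dim), seq_len % block_size == 0
# pad the sequence dimension, then cut it into overlapping windows
x = torch.nn.functional.pad(x.transpose(-1, -2), pad=(s, s), value=0).transpose(-1, -2)
blocks = x.unfold(-2, size=size, step=step).transpose(-1, -2)
print(blocks.shape)                       # torch.Size([1, 2, 4, 12, 8]): 4 blocks of 3*block_size tokens
```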
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "cls_token": {
+     "content": "[CLS]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "[MASK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "[PAD]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "[SEP]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "[UNK]",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 4096,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
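
One practical note on the tokenizer settings above, offered as a hedged sketch rather than documented behaviour: `model_max_length` is 4096 and `[PAD]` is token 0, and callers only need to pad a batch to its longest sequence, since `LSGBertEncoder.forward` (with `adaptive=true` in config.json) pads internally to a multiple of `block_size` before attention.

```python
# Batch tokenization sketch; block alignment is handled inside the model.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("notdiamond/notdiamond-0001")
batch = tokenizer(
    ["a short prompt", "a considerably longer prompt " * 40],  # illustrative inputs
    padding="longest",       # per-batch padding only
    truncation=True,
    max_length=4096,
    return_tensors="pt",
)
print(batch["input_ids"].shape)
```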
vocab.txt ADDED
The diff for this file is too large to render. See raw diff