zpn committed
Commit 71f4cb9
Parent: 55ccb5d

Upload model

config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "activation_function": "swiglu",
+   "architectures": [
+     "NomicBertModel"
+   ],
+   "attn_pdrop": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_hf_nomic_bert.NomicBertConfig",
+     "AutoModel": "modeling_hf_nomic_bert.NomicBertModel"
+   },
+   "bos_token_id": null,
+   "causal": false,
+   "dense_seq_output": true,
+   "embd_pdrop": 0.1,
+   "eos_token_id": null,
+   "fused_bias_fc": true,
+   "fused_dropout_add_ln": true,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-12,
+   "mlp_fc1_bias": false,
+   "mlp_fc2_bias": false,
+   "model_type": "nomic_bert",
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": 3072,
+   "n_layer": 12,
+   "n_positions": 2048,
+   "pad_vocab_size_multiple": 64,
+   "parallel_block": false,
+   "parallel_block_tied_norm": false,
+   "prenorm": false,
+   "qkv_proj_bias": false,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "rotary_emb_base": 1000,
+   "rotary_emb_fraction": 1.0,
+   "rotary_emb_interleaved": false,
+   "rotary_emb_scale_base": null,
+   "rotary_scaling_factor": null,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.34.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "use_flash_attn": true,
+   "use_rms_norm": false,
+   "use_xentropy": true,
+   "vocab_size": 30528
+ }
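
The "auto_map" block above points AutoConfig and AutoModel at the two Python files added in this commit, so the checkpoint is intended to be loaded with trust_remote_code=True. A minimal loading sketch, assuming this commit lives in a repository id such as nomic-ai/nomic-bert-2048 and that a standard BERT WordPiece tokenizer is paired with it (both are assumptions, not stated in the commit):

import torch
from transformers import AutoModel, AutoTokenizer

repo = "nomic-ai/nomic-bert-2048"  # assumed repo id, not given in this commit
model = AutoModel.from_pretrained(repo, trust_remote_code=True)  # resolves to NomicBertModel via auto_map
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")   # assumed tokenizer; the model pads its vocab to 30528

inputs = tokenizer("hello nomic bert", return_tensors="pt")
with torch.no_grad():
    out = model(**inputs)
print(out.last_hidden_state.shape)  # (1, seq_len, 768), since n_embd is 768
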
configuration_hf_nomic_bert.py ADDED
@@ -0,0 +1,53 @@
+ from transformers import GPT2Config
+
+
+ class NomicBertConfig(GPT2Config):
+     model_type = "nomic_bert"
+
+     def __init__(self,
+                  prenorm=False,
+                  parallel_block=False,
+                  parallel_block_tied_norm=False,
+                  rotary_emb_fraction=0.0,
+                  fused_dropout_add_ln=False,
+                  fused_bias_fc=False,
+                  use_flash_attn=False,
+                  use_xentropy=False,
+                  qkv_proj_bias=True,
+                  rotary_emb_base=1000,
+                  rotary_emb_scale_base=None,
+                  rotary_emb_interleaved=False,
+                  mlp_fc1_bias=True,
+                  mlp_fc2_bias=True,
+                  use_rms_norm=False,
+                  causal=False,
+                  type_vocab_size=2,
+                  dense_seq_output=True,
+                  pad_vocab_size_multiple=1,
+                  tie_word_embeddings=True,
+                  rotary_scaling_factor=1.0,
+                  **kwargs,
+                  ):
+         self.prenorm = prenorm
+         self.parallel_block = parallel_block
+         self.parallel_block_tied_norm = parallel_block_tied_norm
+         self.rotary_emb_fraction = rotary_emb_fraction
+         self.tie_word_embeddings = tie_word_embeddings
+         self.fused_dropout_add_ln = fused_dropout_add_ln
+         self.fused_bias_fc = fused_bias_fc
+         self.use_flash_attn = use_flash_attn
+         self.use_xentropy = use_xentropy
+         self.qkv_proj_bias = qkv_proj_bias
+         self.rotary_emb_base = rotary_emb_base
+         self.rotary_emb_scale_base = rotary_emb_scale_base
+         self.rotary_emb_interleaved = rotary_emb_interleaved
+         self.mlp_fc1_bias = mlp_fc1_bias
+         self.mlp_fc2_bias = mlp_fc2_bias
+         self.use_rms_norm = use_rms_norm
+         self.causal = causal
+         self.type_vocab_size = type_vocab_size
+         self.dense_seq_output = dense_seq_output
+         self.pad_vocab_size_multiple = pad_vocab_size_multiple
+         self.rotary_scaling_factor = rotary_scaling_factor
+
+         super().__init__(**kwargs)
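
NomicBertConfig only records the NomicBERT-specific switches on top of GPT2Config; everything else (n_embd, n_layer, vocab_size, ...) is forwarded through **kwargs to the GPT2Config constructor, and the values in config.json above override these defaults at load time. A small illustrative sketch, assuming the file is importable from the working directory:

from configuration_hf_nomic_bert import NomicBertConfig

config = NomicBertConfig(
    n_embd=768,                  # handled by GPT2Config via **kwargs
    n_layer=12,
    activation_function="swiglu",
    rotary_emb_fraction=1.0,     # NomicBERT-specific field defined above
    qkv_proj_bias=False,
)
print(config.hidden_size)          # 768, GPT2Config maps hidden_size -> n_embd
print(config.rotary_emb_fraction)  # 1.0
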
modeling_hf_nomic_bert.py ADDED
@@ -0,0 +1,1221 @@
1
+ # Copyright (c) 2022, Tri Dao.
2
+ # This BERT implementation is based on our MLPerf 2.0 and MLPerf 2.1 BERT implementation.
3
+ # https://github.com/mlcommons/training_results_v2.0/blob/main/HazyResearch/benchmarks/bert/implementations/pytorch/modeling.py
4
+ # https://github.com/mlcommons/training_results_v2.1/blob/main/Azure-HazyResearch/benchmarks/bert/implementations/ND96amsr_A100_v4/modeling.py
5
+
6
+ # Inspired by https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py
7
+ import os
8
+ import logging
9
+ from functools import partial
10
+ from typing import Optional, List, Tuple, Union
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torch.nn.functional as F
15
+ from einops import rearrange, repeat
16
+ from transformers import GPT2Config, PreTrainedModel
17
+ from transformers.models.bert.modeling_bert import (
18
+ BaseModelOutputWithPoolingAndCrossAttentions,
19
+ BertForPreTrainingOutput,
20
+ SequenceClassifierOutput
21
+ )
22
+
23
+ import re
24
+ from collections import OrderedDict
25
+ from safetensors.torch import load_file as safe_load_file
26
+ from transformers.utils import (
27
+ SAFE_WEIGHTS_INDEX_NAME,
28
+ SAFE_WEIGHTS_NAME,
29
+ WEIGHTS_INDEX_NAME,
30
+ WEIGHTS_NAME,
31
+ )
32
+ from transformers.utils.hub import cached_file, get_checkpoint_shard_files
33
+
34
+
35
+ from .configuration_hf_nomic_bert import NomicBertConfig
36
+
37
+ logger = logging.getLogger(__name__)
38
+
39
+ # adapted from flash attention, added safe serialization option for hf models
40
+ def state_dict_from_pretrained(model_name, safe_serialization=False, device=None, dtype=None):
41
+ # If not fp32, then we don't want to load directly to the GPU
42
+ mapped_device = "cpu" if dtype not in [torch.float32, None] else device
43
+ is_sharded = False
44
+ load_safe = False
45
+ resolved_archive_file = None
46
+
47
+ weights_path = os.path.join(model_name, WEIGHTS_NAME)
48
+ weights_index_path = os.path.join(model_name, WEIGHTS_INDEX_NAME)
49
+ safe_weights_path = os.path.join(model_name, SAFE_WEIGHTS_NAME)
50
+ safe_weights_index_path = os.path.join(model_name, SAFE_WEIGHTS_INDEX_NAME)
51
+
52
+ if os.path.isfile(weights_path):
53
+ resolved_archive_file = cached_file(
54
+ model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False
55
+ )
56
+ elif os.path.isfile(weights_index_path):
57
+ resolved_archive_file = cached_file(
58
+ model_name, WEIGHTS_INDEX_NAME, _raise_exceptions_for_missing_entries=False
59
+ )
60
+ is_sharded = True
61
+ elif os.path.isfile(safe_weights_path):
62
+ resolved_archive_file = cached_file(
63
+ model_name, SAFE_WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False
64
+ )
65
+ load_safe = True
66
+ elif os.path.isfile(safe_weights_index_path):
67
+ resolved_archive_file = cached_file(
68
+ model_name, SAFE_WEIGHTS_INDEX_NAME, _raise_exceptions_for_missing_entries=False
69
+ )
70
+ is_sharded = True
71
+ load_safe = True
72
+ else: # Try loading from HF hub instead of from local files
73
+ weight_name = WEIGHTS_NAME if not safe_serialization else SAFE_WEIGHTS_NAME
74
+ resolved_archive_file = cached_file(model_name, weight_name, _raise_exceptions_for_missing_entries=False)
75
+ if resolved_archive_file is None:
76
+ weight_index = WEIGHTS_INDEX_NAME if not safe_serialization else SAFE_WEIGHTS_INDEX_NAME
77
+ resolved_archive_file = cached_file(model_name, weight_index,
78
+ _raise_exceptions_for_missing_entries=False)
79
+ if resolved_archive_file is not None:
80
+ is_sharded = True
81
+
82
+ load_safe = safe_serialization
83
+
84
+ if resolved_archive_file is None:
85
+ raise EnvironmentError(f"Model name {model_name} was not found.")
86
+
87
+ if load_safe:
88
+ loader = partial(safe_load_file, device=mapped_device)
89
+ else:
90
+ loader = partial(torch.load, map_location=mapped_device)
91
+
92
+ if is_sharded:
93
+ # resolved_archive_file becomes a list of files that point to the different
94
+ # checkpoint shards in this case.
95
+ resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
96
+ model_name, resolved_archive_file
97
+ )
98
+ state_dict = {}
99
+ for sharded_file in resolved_archive_file:
100
+ state_dict.update(loader(sharded_file))
101
+ else:
102
+ state_dict = loader(resolved_archive_file)
103
+ # Convert dtype before moving to GPU to save memory
104
+ if dtype is not None:
105
+ state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()}
106
+ state_dict = {k: v.to(device=device) for k, v in state_dict.items()}
107
+ return state_dict
108
+
109
+
110
+ def filter_shapes(state_dict, model):
111
+ """
112
+ Filters the state dict to match the current model shape.
113
+ """
114
+ filtered_state_dict = {}
115
+ for key, value in state_dict.items():
116
+ if key in model.state_dict():
117
+ if value.shape == model.state_dict()[key].shape:
118
+ filtered_state_dict[key] = value
119
+ return filtered_state_dict
120
+
121
+
122
+ def remap_bert_state_dict(state_dict, config, remove_bert=False, remove_cls_weights=False, add_pooling_layer=False):
123
+ """
124
+ Map the state_dict of a Huggingface BERT model to be flash_attn compatible.
125
+ """
126
+ def add_bert_prefix(key):
127
+ # prepend bert. to the key
128
+ if key.startswith("bert.") or key.startswith("cls."):
129
+ return key
130
+ return f"bert.{key}"
131
+
132
+ state_dict = OrderedDict((add_bert_prefix(k), v) for k, v in state_dict.items())
133
+
134
+ # LayerNorm
135
+ def key_mapping_ln_gamma_beta(key):
136
+ key = re.sub(r"LayerNorm.gamma$", "LayerNorm.weight", key)
137
+ key = re.sub(r"LayerNorm.beta$", "LayerNorm.bias", key)
138
+ return key
139
+
140
+ state_dict = OrderedDict((key_mapping_ln_gamma_beta(k), v) for k, v in state_dict.items())
141
+
142
+ # Layers
143
+ def key_mapping_layers(key):
144
+ return re.sub(r"^bert.encoder.layer\.", "bert.encoder.layers.", key)
145
+
146
+ state_dict = OrderedDict((key_mapping_layers(k), v) for k, v in state_dict.items())
147
+
148
+ # LayerNorm
149
+ def key_mapping_ln(key):
150
+ key = re.sub(r"^bert.embeddings.LayerNorm.", "bert.emb_ln.", key)
151
+ key = re.sub(
152
+ r"^bert.encoder.layers.(\d+).attention.output.LayerNorm.(weight|bias)",
153
+ r"bert.encoder.layers.\1.norm1.\2",
154
+ key,
155
+ )
156
+ key = re.sub(
157
+ r"^bert.encoder.layers.(\d+).output.LayerNorm.(weight|bias)",
158
+ r"bert.encoder.layers.\1.norm2.\2",
159
+ key,
160
+ )
161
+ key = re.sub(
162
+ r"^cls.predictions.transform.LayerNorm.(weight|bias)",
163
+ r"cls.predictions.transform.layer_norm.\1",
164
+ key,
165
+ )
166
+ return key
167
+
168
+ state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items())
169
+
170
+ # MLP
171
+ def key_mapping_mlp(key):
172
+ key = re.sub(
173
+ r"^bert.encoder.layers.(\d+).intermediate.dense.(weight|bias)",
174
+ r"bert.encoder.layers.\1.mlp.fc1.\2",
175
+ key,
176
+ )
177
+ key = re.sub(
178
+ r"^bert.encoder.layers.(\d+).output.dense.(weight|bias)",
179
+ r"bert.encoder.layers.\1.mlp.fc2.\2",
180
+ key,
181
+ )
182
+ return key
183
+
184
+ state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items())
185
+
186
+ # Attention
187
+ last_layer_subset = getattr(config, "last_layer_subset", False)
188
+ for d in range(config.num_hidden_layers):
189
+ if f"bert.encoder.layers.{d}.attention.self.query.weight" not in state_dict:
190
+ continue
191
+ Wq = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.query.weight")
192
+ Wk = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.key.weight")
193
+ Wv = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.value.weight")
194
+ bq = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.query.bias")
195
+ bk = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.key.bias")
196
+ bv = state_dict.pop(f"bert.encoder.layers.{d}.attention.self.value.bias")
197
+ if not (last_layer_subset and d == config.num_hidden_layers - 1):
198
+ state_dict[f"bert.encoder.layers.{d}.attn.Wqkv.weight"] = torch.cat(
199
+ [Wq, Wk, Wv], dim=0
200
+ )
201
+ state_dict[f"bert.encoder.layers.{d}.attn.Wqkv.bias"] = torch.cat([bq, bk, bv], dim=0)
202
+ else:
203
+ state_dict[f"bert.encoder.layers.{d}.attn.Wq.weight"] = Wq
204
+ state_dict[f"bert.encoder.layers.{d}.attn.Wkv.weight"] = torch.cat([Wk, Wv], dim=0)
205
+ state_dict[f"bert.encoder.layers.{d}.attn.Wq.bias"] = bq
206
+ state_dict[f"bert.encoder.layers.{d}.attn.Wkv.bias"] = torch.cat([bk, bv], dim=0)
207
+
208
+ def key_mapping_attn(key):
209
+ return re.sub(
210
+ r"^bert.encoder.layers.(\d+).attention.output.dense.(weight|bias)",
211
+ r"bert.encoder.layers.\1.attn.out_proj.\2",
212
+ key,
213
+ )
214
+
215
+ state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items())
216
+
217
+ def key_mapping_decoder_bias(key):
218
+ return re.sub(r"^cls.predictions.bias", "cls.predictions.decoder.bias", key)
219
+
220
+
221
+ # remove NSP weights, we don't use them
222
+ state_dict.pop("cls.seq_relationship.weight", None)
223
+ state_dict.pop("cls.seq_relationship.bias", None)
224
+ state_dict.pop("bert.embeddings.position_ids", None)
225
+
226
+ state_dict = OrderedDict((key_mapping_decoder_bias(k), v) for k, v in state_dict.items())
227
+
228
+ if remove_cls_weights:
229
+ cls_weights = ["cls.predictions.decoder.bias",
230
+ "cls.predictions.transform.dense.weight",
231
+ "cls.predictions.transform.dense.bias",
232
+ "cls.predictions.transform.layer_norm.weight",
233
+ "cls.predictions.transform.layer_norm.bias",
234
+ "cls.predictions.decoder.weight"]
235
+ for weight in cls_weights:
236
+ state_dict.pop(weight, None)
237
+
238
+ # Word embedding
239
+ pad_vocab_size_multiple = getattr(config, "pad_vocab_size_multiple", 1)
240
+ if pad_vocab_size_multiple > 1:
241
+ word_embeddings = state_dict["bert.embeddings.word_embeddings.weight"]
242
+ state_dict["bert.embeddings.word_embeddings.weight"] = F.pad(
243
+ word_embeddings, (0, 0, 0, config.vocab_size - word_embeddings.shape[0])
244
+ )
245
+ if not remove_cls_weights:
246
+ decoder_weight = state_dict["cls.predictions.decoder.weight"]
247
+ state_dict["cls.predictions.decoder.weight"] = F.pad(
248
+ decoder_weight, (0, 0, 0, config.vocab_size - decoder_weight.shape[0])
249
+ )
250
+ # If the vocab was padded, we want to set the decoder bias for those padded indices to be
251
+ # strongly negative (i.e. the decoder shouldn't predict those indices).
252
+ # TD [2022-05-09]: I don't think it affects the MLPerf training.
253
+ if "cls.predictions.decoder.bias" in state_dict:
254
+ decoder_bias = state_dict["cls.predictions.decoder.bias"]
255
+ state_dict["cls.predictions.decoder.bias"] = F.pad(
256
+ decoder_bias, (0, config.vocab_size - decoder_bias.shape[0]), value=-100.0
257
+ )
258
+
259
+ if add_pooling_layer is False:
260
+ pooler_weights = ["bert.pooler.dense.weight",
261
+ "bert.pooler.dense.bias",
262
+ ]
263
+ for key in pooler_weights:
264
+ state_dict.pop(key, None)
265
+
266
+ if remove_bert:
267
+ def remove_bert_prefix(key):
268
+ key = re.sub(r"^bert.", "", key)
269
+ return key
270
+
271
+ state_dict = OrderedDict((remove_bert_prefix(k), v) for k, v in state_dict.items())
272
+
273
+
274
+ return state_dict
275
+
276
+
277
+ class NomicBertPreTrainedModel(PreTrainedModel):
278
+ """An abstract class to handle weights initialization and
279
+ a simple interface for downloading and loading pretrained models.
280
+ """
281
+ config_class = NomicBertConfig
282
+ base_model_prefix = "model"
283
+ supports_gradient_checkpointing = True
284
+ _no_split_modules = ["Block"]
285
+ _skip_keys_device_placement = "past_key_values"
286
+
287
+ def __init__(self, config, *inputs, **kwargs):
288
+ super().__init__(config)
289
+ if not isinstance(config, GPT2Config):
290
+ raise ValueError(
291
+ "Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
292
+ "To create a model from a Google pretrained model use "
293
+ "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
294
+ self.__class__.__name__, self.__class__.__name__
295
+ )
296
+ )
297
+ self.config = config
298
+
299
+ @classmethod
300
+ def from_pretrained(cls, model_name, config=None, *inputs, **kwargs):
301
+ """
302
+ Instantiate a NomicBertPreTrainedModel from a pre-trained model file or a pytorch state dict.
303
+ Download and cache the pre-trained model file if needed.
304
+
305
+ Params:
306
+ pretrained_model_name_or_path: either:
307
+ - a path or url to a pretrained model archive containing:
308
+ . `bert_config.json` a configuration file for the model
309
+ . `pytorch_model.bin` a PyTorch dump of a NomicBertForPreTraining instance
310
+ - a path or url to a pretrained model archive containing:
311
+ . `bert_config.json` a configuration file for the model
312
+ . `model.chkpt` a TensorFlow checkpoint
313
+ *inputs, **kwargs: additional input for the specific NomicBert class
314
+ (ex: num_labels for NomicBertForSequenceClassification)
315
+ """
316
+ # Instantiate model.
317
+ if config is None:
318
+ config = cls.config_class.from_pretrained(model_name)
319
+ remove_cls = cls != NomicBertForPreTraining
320
+ remove_bert_prefix = cls != NomicBertForPreTraining
321
+ ignore_mismatched_shapes = kwargs.pop("ignore_mismatched_sizes", False)
322
+ num_labels = kwargs.pop("num_labels", None)
323
+ rotary_scaling_factor = kwargs.pop("rotary_scaling_factor", None)
324
+ config.rotary_scaling_factor = rotary_scaling_factor
325
+ if config.n_positions <= 0 and config.rotary_emb_fraction > 0:
326
+ config.n_positions = 2048
327
+ if num_labels:
328
+ config.num_labels = num_labels
329
+
330
+ if "add_pooling_layer" in kwargs:
331
+ model = cls(config, *inputs, add_pooling_layer=kwargs.pop("add_pooling_layer"))
332
+ else:
333
+ model = cls(config, *inputs, add_pooling_layer=False)
334
+ # TODO: fix this
335
+ # Assuming we know what we're doing when loading from disk
336
+ # Prob a bad assumption but i'm tired and want to train this asap
337
+ if os.path.exists(model_name):
338
+ state_dict = torch.load(f"{model_name}/pytorch_model.bin")
339
+ if ignore_mismatched_shapes:
340
+ state_dict = filter_shapes(state_dict, model)
341
+ load_return = model.load_state_dict(state_dict, strict=False)
342
+ else:
343
+ # TODO: can probably check config class and see if we need to remap from a bert model
344
+ state_dict = state_dict_from_pretrained(model_name)
345
+ state_dict = remap_bert_state_dict(state_dict,
346
+ config,
347
+ remove_bert=remove_bert_prefix,
348
+ remove_cls_weights=remove_cls,
349
+ add_pooling_layer=getattr(config, "add_pooling_layer", False)
350
+ )
351
+ if ignore_mismatched_shapes:
352
+ state_dict = filter_shapes(state_dict, model)
353
+
354
+ load_return = model.load_state_dict(
355
+ state_dict,
356
+ strict=True
357
+ )
358
+ logger.warning(load_return)
359
+ return model
360
+
361
+ def _set_gradient_checkpointing(self, module, value=False):
362
+ if isinstance(module, NomicBertEncoder):
363
+ module.gradient_checkpointing = value
364
+
365
+
366
+ # https://github.com/huggingface/transformers/blob/7032e0203262ebb2ebf55da8d2e01f873973e835/src/transformers/models/bert/modeling_bert.py#L748
367
+ def _init_weights(module, initializer_range=0.02):
368
+ if isinstance(module, nn.Linear):
369
+ nn.init.normal_(module.weight, std=initializer_range)
370
+ if module.bias is not None:
371
+ nn.init.zeros_(module.bias)
372
+ elif isinstance(module, nn.Embedding):
373
+ nn.init.normal_(module.weight, std=initializer_range)
374
+ if module.padding_idx is not None:
375
+ nn.init.zeros_(module.weight[module.padding_idx])
376
+
377
+
378
+ class NomicBertEmbeddings(nn.Module):
379
+ def __init__(
380
+ self,
381
+ config
382
+ ):
383
+ """
384
+ If max_position_embeddings <= 0, there's no position embeddings
385
+ If type_vocab_size <= 0, there's no token type embeddings
386
+ """
387
+ super().__init__()
388
+ self.word_embeddings = nn.Embedding(
389
+ config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
390
+ )
391
+ self.max_position_embeddings = config.max_position_embeddings if config.rotary_emb_fraction <= 0 else 0
392
+ self.type_vocab_size = config.type_vocab_size
393
+ if self.max_position_embeddings > 0 and config.rotary_emb_fraction <= 0:
394
+ self.position_embeddings = nn.Embedding(
395
+ config.max_position_embeddings, config.hidden_size,
396
+ )
397
+ if self.type_vocab_size > 0:
398
+ self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
399
+
400
+ def forward(self, input_ids, position_ids=None, token_type_ids=None):
401
+ """
402
+ input_ids: (batch, seqlen)
403
+ position_ids: (batch, seqlen)
404
+ token_type_ids: (batch, seqlen)
405
+ """
406
+ batch_size, seqlen = input_ids.shape
407
+ embeddings = self.word_embeddings(input_ids)
408
+
409
+ if self.type_vocab_size > 0:
410
+ if token_type_ids is None:
411
+ token_type_ids = torch.zeros(seqlen, dtype=torch.long, device=input_ids.device)
412
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
413
+ embeddings = embeddings + token_type_embeddings
414
+
415
+ if self.max_position_embeddings > 0:
416
+ if position_ids is None:
417
+ position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
418
+ position_embeddings = self.position_embeddings(position_ids)
419
+ embeddings = embeddings + position_embeddings
420
+ return embeddings
421
+
422
+ class NomicBertMLP(nn.Module):
423
+ def __init__(
424
+ self,
425
+ in_features,
426
+ hidden_features=None,
427
+ out_features=None,
428
+ activation=F.gelu,
429
+ bias1=True,
430
+ bias2=True,
431
+ return_residual=False,
432
+ fused_bias_fc=False,
433
+ ):
434
+ super().__init__()
435
+ out_features = out_features if out_features is not None else in_features
436
+ hidden_features = hidden_features if hidden_features is not None else in_features * 4
437
+ self.return_residual = return_residual
438
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1)
439
+ approximate = (
440
+ "tanh"
441
+ if activation in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"]
442
+ else "none"
443
+ )
444
+ self.activation = nn.GELU(approximate=approximate) if activation == "gelu" else activation
445
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2)
446
+
447
+ def forward(self, x):
448
+ y = self.fc1(x)
449
+ y = self.activation(y)
450
+ y = self.fc2(y)
451
+ return y if not self.return_residual else (y, x)
452
+
453
+
454
+ class NomciBertGatedMLP(nn.Module):
455
+ def __init__(
456
+ self,
457
+ in_features,
458
+ hidden_features=None,
459
+ out_features=None,
460
+ activation=F.sigmoid,
461
+ bias1=True,
462
+ bias2=True,
463
+ multiple_of=256,
464
+ return_residual=False,
465
+ fused_bias_fc=True,
466
+ device=None,
467
+ dtype=None,
468
+ ):
469
+ super().__init__()
470
+ out_features = out_features if out_features is not None else in_features
471
+ hidden_features = (
472
+ hidden_features if hidden_features is not None else int(8 * in_features / 3)
473
+ )
474
+ hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
475
+ self.return_residual = return_residual
476
+
477
+ self.fc11 = nn.Linear(in_features, hidden_features, bias=bias1)
478
+ self.fc12 = nn.Linear(in_features, hidden_features, bias=bias1)
479
+ self.activation = activation
480
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2)
481
+
482
+ def forward(self, x):
483
+ y = self.fc11(x)
484
+ gate = self.fc12(x)
485
+ if self.activation == F.sigmoid: # Special case for GLU
486
+ y = F.glu(torch.cat([y, gate], dim=-1), dim=-1)
487
+ else:
488
+ y = y * self.activation(gate)
489
+ y = self.fc2(y)
490
+ return y if not self.return_residual else (y, x)
491
+
492
+
493
+ def rotate_half(x, interleaved=False):
494
+ if not interleaved:
495
+ x1, x2 = x.chunk(2, dim=-1)
496
+ return torch.cat((-x2, x1), dim=-1)
497
+ else:
498
+ x1, x2 = x[..., ::2], x[..., 1::2]
499
+ return rearrange(torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2)
500
+
501
+
502
+ def apply_rotary_emb(x, cos, sin, offset=0, interleaved=False):
503
+ """
504
+ x: (batch_size, seqlen, nheads, headdim)
505
+ cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
506
+ """
507
+ ro_dim = cos.shape[-1] * 2
508
+ assert ro_dim <= x.shape[-1]
509
+ cos, sin = (
510
+ cos[offset: offset + x.shape[1]],
511
+ sin[offset: offset + x.shape[1]],
512
+ )
513
+ cos = repeat(cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
514
+ sin = repeat(sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
515
+ return torch.cat(
516
+ [x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, x[..., ro_dim:]],
517
+ dim=-1,
518
+ )
519
+
520
+
521
+ class NomicBertRotaryEmbedding(nn.Module):
522
+ def __init__(
523
+ self,
524
+ dim: int,
525
+ base=10000.0,
526
+ interleaved=False,
527
+ scale_base=None,
528
+ pos_idx_in_fp32=True,
529
+ device=None,
530
+ ):
531
+ """
532
+ interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
533
+ of 1st half and 2nd half (GPT-NeoX style).
534
+ pos_idx_in_fp32: if True, the position indices [0.0, ..., seqlen - 1] are in fp32,
535
+ otherwise they might be in lower precision.
536
+ This option was added because previously (before 2023-07-02), when we construct
537
+ the position indices, we use the dtype of self.inv_freq. In most cases this would
538
+ be fp32, but if the model is trained in pure bf16 (not mixed precision), then
539
+ self.inv_freq would be bf16, and the position indices are also in bf16.
540
+ Because of the limited precision of bf16 (e.g. 1995.0 is rounded to 2000.0), the
541
+ embeddings for some positions will coincide.
542
+ To maintain compatibility with models previously trained in pure bf16,
543
+ we add this option.
544
+ """
545
+ super().__init__()
546
+ self.dim = dim
547
+ self.base = float(base)
548
+ self.pos_idx_in_fp32 = pos_idx_in_fp32
549
+ # Generate and save the inverse frequency buffer (non trainable)
550
+ inv_freq = self._compute_inv_freq(device)
551
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
552
+ self.interleaved = interleaved
553
+ self.scale_base = scale_base
554
+
555
+ self._seq_len_cached = 0
556
+ self._cos_cached = None
557
+ self._sin_cached = None
558
+ self._cos_k_cached = None
559
+ self._sin_k_cached = None
560
+
561
+ def _compute_inv_freq(self, device=None):
562
+ return 1.0 / (
563
+ self.base
564
+ ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)
565
+ )
566
+
567
+ def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
568
+ # Reset the tables if the sequence length has changed,
569
+ # if we're on a new device (possibly due to tracing for instance),
570
+ # or if we're switching from inference mode to training
571
+ if (
572
+ seqlen > self._seq_len_cached
573
+ or self._cos_cached is None
574
+ or self._cos_cached.device != device
575
+ or self._cos_cached.dtype != dtype
576
+ or (self.training and self._cos_cached.is_inference())
577
+ ):
578
+ self._seq_len_cached = seqlen
579
+ # We want fp32 here, not self.inv_freq.dtype, since the model could be loaded in bf16
580
+ # And the output of arange can be quite large, so bf16 would lose a lot of precision.
581
+ # However, for compatibility reason, we add an option to use the dtype of self.inv_freq.
582
+ if self.pos_idx_in_fp32:
583
+ t = torch.arange(seqlen, device=device, dtype=torch.float32)
584
+ # We want fp32 here as well since inv_freq will be multiplied with t, and the output
585
+ # will be large. Having it in bf16 will lose a lot of precision and cause the
586
+ # cos & sin output to change significantly.
587
+ # We want to recompute self.inv_freq if it was not loaded in fp32
588
+ if self.inv_freq.dtype != torch.float32:
589
+ inv_freq = self._compute_inv_freq(device=device)
590
+ else:
591
+ inv_freq = self.inv_freq
592
+ else:
593
+ t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
594
+ inv_freq = self.inv_freq
595
+ # Don't do einsum, it converts fp32 to fp16 under AMP
596
+ # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
597
+ freqs = torch.outer(t, inv_freq)
598
+ self._cos_cached = torch.cos(freqs).to(dtype)
599
+ self._sin_cached = torch.sin(freqs).to(dtype)
600
+
601
+ def forward(
602
+ self,
603
+ qkv: torch.Tensor,
604
+ kv: Optional[torch.Tensor] = None,
605
+ seqlen_offset: Union[int, torch.Tensor] = 0,
606
+ max_seqlen: Optional[int] = None,
607
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
608
+ """
609
+ qkv: (batch, seqlen, 3, nheads, headdim) if kv is none,
610
+ else it's just q of shape (batch, seqlen, nheads, headdim)
611
+ kv: (batch, seqlen, 2, nheads, headdim)
612
+ seqlen_offset: (batch_size,) or int. Each sequence in x is shifted by this amount.
613
+ Most commonly used in inference when we have KV cache.
614
+ If it's a tensor of shape (batch_size,), then to update the cos / sin cache, one
615
+ should pass in max_seqlen, which will update the cos / sin cache up to that length.
616
+ Apply rotary embedding *inplace* to qkv and / or kv.
617
+ """
618
+ seqlen = qkv.shape[1]
619
+ if max_seqlen is not None:
620
+ self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
621
+ elif isinstance(seqlen_offset, int):
622
+ self._update_cos_sin_cache(seqlen + seqlen_offset, device=qkv.device, dtype=qkv.dtype)
623
+
624
+ q_rot = apply_rotary_emb(qkv[:, :, 0], self._cos_cached, self._sin_cached, seqlen_offset, self.interleaved)
625
+ k_rot = apply_rotary_emb(qkv[:, :, 1], self._cos_cached, self._sin_cached, seqlen_offset, self.interleaved)
626
+ return torch.stack((q_rot, k_rot, qkv[:, :, 2]), dim=2)
627
+
628
+
629
+ class NomicBertDynamicNTKRotaryEmbedding(NomicBertRotaryEmbedding):
630
+ def __init__(self, rotary_scaling_factor, max_position_embeddings, **kwargs):
631
+ super().__init__(**kwargs)
632
+ self.rotary_scaling_factor = rotary_scaling_factor
633
+ self.max_position_embeddings = max_position_embeddings
634
+
635
+
636
+ def _compute_inv_freq(self, base=None, device=None):
637
+ if base is None:
638
+ base = self.base
639
+ return 1.0 / (
640
+ base
641
+ ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)
642
+ )
643
+
644
+ def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
645
+ # Reset the tables if the sequence length has changed,
646
+ # if we're on a new device (possibly due to tracing for instance),
647
+ # or if we're switching from inference mode to training
648
+ if seqlen > self.max_position_embeddings:
649
+ base = self.base * (
650
+ (self.rotary_scaling_factor * seqlen / self.max_position_embeddings) - (self.rotary_scaling_factor - 1)
651
+ ) ** (self.dim / (self.dim - 2))
652
+ inv_freq = self._compute_inv_freq(base=base, device=device)
653
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
654
+
655
+ if (
656
+ seqlen > self._seq_len_cached
657
+ or self._cos_cached is None
658
+ or self._cos_cached.device != device
659
+ or self._cos_cached.dtype != dtype
660
+ or (self.training and self._cos_cached.is_inference())
661
+ ):
662
+ self._seq_len_cached = seqlen
663
+ # We want fp32 here, not self.inv_freq.dtype, since the model could be loaded in bf16
664
+ # And the output of arange can be quite large, so bf16 would lose a lot of precision.
665
+ # However, for compatibility reason, we add an option to use the dtype of self.inv_freq.
666
+ if self.pos_idx_in_fp32:
667
+ t = torch.arange(seqlen, device=device, dtype=torch.float32)
668
+ # We want fp32 here as well since inv_freq will be multiplied with t, and the output
669
+ # will be large. Having it in bf16 will lose a lot of precision and cause the
670
+ # cos & sin output to change significantly.
671
+ # We want to recompute self.inv_freq if it was not loaded in fp32
672
+ if self.inv_freq.dtype != torch.float32:
673
+ if seqlen > self.max_position_embeddings:
674
+ base = self.base * (
675
+ (self.rotary_scaling_factor * seqlen / self.max_position_embeddings) - (self.rotary_scaling_factor - 1)
676
+ ) ** (self.dim / (self.dim - 2))
677
+ else:
678
+ base = self.base
679
+ inv_freq = self._compute_inv_freq(device=device, base=base)
680
+ else:
681
+ inv_freq = self.inv_freq
682
+ else:
683
+ t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype)
684
+ inv_freq = self.inv_freq
685
+ # Don't do einsum, it converts fp32 to fp16 under AMP
686
+ # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
687
+ freqs = torch.outer(t, inv_freq)
688
+ if getattr(self, "scale", None) is None:  # no xpos-style scale buffer is ever set on this class
689
+ self._cos_cached = torch.cos(freqs).to(dtype)
690
+ self._sin_cached = torch.sin(freqs).to(dtype)
691
+ else:
692
+ power = (
693
+ torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device)
694
+ - seqlen // 2
695
+ ) / self.scale_base
696
+ scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")
697
+ # We want the multiplication by scale to happen in fp32
698
+ self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
699
+ self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
700
+ self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
701
+ self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)
702
+
703
+ class NomicBertAttention(nn.Module):
704
+ """Multi-head self-attention and cross-attention"""
705
+
706
+ def __init__(
707
+ self,
708
+ config,
709
+ ) -> None:
710
+ """
711
+ num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads.
712
+ return_residual: whether to return the input x along with the output. This is for
713
+ performance reason: for post-norm architecture, returning the input allows us
714
+ to fuse the backward of nn.Linear with the residual connection.
715
+ """
716
+ super().__init__()
717
+ self.embed_dim = config.n_embd
718
+ self.use_flash_attn = config.use_flash_attn
719
+ self.fused_bias_fc = config.fused_bias_fc
720
+
721
+ self.num_heads = config.n_head
722
+ self.num_heads_kv = config.num_heads_kv if getattr(config, "num_heads_kv", None) is not None else self.num_heads
723
+ assert self.embed_dim % self.num_heads == 0, "embed_dim must be divisible by num_heads"
724
+ self.head_dim = self.embed_dim // self.num_heads
725
+ # we don't really support mqa / gqa for now
726
+ qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv)
727
+
728
+ self.register_buffer(
729
+ "norm_factor",
730
+ torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()),
731
+ persistent=False,
732
+ )
733
+
734
+ self.rotary_emb_dim = self.head_dim * config.rotary_emb_fraction
735
+ if self.rotary_emb_dim > 0:
736
+ if config.rotary_scaling_factor:
737
+ self.rotary_emb = NomicBertDynamicNTKRotaryEmbedding(
738
+ dim=self.rotary_emb_dim,
739
+ base=config.rotary_emb_base,
740
+ scale_base=config.rotary_emb_scale_base,
741
+ interleaved=config.rotary_emb_interleaved,
742
+ rotary_scaling_factor=config.rotary_scaling_factor,
743
+ max_position_embeddings=config.n_positions,
744
+ )
745
+ else:
746
+ self.rotary_emb = NomicBertRotaryEmbedding(
747
+ dim=self.rotary_emb_dim,
748
+ base=config.rotary_emb_base,
749
+ scale_base=config.rotary_emb_scale_base,
750
+ interleaved=config.rotary_emb_interleaved,
751
+ )
752
+ # bug in xformers: https://github.com/facebookresearch/xformers/issues/841
753
+ # uses the head dimension instead of the sequence dimension
754
+ self.rotary_head_dim = getattr(config, "rotary_head_dim", False)
755
+
756
+ self.Wqkv = nn.Linear(self.embed_dim, qkv_dim, bias=config.qkv_proj_bias)
757
+
758
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_proj_bias)
759
+ self.causal = config.causal
760
+ self.drop = nn.Dropout(config.attn_pdrop)
761
+
762
+ def forward(
763
+ self,
764
+ hidden_states: torch.Tensor,
765
+ attention_mask: Optional[torch.Tensor] = None,
766
+ position_ids: Optional[torch.LongTensor] = None,
767
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
768
+ output_attentions: bool = False,
769
+ use_cache: bool = False,
770
+ is_padded_inputs: Optional[bool] = True,
771
+ cu_seqlens: Optional[torch.Tensor] = None,
772
+ max_seq_len: Optional[int] = None,
773
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
774
+
775
+ has_layer_past = past_key_value is not None
776
+
777
+ if has_layer_past:
778
+ past_key_value = past_key_value[0]
779
+ past_len = past_key_value[1]
780
+ else:
781
+ past_len = 0
782
+
783
+ qkv = self.Wqkv(hidden_states)
784
+ qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)
785
+
786
+ past_key_value = (past_key_value, past_len + qkv.size(1)) if use_cache else None
787
+
788
+ if self.rotary_emb_dim > 0:
789
+ if self.rotary_head_dim:
790
+ qkv = rearrange(qkv, "b s three h d -> b h three s d")
791
+ qkv = self.rotary_emb(qkv, seqlen_offset=past_len)
792
+
793
+ if self.rotary_head_dim:
794
+ qkv = rearrange(qkv, "b h three s d -> b s three h d")
795
+
796
+ query, key, value = qkv[:, :, 0], qkv[:, :, 1], qkv[:, :, 2]
797
+
798
+ query = query.permute(0, 2, 1, 3)
799
+ key = key.permute(0, 2, 1, 3)
800
+ value = value.permute(0, 2, 1, 3)
801
+
802
+ attention_scores = torch.matmul(query, key.transpose(-1, -2)) / self.norm_factor
803
+ if attention_mask is not None:
804
+ attention_scores = attention_scores + attention_mask
805
+
806
+ attentions_probs = F.softmax(attention_scores, dim=-1)
807
+ attentions_probs = self.drop(attentions_probs)
808
+
809
+ attn_output = torch.matmul(attentions_probs, value)
810
+ attn_output = rearrange(attn_output.permute(0, 2, 1, 3), "... h d -> ... (h d)")
811
+
812
+ attn_output = self.out_proj(attn_output)
813
+
814
+ return attn_output
815
+
816
+
817
+ class NomicBertBlock(nn.Module):
818
+ def __init__(
819
+ self,
820
+ config,
821
+ ):
822
+ super().__init__()
823
+ self.prenorm = config.prenorm
824
+ self.fused_dropout_add_ln = config.fused_dropout_add_ln
825
+
826
+ self.attn = NomicBertAttention(config)
827
+ activation = (
828
+ F.sigmoid
829
+ if config.activation_function == "glu"
830
+ else (F.silu if config.activation_function == "swiglu" else F.gelu)
831
+ )
832
+ if config.activation_function in ["glu", "swiglu", "geglu"]:
833
+ self.mlp = NomciBertGatedMLP(config.n_embd, hidden_features=config.n_inner, bias1=config.mlp_fc1_bias, bias2=config.mlp_fc2_bias, activation=activation, fused_bias_fc=config.fused_bias_fc)
834
+ else:
835
+ self.mlp = NomicBertMLP(config.n_embd, hidden_features=config.n_inner, bias1=config.mlp_fc1_bias, bias2=config.mlp_fc2_bias, activation=activation, fused_bias_fc=config.fused_bias_fc)
836
+
837
+ self.dropout1 = nn.Dropout(config.resid_pdrop)
838
+ self.norm1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
839
+ self.norm2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
840
+ self.dropout2 = nn.Dropout(config.resid_pdrop)
841
+
842
+ def forward(
843
+ self,
844
+ hidden_states: torch.Tensor,
845
+ hidden_states2: torch.Tensor,
846
+ residual: Optional[torch.Tensor] = None,
847
+ attention_mask: Optional[torch.Tensor] = None,
848
+ position_ids: Optional[torch.LongTensor] = None,
849
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
850
+ is_padded_inputs: Optional[bool] = True,
851
+ output_attentions: Optional[bool] = False,
852
+ use_cache: Optional[bool] = False,
853
+ cu_seqlens: Optional[torch.Tensor] = None,
854
+ max_seq_len: Optional[int] = None,
855
+ ):
856
+ r"""Pass the input through the encoder layer.
857
+
858
+ Args:
859
+ hidden_states: the sequence to the encoder layer (required).
860
+ residual: if postnorm, residual=None, If prenorm, hidden_states = Attn/MLP(LN(residual))
861
+ mixer_subset: for cross-attention only. If not None, will take a subset of x
862
+ before applying the query projection. Useful for e.g., ViT where we only care
863
+ about the CLS token in the last layer.
864
+ """
865
+ if self.prenorm:
866
+ dropped = self.dropout1(hidden_states)
867
+ residual = (dropped + residual) if residual is not None else dropped
868
+ hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
869
+ hidden_states = self.attn(hidden_states, attention_mask=attention_mask, is_padded_inputs=is_padded_inputs, cu_seqlens=cu_seqlens, max_seq_len=max_seq_len)
870
+
871
+ dropped = self.dropout2(hidden_states)
872
+ residual = (dropped + residual) if residual is not None else dropped
873
+ hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
874
+ hidden_states = self.mlp(hidden_states)
875
+
876
+ return hidden_states, None, residual
877
+ else:
878
+ assert residual is None
879
+ attn_outputs = self.attn(hidden_states,
880
+ attention_mask=attention_mask,
881
+ is_padded_inputs=is_padded_inputs,
882
+ cu_seqlens=cu_seqlens,
883
+ max_seq_len=max_seq_len)
884
+ hidden_states = self.norm1(
885
+ (self.dropout1(attn_outputs) + hidden_states).to(
886
+ dtype=self.norm1.weight.dtype
887
+ )
888
+ )
889
+ mlp_out = self.mlp(hidden_states)
890
+
891
+ hidden_states = self.norm2(
892
+ (self.dropout2(mlp_out) + hidden_states).to(
893
+ dtype=self.norm2.weight.dtype
894
+ )
895
+ )
896
+ return hidden_states, None, None
897
+
898
+
899
+ class NomicBertEncoder(nn.Module):
900
+ def __init__(self, config: GPT2Config):
901
+ super().__init__()
902
+ self.layers = nn.ModuleList(
903
+ [NomicBertBlock(config) for _ in range(config.n_layer)]
904
+ )
905
+ self.gradient_checkpointing = False
906
+ self.config = config
907
+
908
+ def forward(self,
909
+ hidden_states: torch.LongTensor = None,
910
+ attention_mask: Optional[torch.Tensor] = None,
911
+ position_ids: Optional[torch.LongTensor] = None,
912
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
913
+ inputs_embeds: Optional[torch.FloatTensor] = None,
914
+ use_cache: Optional[bool] = None,
915
+ output_attentions: Optional[bool] = None,
916
+ output_hidden_states: Optional[bool] = None,
917
+ return_dict: Optional[bool] = None,
918
+ is_padded_inputs: Optional[bool] = True,):
919
+
920
+ """If subset_mask is not None, we only want output for the subset of the sequence.
921
+ This means that we only compute the last layer output for these tokens.
922
+ subset_mask: (batch, seqlen), dtype=torch.bool
923
+ """
924
+ hidden_states2 = None
925
+ residual = None
926
+
927
+
928
+ for _, layer in enumerate(self.layers):
929
+ if self.gradient_checkpointing and self.training:
930
+
931
+ def create_custom_forward(module):
932
+ def custom_forward(*inputs):
933
+ # None for past_key_value
934
+ return module(*inputs)
935
+
936
+ return custom_forward
937
+
938
+ hidden_states, hidden_states2, residual = torch.utils.checkpoint.checkpoint(
939
+ create_custom_forward(layer),
940
+ hidden_states,
941
+ hidden_states2,
942
+ residual,
943
+ attention_mask,
944
+ None,
945
+ None,
946
+ is_padded_inputs,
947
+ # if you freeze ANY layers, you need `use_reentrant=False`
948
+ # https://github.com/huggingface/transformers/issues/21381
949
+ # https://discuss.pytorch.org/t/checkpoint-with-no-grad-requiring-inputs-problem/19117/7
950
+ use_reentrant=False,
951
+ )
952
+
953
+ else:
954
+ hidden_states, hidden_states2, residual = layer(
955
+ hidden_states,
956
+ hidden_states2,
957
+ residual,
958
+ attention_mask,
959
+ position_ids,
960
+ None,
961
+ is_padded_inputs,
962
+ output_attentions,
963
+ use_cache,
964
+ )
965
+ return hidden_states
966
+
967
+
968
+ class NomicBertPooler(nn.Module):
969
+ def __init__(self, config):
970
+ super().__init__()
971
+ self.dense = nn.Linear(config.n_embd, config.n_embd)
972
+ self.activation = nn.Tanh()
973
+
974
+ def forward(self, hidden_states, pool=True):
975
+ # We "pool" the model by simply taking the hidden state corresponding
976
+ # to the first token.
977
+ first_token_tensor = hidden_states[:, 0] if pool else hidden_states
978
+ pooled_output = self.dense(first_token_tensor)
979
+ pooled_output = self.activation(pooled_output)
980
+ return pooled_output
981
+
982
+
983
+ class NomicBertPredictionHeadTransform(nn.Module):
984
+ def __init__(self, config):
985
+ super().__init__()
986
+ self.dense = nn.Linear(config.n_embd, config.n_embd, bias=config.mlp_fc1_bias)
987
+ approximate = (
988
+ "tanh"
989
+ if config.activation_function in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"]
990
+ else "none"
991
+ )
992
+ if config.activation_function == "swiglu":
993
+ self.transform_act_fn = F.silu
994
+ else:
995
+ self.transform_act_fn = nn.GELU(approximate=approximate)
996
+
997
+ self.layer_norm = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
998
+
999
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
1000
+ hidden_states = self.dense(hidden_states)
1001
+ hidden_states = self.transform_act_fn(hidden_states)
1002
+ hidden_states = self.layer_norm(hidden_states)
1003
+
1004
+ return hidden_states
1005
+
1006
+
1007
+ class NomicBertLMPredictionHead(nn.Module):
1008
+ def __init__(self, config):
1009
+ super().__init__()
1010
+
1011
+ self.transform = NomicBertPredictionHeadTransform(config)
1012
+
1013
+ self.decoder = nn.Linear(config.n_embd, config.vocab_size, bias=config.mlp_fc1_bias)
1014
+
1015
+ def forward(self, hidden_states):
1016
+ hidden_states = self.transform(hidden_states)
1017
+ hidden_states = self.decoder(hidden_states)
1018
+ return hidden_states
1019
+
1020
+
1021
+ class NomicBertPreTrainingHeads(nn.Module):
1022
+ def __init__(self, config):
1023
+ super().__init__()
1024
+ self.predictions = NomicBertLMPredictionHead(config)
1025
+
1026
+ def forward(self, sequence_output):
1027
+ prediction_scores = self.predictions(sequence_output)
1028
+ return prediction_scores
1029
+
1030
+
1031
+ class NomicBertModel(NomicBertPreTrainedModel):
1032
+ def __init__(self, config: GPT2Config, add_pooling_layer=True):
1033
+ super().__init__(config)
1034
+ self.pad_vocab_size_multiple = getattr(config, "pad_vocab_size_multiple", 1)
1035
+ if config.vocab_size % self.pad_vocab_size_multiple != 0:
1036
+ config.vocab_size += self.pad_vocab_size_multiple - (
1037
+ config.vocab_size % self.pad_vocab_size_multiple
1038
+ )
1039
+
1040
+ assert config.activation_function in ["gelu", "gelu_new", "gelu_fast", "gelu_pytorch_tanh", "swiglu", "geglu", "glu"]
1041
+
1042
+ self.embeddings = NomicBertEmbeddings(
1043
+ config
1044
+ )
1045
+ self.emb_drop = nn.Dropout(config.resid_pdrop)
1046
+ self.emb_ln = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
1047
+ self.encoder = NomicBertEncoder(config)
1048
+ self.pooler = NomicBertPooler(config) if add_pooling_layer else None
1049
+
1050
+ self.apply(partial(_init_weights, initializer_range=config.initializer_range))
1051
+
1052
+ def forward(
1053
+ self,
1054
+ input_ids,
1055
+ position_ids=None,
1056
+ token_type_ids=None,
1057
+ attention_mask=None,
1058
+ ):
1059
+ if token_type_ids is None:
1060
+ token_type_ids = torch.zeros_like(input_ids)
1061
+ hidden_states = self.embeddings(
1062
+ input_ids, position_ids=position_ids, token_type_ids=token_type_ids
1063
+ )
1064
+ hidden_states = self.emb_ln(hidden_states)
1065
+ hidden_states = self.emb_drop(hidden_states)
1066
+
1067
+ attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.shape)
1068
+ sequence_output = self.encoder(
1069
+ hidden_states, attention_mask=attention_mask
1070
+ )
1071
+
1072
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
1073
+
1074
+ return BaseModelOutputWithPoolingAndCrossAttentions(
1075
+ last_hidden_state=sequence_output,
1076
+ pooler_output=pooled_output,
1077
+ )
1078
+
1079
+
1080
+ class NomicBertForPreTraining(NomicBertPreTrainedModel):
1081
+ _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"]
1082
+
1083
+ def __init__(self, config: GPT2Config):
1084
+ super().__init__(config)
1085
+
1086
+ self.bert = NomicBertModel(config, add_pooling_layer=getattr(config, "add_pooling_layer", False))
1087
+ self.cls = NomicBertPreTrainingHeads(config)
1088
+ self.mlm_loss = nn.CrossEntropyLoss()
1089
+
1090
+ # Initialize weights and apply final processing
1091
+ self.apply(partial(_init_weights, initializer_range=config.initializer_range))
1092
+ self.tie_weights()
1093
+
1094
+ def tie_weights(self):
1095
+ self.cls.predictions.decoder.weight = self.bert.embeddings.word_embeddings.weight
1096
+
1097
+ def forward(
1098
+ self,
1099
+ input_ids,
1100
+ position_ids=None,
1101
+ token_type_ids=None,
1102
+ attention_mask=None,
1103
+ labels=None,
1104
+ ):
1105
+ """
1106
+ If labels are provided, they must be -100 for masked out tokens (as specified in the attention
1107
+ mask).
1108
+ Outputs:
1109
+ if `labels` and `next_sentence_label` are not `None`:
1110
+ Outputs the total_loss which is the sum of the masked language modeling loss and the next
1111
+ sentence classification loss.
1112
+ if `labels` or `next_sentence_label` is `None`:
1113
+ Outputs a tuple comprising
1114
+ - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
1115
+ - the next sentence classification logits of shape [batch_size, 2].
1116
+
1117
+ """
1118
+ outputs = self.bert(
1119
+ input_ids,
1120
+ position_ids=position_ids,
1121
+ token_type_ids=token_type_ids,
1122
+ attention_mask=attention_mask.bool() if attention_mask is not None else None,
1123
+ )
1124
+ sequence_output, _ = outputs.last_hidden_state, outputs.pooler_output
1125
+
1126
+ prediction_scores = self.cls(sequence_output)
1127
+
1128
+ total_loss = None
1129
+ if labels is not None:
1130
+ masked_lm_loss = self.mlm_loss(
1131
+ rearrange(prediction_scores, "... v -> (...) v"),
1132
+ rearrange(labels, "... -> (...)"),
1133
+ )
1134
+ total_loss = masked_lm_loss.float()
1135
+
1136
+ return BertForPreTrainingOutput(
1137
+ loss=total_loss,
1138
+ prediction_logits=prediction_scores,
1139
+ )
1140
+
1141
+
1142
+ class NomicBertForSequenceClassification(NomicBertPreTrainedModel):
1143
+ def __init__(self, config):
1144
+ super().__init__(config)
1145
+ self.num_labels = config.num_labels
1146
+ self.config = config
1147
+
1148
+ self.bert = NomicBertModel(config)
1149
+ classifier_dropout = (
1150
+ getattr(config, "classifier_dropout", config.embd_pdrop)
1151
+ )
1152
+ self.dropout = nn.Dropout(classifier_dropout)
1153
+ self.classifier = nn.Linear(config.n_embd, config.num_labels)
1154
+
1155
+ # Initialize weights and apply final processing
1156
+ self.post_init()
1157
+
1158
+ def forward(
1159
+ self,
1160
+ input_ids: Optional[torch.Tensor] = None,
1161
+ attention_mask: Optional[torch.Tensor] = None,
1162
+ token_type_ids: Optional[torch.Tensor] = None,
1163
+ position_ids: Optional[torch.Tensor] = None,
1164
+ head_mask: Optional[torch.Tensor] = None,
1165
+ inputs_embeds: Optional[torch.Tensor] = None,
1166
+ labels: Optional[torch.Tensor] = None,
1167
+ output_attentions: Optional[bool] = None,
1168
+ output_hidden_states: Optional[bool] = None,
1169
+ return_dict: Optional[bool] = None,
1170
+ ):
1171
+ r"""
1172
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1173
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1174
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1175
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1176
+ """
1177
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1178
+ outputs = self.bert(
1179
+ input_ids,
1180
+ position_ids=position_ids,
1181
+ token_type_ids=token_type_ids,
1182
+ attention_mask=attention_mask.bool() if attention_mask is not None else None,
1183
+ )
1184
+
1185
+ pooled_output = outputs[1]
1186
+
1187
+ pooled_output = self.dropout(pooled_output)
1188
+ logits = self.classifier(pooled_output)
1189
+
1190
+ loss = None
1191
+ if labels is not None:
1192
+ if self.config.problem_type is None:
1193
+ if self.num_labels == 1:
1194
+ self.config.problem_type = "regression"
1195
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1196
+ self.config.problem_type = "single_label_classification"
1197
+ else:
1198
+ self.config.problem_type = "multi_label_classification"
1199
+
1200
+ if self.config.problem_type == "regression":
1201
+ loss_fct = nn.MSELoss()
1202
+ if self.num_labels == 1:
1203
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1204
+ else:
1205
+ loss = loss_fct(logits, labels)
1206
+ elif self.config.problem_type == "single_label_classification":
1207
+ loss_fct = nn.CrossEntropyLoss()
1208
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1209
+ elif self.config.problem_type == "multi_label_classification":
1210
+ loss_fct = nn.BCEWithLogitsLoss()
1211
+ loss = loss_fct(logits, labels)
1212
+ if not return_dict:
1213
+ output = (logits,) + outputs[2:]
1214
+ return ((loss,) + output) if loss is not None else output
1215
+
1216
+ return SequenceClassifierOutput(
1217
+ loss=loss,
1218
+ logits=logits,
1219
+ hidden_states=outputs.hidden_states,
1220
+ attentions=outputs.attentions,
1221
+ )
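
The classes above can also be exercised without the Hub plumbing: NomicBertModel takes a NomicBertConfig, and its forward returns a BaseModelOutputWithPoolingAndCrossAttentions carrying last_hidden_state and, when a pooler is constructed, pooler_output. A randomly initialized smoke-test sketch (two layers only, rotary scaling disabled as in config.json; assumes both Python files from this commit are on the import path):

import torch
from configuration_hf_nomic_bert import NomicBertConfig
from modeling_hf_nomic_bert import NomicBertModel

config = NomicBertConfig(
    n_embd=768, n_head=12, n_layer=2, n_inner=3072, n_positions=2048,
    activation_function="swiglu",
    rotary_emb_fraction=1.0, rotary_emb_base=1000,
    rotary_scaling_factor=None,       # plain rotary embedding, matching "rotary_scaling_factor": null
    qkv_proj_bias=False, mlp_fc1_bias=False, mlp_fc2_bias=False,
    pad_vocab_size_multiple=64, vocab_size=30528, type_vocab_size=2,
)
model = NomicBertModel(config, add_pooling_layer=True).eval()

input_ids = torch.randint(0, config.vocab_size, (1, 16))
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    out = model(input_ids, attention_mask=attention_mask)
print(out.last_hidden_state.shape)  # torch.Size([1, 16, 768])
print(out.pooler_output.shape)      # torch.Size([1, 768])
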
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82f641cef285935a66d0c2b22cb373b8f3e7004fbe6bc9d9d41303239f6f7807
+ size 546961866