gyupro committed
Commit b6c4b0c
Parent: 94e4b8e

Upload GPTNeoXForCausalLM

config.json CHANGED
@@ -1,12 +1,14 @@
 {
-  "_name_or_path": "ko_en",
+  "_name_or_path": "train_v1.1b/ko_to_en",
   "architectures": [
     "GPTNeoXForCausalLM"
   ],
+  "attention_dropout": 0.0,
   "bos_token_id": 0,
   "classifier_dropout": 0.1,
   "eos_token_id": 2,
   "hidden_act": "gelu",
+  "hidden_dropout": 0.0,
   "hidden_size": 4096,
   "initializer_range": 0.02,
   "intermediate_size": 16384,
@@ -16,11 +18,12 @@
   "num_attention_heads": 16,
   "num_hidden_layers": 28,
   "num_steps": "global_step320000",
+  "rope_scaling": null,
   "rotary_emb_base": 10000,
   "rotary_pct": 0.25,
   "tie_word_embeddings": false,
   "torch_dtype": "float16",
-  "transformers_version": "4.29.2",
+  "transformers_version": "4.32.0.dev0",
   "use_cache": true,
   "use_parallel_residual": true,
   "vocab_size": 30080
generation_config.json CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 0,
   "eos_token_id": 2,
-  "transformers_version": "4.29.2"
+  "transformers_version": "4.32.0.dev0"
 }
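Only the serializing transformers_version string changed here; the generation defaults themselves are untouched. A quick check, assuming a local checkout of the repo:

```python
# Minimal sketch: read the generation defaults from a local checkout.
# Only transformers_version changed, so decoding behavior is identical.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained(".")  # reads ./generation_config.json
print(gen.bos_token_id, gen.eos_token_id)    # 0 2
```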
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a30abd1ad02c53052487eb40af7bb2e811128b84c95235cba3870198a5147dac
-size 10017673915
+oid sha256:707f8f875916f70511650eadbf494d95dcc3f8d74f7aa465993c1b8e088f2528
+size 9912798457
pytorch_model-00002-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56484e0d9e5830ad8e3f14a669c67d173914ede517490e74301949dea94cab10
-size 1870040575
+oid sha256:5cbadbf224e3d37a5b655ee6160cf72c2837eecea34e9be37326cebb07fa1e4c
+size 1857455571
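Both weight shards are Git LFS pointer files, so the diff shows only new oid/size values even though the tensors themselves changed. Each shard shrinks slightly, consistent with the attention.bias and attention.masked_bias buffers being dropped from the checkpoint (see the index diff below). A sketch, assuming the real shards have been fetched (e.g. via `git lfs pull`), of verifying a download against its pointer:

```python
# Minimal sketch: verify a downloaded shard against the oid/size recorded in
# its Git LFS pointer. Assumes the real .bin files have been fetched locally.
import hashlib
import os

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

shard = "pytorch_model-00001-of-00002.bin"
assert os.path.getsize(shard) == 9912798457
assert sha256_of(shard) == "707f8f875916f70511650eadbf494d95dcc3f8d74f7aa465993c1b8e088f2528"
```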
pytorch_model.bin.index.json CHANGED
@@ -1,16 +1,14 @@
 {
   "metadata": {
-    "total_size": 11784801848.0
+    "total_size": 11770121728
   },
   "weight_map": {
     "embed_out.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.embed_in.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.0.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.0.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -22,10 +20,8 @@
     "gpt_neox.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.1.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.1.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -37,10 +33,8 @@
     "gpt_neox.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.10.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.10.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -52,10 +46,8 @@
     "gpt_neox.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.11.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.11.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -67,10 +59,8 @@
     "gpt_neox.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.12.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.12.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -82,10 +72,8 @@
     "gpt_neox.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.13.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.13.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -97,10 +85,8 @@
     "gpt_neox.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.14.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.14.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -112,10 +98,8 @@
     "gpt_neox.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.15.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.15.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -127,10 +111,8 @@
     "gpt_neox.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.16.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.16.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -142,10 +124,8 @@
     "gpt_neox.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.17.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.17.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -157,10 +137,8 @@
     "gpt_neox.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.18.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.18.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -172,10 +150,8 @@
     "gpt_neox.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.19.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.19.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -187,10 +163,8 @@
     "gpt_neox.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.2.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.2.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -202,10 +176,8 @@
     "gpt_neox.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.20.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.20.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -217,10 +189,8 @@
     "gpt_neox.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.21.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.21.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -232,10 +202,8 @@
     "gpt_neox.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.22.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.22.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -247,10 +215,8 @@
     "gpt_neox.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.23.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.23.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -262,10 +228,8 @@
     "gpt_neox.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.24.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.24.attention.dense.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.24.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.24.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.24.attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.24.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -277,10 +241,8 @@
     "gpt_neox.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.24.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.24.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.25.attention.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.attention.dense.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.25.attention.masked_bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
@@ -292,10 +254,8 @@
     "gpt_neox.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.26.attention.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.attention.dense.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.26.attention.masked_bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
@@ -307,10 +267,8 @@
     "gpt_neox.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.27.attention.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.attention.dense.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.27.attention.masked_bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.attention.query_key_value.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
@@ -322,10 +280,8 @@
     "gpt_neox.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
     "gpt_neox.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
-    "gpt_neox.layers.3.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.3.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -337,10 +293,8 @@
     "gpt_neox.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.4.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.4.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -352,10 +306,8 @@
     "gpt_neox.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.5.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.5.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -367,10 +319,8 @@
     "gpt_neox.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.6.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.6.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -382,10 +332,8 @@
     "gpt_neox.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.7.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.7.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -397,10 +345,8 @@
     "gpt_neox.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.8.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.8.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
@@ -412,10 +358,8 @@
     "gpt_neox.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.9.attention.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.9.attention.dense.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.9.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
-    "gpt_neox.layers.9.attention.masked_bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.9.attention.query_key_value.bias": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.9.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
     "gpt_neox.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",