atsuki-yamaguchi committed
Commit 810c426
1 Parent(s): e60c43a

Upload folder using huggingface_hub

README.md CHANGED
@@ -1,32 +1,21 @@
 ---
-license: mit
-language:
-- ar
+library_name: peft
 ---
-BLOOM-7B LAPT + CLP+ Arabic
-===
+## Training procedure
 
-## How to use
-```python
-from peft import AutoPeftModelForCausalLM
-from transformers import AutoTokenizer
-
-model = AutoPeftModelForCausalLM.from_pretrained(
-    "atsuki-yamaguchi/bloom-7b1-clpp-ar"
-)
-```
-
-## Citation
-```
-@article{yamaguchi2024empirical,
-    title={An Empirical Study on Cross-lingual Vocabulary Adaptation for Efficient Generative {LLM} Inference},
-    author={Atsuki Yamaguchi and Aline Villavicencio and Nikolaos Aletras},
-    journal={ArXiv},
-    year={2024},
-    volume={abs/2402.10712},
-    url={https://arxiv.org/abs/2402.10712}
-}
-```
-
-## Link
-For more details, please visit https://github.com/gucci-j/llm-cva
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+### Framework versions
+
+- PEFT 0.5.0
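
The config list in the new README maps one-to-one onto `transformers`' `BitsAndBytesConfig`. Below is a minimal sketch of recreating it; the loading call and the base-model id (taken from the new `config.json` in this commit) are illustrative assumptions, not part of the commit itself:

```python
# Sketch: recreate the 8-bit quantization config listed in the new README.
# The 4-bit fields (fp4, no double quant, float32 compute) are library
# defaults in this version, so they are omitted here.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
)

# Illustrative only: quantized load of the base model named in config.json.
base = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-7b1",
    quantization_config=bnb_config,
    device_map="auto",
)
```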
 
adapter_config.json CHANGED
@@ -1,6 +1,6 @@
 {
   "auto_mapping": null,
-  "base_model_name_or_path": "atsuki-yamaguchi/bloom-7b1-clpp-ar",
+  "base_model_name_or_path": "/mnt/parscratch/users/acp23ay/private/models/bloom-7b1-ar-clp-plus",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/mnt/parscratch/users/acp23ay/private/models/bloom-7b1-ar-clp-plus",
+  "_name_or_path": "bigscience/bloom-7b1",
   "apply_residual_connection_post_layernorm": false,
   "architectures": [
     "BloomForCausalLM"
@@ -24,7 +24,7 @@
   "skip_bias_add": true,
   "skip_bias_add_qkv": false,
   "slow_but_exact": false,
-  "torch_dtype": "float32",
+  "torch_dtype": "float64",
   "transformers_version": "4.35.0.dev0",
   "unk_token_id": 0,
   "use_cache": true,
model-00001-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:afa6a3dca5410b76a9af930f868238c4d7e9b8aeb0eaf04c57e068c84f778ba8
-size 4807760880
+oid sha256:fb23abb22ef9d94a6043ce1d07105637f7af47e316e8e5ff3fab858dcf7796e1
+size 4782314936
model-00002-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:70d129fe531c1cf536393f62b08f0a6f7ced656a3e275d5adfad796e17bd804a
-size 4833124616
+oid sha256:f36d105ab7647131ac8737c9463748718f35911253aaf54e542aba5dfb11db70
+size 4833124632
model-00003-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3119cead87c42bc1c9929e61756aa0203c6417dd5b3a12cf37aa2b41182030cc
-size 4833124696
+oid sha256:d61d54d82c6c2b0d4d98cdcae29857920c633b971d68fdbbfdceb0f405fb3ade
+size 4833124672
model-00004-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9c42f65ecc0bde5afe6902d3d673a7550c7b5caf09be4be4393cae437cf0160
-size 4833124696
+oid sha256:c71fe9f171c7c2cb51bbd9a8f375620e7af9d41e87c3c480ef27db822c655147
+size 4833124704
model-00005-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f02ab42d6c8f666bd71ff1795806632fbcc0819c494af457bc451e17ec8e2a7a
-size 4833124696
+oid sha256:35161ddbc901b254899cdc7ec9cb875adf11ac0e57894ef96017480852c086ca
+size 4833124704
model-00006-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e75478c5ffd8de11b259dbaab1415808ac19f5b86b5645726208237ab4afbb4
-size 1074005816
+oid sha256:03b802a54279c7dca50ca82e6b9a85069822a8b12e4dc7e0a429ef23ce37e162
+size 2148027832
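
Each entry above is a Git LFS pointer: the file in the repo stores only a spec version, a sha256 oid, and a byte size. A sketch for checking a downloaded shard against its pointer (file name and expected values copied from the diff above; the local path is an assumption):

```python
# Verify a downloaded shard against the LFS pointer's oid and size.
import hashlib
import os

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

path = "model-00001-of-00006.safetensors"
assert os.path.getsize(path) == 4782314936
assert sha256_of(path) == "fb23abb22ef9d94a6043ce1d07105637f7af47e316e8e5ff3fab858dcf7796e1"
```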
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 25214222336
+    "total_size": 26262798336
   },
   "weight_map": {
     "transformer.h.0.input_layernorm.bias": "model-00001-of-00006.safetensors",
@@ -27,18 +27,18 @@
     "transformer.h.1.self_attention.dense.weight": "model-00001-of-00006.safetensors",
     "transformer.h.1.self_attention.query_key_value.bias": "model-00001-of-00006.safetensors",
     "transformer.h.1.self_attention.query_key_value.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.10.input_layernorm.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.input_layernorm.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.10.input_layernorm.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.input_layernorm.weight": "model-00003-of-00006.safetensors",
     "transformer.h.10.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
     "transformer.h.10.mlp.dense_4h_to_h.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.10.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.mlp.dense_h_to_4h.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.post_attention_layernorm.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.dense.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.dense.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.query_key_value.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.10.self_attention.query_key_value.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.10.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.mlp.dense_h_to_4h.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.post_attention_layernorm.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.dense.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.dense.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.query_key_value.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.10.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
     "transformer.h.11.input_layernorm.bias": "model-00003-of-00006.safetensors",
     "transformer.h.11.input_layernorm.weight": "model-00003-of-00006.safetensors",
     "transformer.h.11.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
@@ -89,28 +89,28 @@
     "transformer.h.14.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
     "transformer.h.15.input_layernorm.bias": "model-00003-of-00006.safetensors",
     "transformer.h.15.input_layernorm.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.15.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.15.mlp.dense_4h_to_h.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.15.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.15.mlp.dense_h_to_4h.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.15.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.15.mlp.dense_4h_to_h.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.15.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.15.mlp.dense_h_to_4h.weight": "model-00004-of-00006.safetensors",
     "transformer.h.15.post_attention_layernorm.bias": "model-00003-of-00006.safetensors",
     "transformer.h.15.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
     "transformer.h.15.self_attention.dense.bias": "model-00003-of-00006.safetensors",
     "transformer.h.15.self_attention.dense.weight": "model-00003-of-00006.safetensors",
     "transformer.h.15.self_attention.query_key_value.bias": "model-00003-of-00006.safetensors",
     "transformer.h.15.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.input_layernorm.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.input_layernorm.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.16.input_layernorm.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.input_layernorm.weight": "model-00004-of-00006.safetensors",
     "transformer.h.16.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
     "transformer.h.16.mlp.dense_4h_to_h.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.16.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.mlp.dense_h_to_4h.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.post_attention_layernorm.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.post_attention_layernorm.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.dense.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.dense.weight": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.query_key_value.bias": "model-00003-of-00006.safetensors",
-    "transformer.h.16.self_attention.query_key_value.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.16.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.mlp.dense_h_to_4h.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.post_attention_layernorm.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.dense.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.dense.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.query_key_value.bias": "model-00004-of-00006.safetensors",
+    "transformer.h.16.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
     "transformer.h.17.input_layernorm.bias": "model-00004-of-00006.safetensors",
     "transformer.h.17.input_layernorm.weight": "model-00004-of-00006.safetensors",
     "transformer.h.17.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
@@ -173,28 +173,28 @@
     "transformer.h.20.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
     "transformer.h.21.input_layernorm.bias": "model-00004-of-00006.safetensors",
     "transformer.h.21.input_layernorm.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.21.mlp.dense_4h_to_h.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.21.mlp.dense_4h_to_h.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.21.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.21.mlp.dense_h_to_4h.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.21.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.21.mlp.dense_4h_to_h.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.21.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.21.mlp.dense_h_to_4h.weight": "model-00005-of-00006.safetensors",
     "transformer.h.21.post_attention_layernorm.bias": "model-00004-of-00006.safetensors",
     "transformer.h.21.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
     "transformer.h.21.self_attention.dense.bias": "model-00004-of-00006.safetensors",
     "transformer.h.21.self_attention.dense.weight": "model-00004-of-00006.safetensors",
     "transformer.h.21.self_attention.query_key_value.bias": "model-00004-of-00006.safetensors",
     "transformer.h.21.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.input_layernorm.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.input_layernorm.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.22.input_layernorm.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.input_layernorm.weight": "model-00005-of-00006.safetensors",
     "transformer.h.22.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
     "transformer.h.22.mlp.dense_4h_to_h.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.22.mlp.dense_h_to_4h.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.mlp.dense_h_to_4h.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.post_attention_layernorm.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.post_attention_layernorm.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.dense.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.dense.weight": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.query_key_value.bias": "model-00004-of-00006.safetensors",
-    "transformer.h.22.self_attention.query_key_value.weight": "model-00004-of-00006.safetensors",
+    "transformer.h.22.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.mlp.dense_h_to_4h.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.post_attention_layernorm.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.dense.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.dense.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.query_key_value.bias": "model-00005-of-00006.safetensors",
+    "transformer.h.22.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
     "transformer.h.23.input_layernorm.bias": "model-00005-of-00006.safetensors",
     "transformer.h.23.input_layernorm.weight": "model-00005-of-00006.safetensors",
     "transformer.h.23.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
@@ -245,28 +245,28 @@
     "transformer.h.26.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
     "transformer.h.27.input_layernorm.bias": "model-00005-of-00006.safetensors",
     "transformer.h.27.input_layernorm.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.27.mlp.dense_4h_to_h.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.27.mlp.dense_4h_to_h.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.27.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.27.mlp.dense_h_to_4h.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.27.mlp.dense_4h_to_h.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.27.mlp.dense_4h_to_h.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.27.mlp.dense_h_to_4h.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.27.mlp.dense_h_to_4h.weight": "model-00006-of-00006.safetensors",
     "transformer.h.27.post_attention_layernorm.bias": "model-00005-of-00006.safetensors",
     "transformer.h.27.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
     "transformer.h.27.self_attention.dense.bias": "model-00005-of-00006.safetensors",
     "transformer.h.27.self_attention.dense.weight": "model-00005-of-00006.safetensors",
     "transformer.h.27.self_attention.query_key_value.bias": "model-00005-of-00006.safetensors",
     "transformer.h.27.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.input_layernorm.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.input_layernorm.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.28.input_layernorm.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.input_layernorm.weight": "model-00006-of-00006.safetensors",
     "transformer.h.28.mlp.dense_4h_to_h.bias": "model-00006-of-00006.safetensors",
     "transformer.h.28.mlp.dense_4h_to_h.weight": "model-00006-of-00006.safetensors",
-    "transformer.h.28.mlp.dense_h_to_4h.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.mlp.dense_h_to_4h.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.post_attention_layernorm.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.post_attention_layernorm.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.dense.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.dense.weight": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.query_key_value.bias": "model-00005-of-00006.safetensors",
-    "transformer.h.28.self_attention.query_key_value.weight": "model-00005-of-00006.safetensors",
+    "transformer.h.28.mlp.dense_h_to_4h.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.mlp.dense_h_to_4h.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.post_attention_layernorm.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.post_attention_layernorm.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.dense.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.dense.weight": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.query_key_value.bias": "model-00006-of-00006.safetensors",
+    "transformer.h.28.self_attention.query_key_value.weight": "model-00006-of-00006.safetensors",
     "transformer.h.29.input_layernorm.bias": "model-00006-of-00006.safetensors",
     "transformer.h.29.input_layernorm.weight": "model-00006-of-00006.safetensors",
     "transformer.h.29.mlp.dense_4h_to_h.bias": "model-00006-of-00006.safetensors",
@@ -281,28 +281,28 @@
     "transformer.h.29.self_attention.query_key_value.weight": "model-00006-of-00006.safetensors",
     "transformer.h.3.input_layernorm.bias": "model-00001-of-00006.safetensors",
     "transformer.h.3.input_layernorm.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.3.mlp.dense_4h_to_h.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.3.mlp.dense_4h_to_h.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.3.mlp.dense_h_to_4h.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.3.mlp.dense_h_to_4h.weight": "model-00001-of-00006.safetensors",
+    "transformer.h.3.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.3.mlp.dense_4h_to_h.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.3.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.3.mlp.dense_h_to_4h.weight": "model-00002-of-00006.safetensors",
     "transformer.h.3.post_attention_layernorm.bias": "model-00001-of-00006.safetensors",
     "transformer.h.3.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
     "transformer.h.3.self_attention.dense.bias": "model-00001-of-00006.safetensors",
     "transformer.h.3.self_attention.dense.weight": "model-00001-of-00006.safetensors",
     "transformer.h.3.self_attention.query_key_value.bias": "model-00001-of-00006.safetensors",
     "transformer.h.3.self_attention.query_key_value.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.input_layernorm.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.input_layernorm.weight": "model-00001-of-00006.safetensors",
+    "transformer.h.4.input_layernorm.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.input_layernorm.weight": "model-00002-of-00006.safetensors",
     "transformer.h.4.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
     "transformer.h.4.mlp.dense_4h_to_h.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.4.mlp.dense_h_to_4h.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.mlp.dense_h_to_4h.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.post_attention_layernorm.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.post_attention_layernorm.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.dense.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.dense.weight": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.query_key_value.bias": "model-00001-of-00006.safetensors",
-    "transformer.h.4.self_attention.query_key_value.weight": "model-00001-of-00006.safetensors",
+    "transformer.h.4.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.mlp.dense_h_to_4h.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.post_attention_layernorm.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.dense.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.dense.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.query_key_value.bias": "model-00002-of-00006.safetensors",
+    "transformer.h.4.self_attention.query_key_value.weight": "model-00002-of-00006.safetensors",
     "transformer.h.5.input_layernorm.bias": "model-00002-of-00006.safetensors",
     "transformer.h.5.input_layernorm.weight": "model-00002-of-00006.safetensors",
     "transformer.h.5.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
@@ -353,10 +353,10 @@
     "transformer.h.8.self_attention.query_key_value.weight": "model-00002-of-00006.safetensors",
     "transformer.h.9.input_layernorm.bias": "model-00002-of-00006.safetensors",
     "transformer.h.9.input_layernorm.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.9.mlp.dense_4h_to_h.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.9.mlp.dense_4h_to_h.weight": "model-00002-of-00006.safetensors",
-    "transformer.h.9.mlp.dense_h_to_4h.bias": "model-00002-of-00006.safetensors",
-    "transformer.h.9.mlp.dense_h_to_4h.weight": "model-00002-of-00006.safetensors",
+    "transformer.h.9.mlp.dense_4h_to_h.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.9.mlp.dense_4h_to_h.weight": "model-00003-of-00006.safetensors",
+    "transformer.h.9.mlp.dense_h_to_4h.bias": "model-00003-of-00006.safetensors",
+    "transformer.h.9.mlp.dense_h_to_4h.weight": "model-00003-of-00006.safetensors",
     "transformer.h.9.post_attention_layernorm.bias": "model-00002-of-00006.safetensors",
     "transformer.h.9.post_attention_layernorm.weight": "model-00002-of-00006.safetensors",
     "transformer.h.9.self_attention.dense.bias": "model-00002-of-00006.safetensors",
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc47222e64e3609939657b9b0d573253c83e79eaddd9c8a63053600b75addabe
+size 1082426362
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1f25c7a6aa4f56a4d283994878acdc2057c1cc32d632535279aaa055c1b8131
+size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b21283c3db24950204115ac348f58e53c6d0488c1b3816d199a477f9c0931a6
+size 1064
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1000000000000000019884624838656,
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
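
With `tokenizer.json`, `tokenizer_config.json`, `special_tokens_map.json`, and `vocab.json` now in the repo, the adapted tokenizer should load directly. A quick check (a sketch, not part of the commit):

```python
# Load the tokenizer added by this commit and inspect its special tokens.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("atsuki-yamaguchi/bloom-7b1-clpp-ar")
print(tok.bos_token, tok.eos_token, tok.unk_token)  # all "<|endoftext|>"
```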
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c09cf23a28a037139bf49154f70e885c3f9fae7eda84df1b8217b70b6afc97cf
+size 4664
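
`optimizer.pt`, `scheduler.pt`, `rng_state.pth`, and `training_args.bin` are the usual `transformers` Trainer checkpoint artifacts. The pickled training arguments can be inspected as below (illustrative; the `weights_only=False` flag is needed on recent PyTorch because the file is a full pickle, not a tensor archive):

```python
# Restore the pickled TrainingArguments saved alongside this checkpoint.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # TrainingArguments
```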
vocab.json ADDED
The diff for this file is too large to render. See raw diff