ajibawa-2023 committed
Commit 5e72b15
1 Parent(s): 8a5201c

Upload folder using huggingface_hub (#1)

- baae46d150b38705c2ca3c2a667edc460586ec84b7ea77d9854e556b3f01254d (5365fdb9c49a96f515ede6d016ef8ccceffb4507)
- 155b947abe38b42b87a9f7fdf389dc7b8ba49641991a4a1d72c904c39777a6fa (520bc6ea58b841725fffafb542825deb89efb0c0)
- e491733aa5e3aec5c445b583800af1738e58a40f4fde325155954f8d737a1365 (01e3170fbf6ab405b7735982ea21d22ddf40a9d7)
- 86d24c69d12435f288ecc6a59916332a05c62020c3210c04a3db58cf0b13fd46 (8f30eaa012d4330796a01555b692f7f7dcee30a8)
- 5d0f73cb40df17e61b9ea790cd80f907d39774b6b0f4daf50fc7e40c672dd828 (909cf6dc0854db0f6d46c95ee5af0197805ed9f7)
- 38bf80df1ea3a379f4c5220c3729316981ba2704271418cf2badc4639bfa8d1b (ecbe8f0c442305245e78771236a50f02b1fddd7b)
- 71e414d3b94cb92fd12611d3700cf08bdc232f00386d281b96623e4b4bcd8840 (e8fd7d4083eec90206405fc5543d14be0a8c9711)

.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin-aa filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin-ab filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin-ac filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin-ad filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin-ae filter=lfs diff=lfs merge=lfs -text
+pytorch_model.bin-af filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "/media/feynmla100/Projects_1/llama-2-13b-hf/",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 13824,
+  "max_position_embeddings": 4096,
+  "model_type": "llama",
+  "num_attention_heads": 40,
+  "num_hidden_layers": 40,
+  "num_key_value_heads": 40,
+  "pad_token_id": 0,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.28.1",
+  "use_cache": true,
+  "vocab_size": 32000
+}
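
The config above matches a Llama-2-13B layout (hidden_size 5120, 40 layers, 40 attention heads, 4096-token context). A minimal sketch of reading it back with transformers' AutoConfig; the repo id below is a placeholder, not something taken from this commit:

# Minimal sketch: read the uploaded config back with transformers.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("user/repo-id")  # placeholder repo id
print(config.model_type)               # "llama"
print(config.hidden_size)              # 5120
print(config.num_hidden_layers)        # 40
print(config.max_position_embeddings)  # 4096
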
generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "pad_token_id": 0,
+  "transformers_version": "4.28.1",
+  "use_cache": false
+}
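
generation_config.json carries the default decoding settings (bos 1, eos 2, pad 0, use_cache false). A hedged sketch of loading it on its own, again with a placeholder repo id:

# Hedged sketch: load the generation defaults shown above.
from transformers import GenerationConfig

gen = GenerationConfig.from_pretrained("user/repo-id")  # placeholder repo id
print(gen.bos_token_id, gen.eos_token_id, gen.pad_token_id)  # 1 2 0
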
pytorch_model.bin-aa ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:83d639f18d001e120ce96ff76d1a3b0f821b2a8b827bcd3a5d5cfe112ffeed72
+size 9663676416
pytorch_model.bin-ab ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9e7607861ae4e8e0554501c0ee96bc2517c2cffb3374de363e46772ea8ae7ff
+size 9663676416
pytorch_model.bin-ac ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b288e2714d25acf5dbce83619af22e6ccd1d3c849b8ad43b53f26bdd95fd675
+size 9663676416
pytorch_model.bin-ad ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:312893368366af30fdb4e5fb0ea49e70ee517da4f17f79a4b89e43cebb8708b9
+size 9663676416
pytorch_model.bin-ae ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d22a3f60f61b69bb36b7d456574880505a465c41a581d9307d6f0f641a8edc7d
+size 9663676416
pytorch_model.bin-af ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ddf3555391c383ad7e51244c4a6e71950528f3226a42a615d0c2b9b49d6fc20
+size 3745220546
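
The weights are not the usual sharded pytorch_model-0000x-of-0000y.bin layout; they are a single pytorch_model.bin split into six raw chunks, -aa through -af (roughly 9.7 GB each, 3.7 GB for the last), presumably produced with split(1). transformers cannot load the chunks directly, so they have to be concatenated first. A minimal reassembly sketch, assuming the six parts are already downloaded into the working directory; the printed digests can be checked against the oid lines in the LFS pointers above:

# Minimal sketch: rebuild pytorch_model.bin from the six split parts in this repo.
# Assumes pytorch_model.bin-aa .. pytorch_model.bin-af are already downloaded here.
import hashlib
from pathlib import Path

parts = sorted(Path(".").glob("pytorch_model.bin-a?"))
assert len(parts) == 6, f"expected 6 parts, found {len(parts)}"

with open("pytorch_model.bin", "wb") as out:
    for part in parts:
        sha = hashlib.sha256()
        with open(part, "rb") as src:
            # Stream in 64 MiB chunks; most parts are ~9.7 GB, too big to read whole.
            for chunk in iter(lambda: src.read(64 * 1024 * 1024), b""):
                sha.update(chunk)
                out.write(chunk)
        # Should match the "oid sha256:..." line in that part's LFS pointer above.
        print(part.name, sha.hexdigest())
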
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<unk>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "bos_token": {
+    "__type": "AddedToken",
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "clean_up_tokenization_spaces": false,
+  "eos_token": {
+    "__type": "AddedToken",
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "legacy": false,
+  "model_max_length": 4096,
+  "pad_token": null,
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": {
+    "__type": "AddedToken",
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
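
tokenizer_config.json and special_tokens_map.json together describe a standard LlamaTokenizer setup: BOS <s>, EOS </s>, <unk> reused as the pad token, model_max_length 4096, and add_bos_token true. A hedged loading sketch, with a placeholder repo id:

# Hedged sketch: load the tokenizer described by the two JSON files above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/repo-id")  # placeholder repo id
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.model_max_length)                         # 4096
print(tok("Hello")["input_ids"][0])                 # 1, since add_bos_token is true
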
trainer_state.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9a8cd424a9802789e00b0d104f2978a3d5c547f9b7d40cbfd1c0911b2bdbf73
+size 5304
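
training_args.bin is, by Trainer convention, a pickled TrainingArguments object rather than model weights. A hedged inspection sketch; unpickling runs arbitrary code, so only do this for a trusted file, and newer torch versions need weights_only=False:

# Hedged sketch: inspect the pickled training arguments saved by the Trainer.
import torch

args = torch.load("training_args.bin", weights_only=False)  # trusted file only
print(type(args).__name__)  # typically "TrainingArguments"
print(args)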