mgoin committed
Commit 61d5d0d
1 Parent(s): e7302b3

Upload folder using huggingface_hub
added_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "<|endoftext|>": 64001,
+   "<|startoftext|>": 64000
+ }
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "NousResearch/Nous-Hermes-2-Yi-34B",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "hidden_act": "silu",
+   "hidden_size": 7168,
+   "initializer_range": 0.02,
+   "intermediate_size": 20480,
+   "is_decoder": true,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 56,
+   "num_hidden_layers": 60,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 5000000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "1.7.0.20240122",
+   "use_cache": false,
+   "use_past": false,
+   "vocab_size": 64000
+ }
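
config.json pins the model to a Yi-34B-shaped Llama architecture with grouped-query attention: 56 query heads share 8 key/value heads over a 7168-wide hidden state. A minimal sketch in Python (the "." path is an assumption; point it at the directory holding these files) that loads the config and derives that geometry:

from transformers import AutoConfig

# "." is assumed to be the local directory containing config.json
config = AutoConfig.from_pretrained(".")

head_dim = config.hidden_size // config.num_attention_heads           # 7168 / 56 = 128
gqa_ratio = config.num_attention_heads // config.num_key_value_heads  # 56 / 8 = 7 query heads per KV head

print(f"{config.model_type}: {config.num_hidden_layers} layers, "
      f"head_dim={head_dim}, GQA ratio {gqa_ratio}:1")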
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "pad_token_id": 0,
+   "transformers_version": "1.7.0.20240122"
+ }
pytorch_model-00001-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db105ece1aaca8d13fadf1f013d6bf58a6521cb7329aa8a33cb212c51b26172d
+ size 9984187850
pytorch_model-00002-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b734e69df7366bb5f031f8f88de11c33b40204676860f2db26206a5f260b03fe
+ size 9918905402
pytorch_model-00003-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:472e63aedb472c13561b53bd434b0529893911fcdeee24be524fab5f6fb3b822
+ size 9757440670
pytorch_model-00004-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf76149eb3bb139411a157a99e2293865e02565f8554c726416f229dcef262b6
+ size 9757257070
pytorch_model-00005-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:802dbb5c9c5bb462a626608d9967d275af9fe1bf246c1c324aad43da7eade60c
+ size 9757256866
pytorch_model-00006-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb75e44f5a608d7215ed47e1b4eae7f947c452167051a3561b000da0006dc48
+ size 9948298918
pytorch_model-00007-of-00007.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0440466916942a9601ad90a7b68b4a0460b2aaf744a39fc20faf6bad45785d0d
+ size 9720652338
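
Each weight shard is stored through Git LFS, so the diff records only a pointer carrying the blob's sha256 and byte size. A minimal sketch (stdlib only; run it in the download directory, which is an assumption) that verifies a downloaded shard against its pointer:

import hashlib
import os

# Values copied from the first pointer file above
shard = "pytorch_model-00001-of-00007.bin"
expected_oid = "db105ece1aaca8d13fadf1f013d6bf58a6521cb7329aa8a33cb212c51b26172d"
expected_size = 9984187850

assert os.path.getsize(shard) == expected_size, "size mismatch"

sha = hashlib.sha256()
with open(shard, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print(f"{shard}: OK")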
pytorch_model.bin.index.json ADDED
The diff for this file is too large to render. See raw diff
 
recipe.yaml ADDED
@@ -0,0 +1,47 @@
+ test_stage:
+   obcq_modifiers:
+     LogarithmicEqualizationModifier:
+       mappings:
+       - - - re:.*q_proj
+           - re:.*k_proj
+           - re:.*v_proj
+         - re:.*input_layernorm
+       - - - re:.*gate_proj
+           - re:.*up_proj
+         - re:.*post_attention_layernorm
+     QuantizationModifier:
+       ignore:
+       - LlamaRotaryEmbedding
+       - LlamaRMSNorm
+       - SiLUActivation
+       - MatMulOutput_QK
+       - MatMulOutput_PV
+       post_oneshot_calibration: true
+       scheme_overrides:
+         Linear:
+           weights:
+             num_bits: 8
+             symmetric: true
+             strategy: channel
+         MatMulLeftInput_QK:
+           input_activations:
+             num_bits: 8
+             symmetric: true
+         MatMulLeftInput_PV:
+           input_activations:
+             num_bits: 8
+             symmetric: true
+         Embedding:
+           input_activations: null
+           weights:
+             num_bits: 8
+             symmetric: false
+     SparseGPTModifier:
+       sparsity: 0.5
+       block_size: 128
+       sequential_update: true
+       quantize: true
+       percdamp: 0.01
+       mask_structure: 0:0
+       targets:
+       - re:model.layers.\d*$
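
recipe.yaml is a SparseML one-shot compression recipe: the SmoothQuant-style LogarithmicEqualizationModifier folds activation scales into the attention and MLP projections, QuantizationModifier applies INT8 quantization (per-channel symmetric weights for Linear layers, asymmetric weights for the embedding, quantized matmul inputs for QK and PV), and SparseGPTModifier prunes the decoder layers to 50% unstructured sparsity (mask_structure 0:0) in the same pass. A sketch of how such a recipe is typically applied; the oneshot signature, calibration dataset, and sample count are assumptions and vary across sparseml versions:

from sparseml.transformers import oneshot

# A sketch, not the author's exact invocation; dataset and sample count are assumptions.
oneshot(
    model="NousResearch/Nous-Hermes-2-Yi-34B",  # base model named in config.json
    dataset="open_platypus",                    # assumed calibration dataset
    recipe="recipe.yaml",                       # the recipe shown above
    num_calibration_samples=512,                # assumed
    output_dir="./one-shot-output",
)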
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
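
Note that end-of-sequence here is the ChatML token <|im_end|> (id 7 in tokenizer_config.json below, matching config.json's eos_token_id) rather than the usual </s>, and padding falls back to <unk>. A minimal sketch ("." assumed to be the directory holding these files) that confirms what generation will stop and pad on:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # assumed local download directory
print(tok.bos_token, tok.eos_token, tok.pad_token)  # <|startoftext|> <|im_end|> <unk>
print(tok.convert_tokens_to_ids("<|im_end|>"))      # 7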
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:386c49cf943d71aa110361135338c50e38beeff0a66593480421f37b319e1a39
+ size 1033105
tokenizer_config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "7": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64000": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "64001": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "legacy": true,
+   "model_max_length": 4096,
+   "pad_token": "<unk>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "trust_remote_code": false,
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false,
+   "use_fast": true
+ }
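
The chat_template above is plain ChatML: every message is wrapped as <|im_start|>{role}\n{content}<|im_end|>\n, with an optional trailing assistant header. A minimal sketch ("." assumed to be the directory holding these files) of rendering a prompt with it:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")  # assumed local download directory
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant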