TheBloke committed
Commit 4dae34e
1 parent: 826a8a1

GPTQ model commit

added_tokens.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "</s>": 2,
+   "<CLS>": 41070,
+   "<EOD>": 41072,
+   "<MASK>": 41073,
+   "<PAD>": 41074,
+   "<SEP>": 41071,
+   "<s>": 1,
+   "<unk>": 0,
+   "<unused1>": 41075,
+   "<unused2>": 41076,
+   "<unused3>": 41077,
+   "<unused4>": 41078,
+   "<unused5>": 41079,
+   "<unused6>": 41080,
+   "<unused7>": 41081,
+   "<unused8>": 41081,
+   "<unused9>": 41082,
+   "<unused10>": 41082,
+   "<unused11>": 41083,
+   "<unused12>": 41084,
+   "<unused13>": 41085,
+   "<unused14>": 41086,
+   "<unused15>": 41087
+ }
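For orientation, a minimal sketch (not part of the commit) of how these added tokens surface once the repo is downloaded, assuming the transformers library; "./openthaigpt-gptq" is a placeholder local path:

from transformers import AutoTokenizer

# Placeholder path: wherever this repo was cloned locally (assumption).
tok = AutoTokenizer.from_pretrained("./openthaigpt-gptq")
# Note: as committed, <unused7>/<unused8> share id 41081 and
# <unused9>/<unused10> share id 41082 in added_tokens.json.
print(tok.get_added_vocab())            # e.g. "<PAD>" -> 41074, "<CLS>" -> 41070
print(tok.pad_token, tok.pad_token_id)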
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_name_or_path": "/workspace/process/openthaigpt_openthaigpt-1.0.0-beta-13b-chat-hf/source",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 13824,
+   "max_position_embeddings": 4096,
+   "model_type": "llama",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 40,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.36.2",
+   "use_cache": true,
+   "vocab_size": 41088,
+   "quantization_config": {
+     "bits": 4,
+     "group_size": 128,
+     "damp_percent": 0.1,
+     "desc_act": true,
+     "static_groups": false,
+     "sym": true,
+     "true_sequential": true,
+     "model_name_or_path": null,
+     "model_file_base_name": "model",
+     "quant_method": "gptq",
+     "modules_in_block_to_quantize": [
+       [
+         "self_attn.k_proj",
+         "self_attn.v_proj",
+         "self_attn.q_proj"
+       ],
+       [
+         "self_attn.o_proj"
+       ],
+       [
+         "mlp.up_proj",
+         "mlp.gate_proj"
+       ],
+       [
+         "mlp.down_proj"
+       ]
+     ]
+   }
+ }
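Because quantization_config is embedded in config.json, recent transformers (>= 4.36, with optimum and AutoGPTQ installed) can load the 4-bit weights directly; no GPTQ-specific arguments are needed. A hedged sketch, with the local path as an assumption:

import torch
from transformers import AutoModelForCausalLM

# The embedded quantization_config is picked up automatically.
# "./openthaigpt-gptq" is a placeholder path (assumption).
model = AutoModelForCausalLM.from_pretrained(
    "./openthaigpt-gptq",
    device_map="auto",
    torch_dtype=torch.float16,
)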
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "transformers_version": "4.33.2"
+ }
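This file only pins the BOS/EOS token ids, so all other generation settings fall back to library defaults. A small sketch of reading it directly (placeholder path, as above):

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("./openthaigpt-gptq")
print(gen_cfg.bos_token_id, gen_cfg.eos_token_id)  # 1 2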
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4256c56aa0b5f62a213238cce303c8c3aaf3dc6c1f066067b1f7a4ac3a805d65
+ size 7445557432
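The pointer above records the LFS object's digest and byte size, which can be checked against a finished download. A sketch in plain Python; the local filename is an assumption:

import hashlib, os

path = "model.safetensors"  # assumed local download location
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(os.path.getsize(path) == 7445557432)
print(h.hexdigest() == "4256c56aa0b5f62a213238cce303c8c3aaf3dc6c1f066067b1f7a4ac3a805d65")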
quantize_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "bits": 4,
+   "group_size": 128,
+   "damp_percent": 0.1,
+   "desc_act": true,
+   "static_groups": false,
+   "sym": true,
+   "true_sequential": true,
+   "model_name_or_path": null,
+   "model_file_base_name": "model"
+ }
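quantize_config.json repeats the quantization settings for loaders that read it standalone, such as AutoGPTQ; model_file_base_name "model" points at model.safetensors. A hedged sketch assuming the auto-gptq package and the same placeholder path:

from auto_gptq import AutoGPTQForCausalLM

model = AutoGPTQForCausalLM.from_quantized(
    "./openthaigpt-gptq",   # placeholder path (assumption)
    use_safetensors=True,   # model_file_base_name "model" -> model.safetensors
    device="cuda:0",
)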
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "bos_token": "<s>",
+   "cls_token": "<CLS>",
+   "eos_token": "</s>",
+   "mask_token": "<MASK>",
+   "pad_token": "<PAD>",
+   "sep_token": "<SEP>",
+   "unk_token": "<unk>"
+ }
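These mappings appear on the loaded tokenizer as attributes; a quick sketch (placeholder path, as before):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./openthaigpt-gptq")
print(tok.special_tokens_map)  # includes cls_token "<CLS>", sep_token "<SEP>", etc.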
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02df43dcae8c7b5b122d45f642e42c96577cdd09fd949c6996051886c72ab002
+ size 717508
tokenizer_config.json ADDED
@@ -0,0 +1,82 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "41070": {
+       "content": "<CLS>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "41071": {
+       "content": "<SEP>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "41072": {
+       "content": "<EOD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "41073": {
+       "content": "<MASK>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "41074": {
+       "content": "<PAD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<PAD>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
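With add_bos_token true and add_eos_token false, plain encoding prepends <s> (id 1) and appends no </s>; the huge model_max_length is the library's "no limit" sentinel, so callers should enforce the model's actual 4096-token context from config.json themselves. A sketch (placeholder path, as before):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./openthaigpt-gptq")
ids = tok("สวัสดีครับ").input_ids
print(ids[0] == tok.bos_token_id)    # True: BOS prepended
print(ids[-1] == tok.eos_token_id)   # False: no EOS appended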