Xenova (HF staff) committed
Commit 9db9b7b
1 Parent(s): 19c01f4

Upload folder using huggingface_hub

config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "Xenova/llama2.c-stories42M",
   "architectures": [
     "LlamaForCausalLM"
   ],
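
For reference, loading the updated config through the transformers Python package surfaces the same fields shown in this diff. A minimal sketch, assuming transformers is installed and the Hub is reachable:

from transformers import AutoConfig

# Fetch config.json from the repo this commit belongs to.
config = AutoConfig.from_pretrained("Xenova/llama2.c-stories42M")
print(config.architectures)    # ['LlamaForCausalLM'], as listed in the diff
print(config._name_or_path)    # 'Xenova/llama2.c-stories42M'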
onnx/decoder_model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:57fecedae457d1811cd12abe22589f313aa15c1590e1e09bae3408052e6a74c7
-size 233102386
+oid sha256:2875c864cfa242d3ce797f1c8f47c8779bc4d4d22bedffc7c5220d3405a4b1de
+size 167565936
onnx/decoder_model_merged.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:46464ab95c2c94ad1e493d0ca7746763f367343fce80cdcf7d0036911947564c
-size 233913496
+oid sha256:315d6c6d96edac7ab954841322d443a69102c059e8ac20ba89faf3f0236769aa
+size 168376680
onnx/decoder_model_merged_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:afb16b73d5c4f6e825abedc3ce65298ecd567e29c75dc98cc7f7b8dbf9e00074
-size 60022789
+oid sha256:b5e81e0464c3028cd88b9f457a8770a16c268258c0fdc290ca04035bdbb575f3
+size 43636490
onnx/decoder_model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ea09156167b318bfb6149e325a18c79e95e0948175dd3fb496da53ca0589f070
-size 59060350
+oid sha256:44b2bc266767e042cb7277e7234c916ca678841ccaef3bc1f3e76bfe6c41f689
+size 42675110
onnx/decoder_with_past_model.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:47d1d207d61288f734b55448c70e5b344262c01087900f6c258b7805e0ed86a9
-size 233104923
+oid sha256:7c13e4732ed2e01643955726fa276efa3f3bf49279e42c837ef50893f292b71a
+size 167568570
onnx/decoder_with_past_model_quantized.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca6889c5883f8686fd4ba12ee26e025b490d7ffcbe1408c8b92f766e3df6f199
-size 59062863
+oid sha256:886b10a41ecb479dffa829513765f4e445ddcef1c972d5b95aadd10eecd854df
+size 42677756
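
The .onnx entries above are Git LFS pointer files (a sha256 oid plus a byte size); the actual graphs live in LFS storage. A minimal sketch of resolving one of the updated files and opening it with ONNX Runtime, assuming the huggingface_hub and onnxruntime packages are installed:

from huggingface_hub import hf_hub_download
import onnxruntime as ort

# Download the real file behind the LFS pointer.
model_path = hf_hub_download(
    repo_id="Xenova/llama2.c-stories42M",
    filename="onnx/decoder_model_merged_quantized.onnx",
)

# Open the graph and list its input/output names.
session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
print([i.name for i in session.get_inputs()])
print([o.name for o in session.get_outputs()])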
quantize_config.json CHANGED
@@ -2,104 +2,103 @@
   "per_channel": false,
   "reduce_range": false,
   "per_model_config": {
-    "decoder_model": {
+    "decoder_with_past_model": {
       "op_types": [
-        "Sub",
-        "Softmax",
-        "Neg",
-        "Sqrt",
-        "Reshape",
-        "If",
-        "Shape",
-        "Transpose",
+        "Unsqueeze",
         "Pow",
-        "Mul",
-        "MatMul",
+        "If",
         "Gather",
+        "Constant",
+        "Squeeze",
+        "Reshape",
         "Slice",
-        "Less",
-        "ReduceMean",
-        "Concat",
-        "Range",
-        "Unsqueeze",
-        "Equal",
-        "Cast",
+        "Neg",
         "ConstantOfShape",
-        "Add",
         "Sigmoid",
-        "Where",
-        "Squeeze",
-        "Constant",
+        "Softmax",
         "Identity",
+        "Sub",
+        "Cast",
+        "Expand",
         "Div",
-        "Expand"
+        "Mul",
+        "Add",
+        "Shape",
+        "Sqrt",
+        "Where",
+        "Equal",
+        "MatMul",
+        "Concat",
+        "Transpose",
+        "ReduceMean"
       ],
       "weight_type": "QInt8"
     },
-    "decoder_with_past_model": {
+    "decoder_model": {
       "op_types": [
-        "Sub",
-        "Softmax",
-        "Neg",
-        "Sqrt",
-        "Reshape",
-        "If",
-        "Shape",
-        "Transpose",
+        "Unsqueeze",
         "Pow",
-        "Mul",
-        "MatMul",
+        "If",
         "Gather",
+        "Constant",
+        "Squeeze",
+        "Reshape",
         "Slice",
-        "ReduceMean",
-        "Concat",
-        "Range",
-        "Unsqueeze",
-        "Equal",
-        "Cast",
+        "Less",
+        "Neg",
         "ConstantOfShape",
-        "Add",
         "Sigmoid",
-        "Where",
-        "Squeeze",
-        "Constant",
+        "Softmax",
         "Identity",
+        "Sub",
+        "Expand",
         "Div",
-        "Expand"
+        "Mul",
+        "Add",
+        "Shape",
+        "Sqrt",
+        "Where",
+        "Equal",
+        "MatMul",
+        "Concat",
+        "Range",
+        "Transpose",
+        "Cast",
+        "ReduceMean"
       ],
       "weight_type": "QInt8"
     },
     "decoder_model_merged": {
       "op_types": [
-        "Sub",
-        "Softmax",
-        "Neg",
-        "Sqrt",
-        "Reshape",
-        "If",
-        "Shape",
-        "Transpose",
+        "Unsqueeze",
         "Pow",
-        "Mul",
-        "MatMul",
+        "If",
         "Gather",
+        "Constant",
+        "Squeeze",
+        "Reshape",
         "Slice",
         "Less",
-        "ReduceMean",
-        "Concat",
-        "Range",
-        "Unsqueeze",
-        "Equal",
-        "Cast",
+        "Neg",
         "ConstantOfShape",
-        "Add",
         "Sigmoid",
-        "Where",
-        "Squeeze",
-        "Constant",
+        "Softmax",
         "Identity",
+        "Sub",
+        "Expand",
         "Div",
-        "Expand"
+        "Mul",
+        "Add",
+        "Shape",
+        "Sqrt",
+        "Where",
+        "Equal",
+        "MatMul",
+        "Concat",
+        "Range",
+        "Transpose",
+        "Cast",
+        "ReduceMean"
       ],
       "weight_type": "QInt8"
     }
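
quantize_config.json records the settings used to produce the *_quantized.onnx files above: per-tensor quantization (per_channel false), no reduced range, QInt8 weights, with an op_types list per exported model. A rough sketch of an equivalent dynamic-quantization call with ONNX Runtime; the paths are placeholders and the actual export pipeline behind this commit may differ:

from onnxruntime.quantization import QuantType, quantize_dynamic

# Mirror the knobs recorded in quantize_config.json.
quantize_dynamic(
    model_input="onnx/decoder_model_merged.onnx",              # placeholder input path
    model_output="onnx/decoder_model_merged_quantized.onnx",   # placeholder output path
    per_channel=False,
    reduce_range=False,
    weight_type=QuantType.QInt8,
    op_types_to_quantize=["MatMul", "Gather"],  # illustrative subset of the op_types listed above
)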
special_tokens_map.json CHANGED
@@ -1,23 +1,5 @@
 {
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  }
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "unk_token": "<unk>"
 }
tokenizer_config.json CHANGED
@@ -1,32 +1,38 @@
 {
-  "bos_token": {
-    "__type": "AddedToken",
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
   },
+  "additional_special_tokens": [],
+  "bos_token": "<s>",
   "clean_up_tokenization_spaces": false,
-  "eos_token": {
-    "__type": "AddedToken",
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "eos_token": "</s>",
   "model_max_length": 2048,
   "pad_token": null,
   "sp_model_kwargs": {},
   "tokenizer_class": "LlamaTokenizer",
-  "unk_token": {
-    "__type": "AddedToken",
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
+  "unk_token": "<unk>",
   "use_default_system_prompt": true
 }
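
The last two diffs move the tokenizer files to the newer serialization format: the special tokens become plain strings, and an added_tokens_decoder table carries the per-token flags. A quick check that the tokenizer still resolves the same special tokens, assuming the transformers Python package:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Xenova/llama2.c-stories42M")

# Values declared in special_tokens_map.json / tokenizer_config.json.
print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.model_max_length)                         # 2048

# Round-trip a short string to confirm the tokenizer loads correctly.
ids = tok("Once upon a time").input_ids
print(tok.decode(ids))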