Xenova (HF staff) committed
Commit 70b41ee
1 Parent(s): 02528af

Upload folder using huggingface_hub
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "Milos/slovak-gpt-j-405M",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPTJForCausalLM"
+   ],
+   "attn_pdrop": 0.0,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 50256,
+   "gradient_checkpointing": false,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gptj",
+   "n_embd": 1024,
+   "n_head": 16,
+   "n_inner": null,
+   "n_layer": 24,
+   "n_positions": 2048,
+   "resid_pdrop": 0.0,
+   "rotary_dim": 64,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50,
+       "temperature": 1.0
+     }
+   },
+   "tie_word_embeddings": false,
+   "tokenizer_class": "GPT2Tokenizer",
+   "transformers_version": "4.33.0.dev0",
+   "use_cache": true,
+   "vocab_size": 50256
+ }
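The hyperparameters above (n_embd 1024, n_head 16, n_layer 24, untied embeddings) are consistent with the "405M" in the model name. A minimal sketch, assuming the standard GPT-J block layout (4·d² attention weights plus 8·d² MLP weights per layer, separate input and output embeddings; small bias and LayerNorm terms omitted), that reads this config.json and estimates the parameter count:

```python
import json

# Rough parameter-count estimate from config.json, assuming the usual GPT-J
# block layout. Bias and LayerNorm parameters are ignored for simplicity.
with open("config.json") as f:
    cfg = json.load(f)

d, layers, vocab = cfg["n_embd"], cfg["n_layer"], cfg["vocab_size"]
inner = cfg["n_inner"] or 4 * d          # n_inner is null -> defaults to 4 * n_embd

per_layer = 4 * d * d + 2 * d * inner    # attention + MLP weight matrices
embeddings = 2 * vocab * d               # wte + lm_head (tie_word_embeddings is false)
total = layers * per_layer + embeddings

print(f"~{total / 1e6:.0f}M parameters")  # prints roughly 405M
```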
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.33.0.dev0"
+ }
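generation_config.json only records the defaults derived from the model config (bos/eos token id 50256). A hedged sketch of running the exported ONNX decoder with Optimum's ONNX Runtime backend, using the sampling settings stored under task_specific_params in config.json; the repo id below is an assumption about where this upload lives, and the exact from_pretrained keyword arguments can differ across Optimum versions:

```python
# Sketch: generation with the exported ONNX weights via Optimum's ONNX Runtime
# backend. "Xenova/slovak-gpt-j-405M" is an assumed repo id for this upload;
# a local clone of the repository works the same way.
from optimum.onnxruntime import ORTModelForCausalLM
from transformers import AutoTokenizer

repo_id = "Xenova/slovak-gpt-j-405M"  # assumption, not confirmed by this diff

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = ORTModelForCausalLM.from_pretrained(
    repo_id,
    subfolder="onnx",
    file_name="decoder_model_merged.onnx",  # pick one of the exported variants
)

inputs = tokenizer("Slovensko je", return_tensors="pt")
# Sampling settings mirror task_specific_params.text-generation in config.json.
outputs = model.generate(**inputs, do_sample=True, max_length=50, temperature=1.0)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```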
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26337710f30b36158ac017d33b4678e8aeaed8bc95bbd8d694df27e4e8c5d4a1
+ size 1626056098
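The three lines above are a Git LFS pointer, not the model itself: `oid` is the SHA-256 of the real file and `size` its byte count (here about 1.6 GB); the payload is fetched by `git lfs pull` or the Hub's download endpoints. The same pattern applies to all the .onnx entries that follow. A small sketch for verifying a downloaded file against its pointer:

```python
# Verify a downloaded LFS object against the oid/size recorded in its pointer.
import hashlib
import os

path = "onnx/decoder_model.onnx"  # the real file, after `git lfs pull`
expected_oid = "26337710f30b36158ac017d33b4678e8aeaed8bc95bbd8d694df27e4e8c5d4a1"
expected_size = 1626056098

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == expected_oid, "SHA-256 mismatch"
assert os.path.getsize(path) == expected_size, "size mismatch"
print("pointer verified")
```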
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09ffae99c215c626c023be34a76d9ce22ae749b766d51eb7ede2443fac4a844a
+ size 1631572955
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96701205103a08599560f558a4e44f1ad384f840c704fbf5c7cd3c28ddae17e4
+ size 417676668
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9e880526d995c2ed0b25002669e03912e29ad1c9bd391ee1dc0f72638ec43a4
+ size 411737211
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:135e2692e8992ad8057913c080197ccaf8c469a717b5392b912e96d1fbb88cc3
+ size 1626069297
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edf14050aac478dec2edbd85ea51cc645a53e41f4cdb3abf9b0779c771eeeda3
+ size 411753199
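Three decoder variants are shipped, each with an int8-quantized copy at roughly a quarter of the size: `decoder_model.onnx` (no cache inputs), `decoder_with_past_model.onnx` (takes past key/values for incremental decoding), and `decoder_model_merged.onnx` (both paths folded into one graph and selected at runtime, which is why its op list in quantize_config.json below includes an `If` node). A quick sketch for listing which inputs a given variant expects; the exact input names are assumptions based on typical Optimum exports:

```python
# Sketch: list the graph inputs of one ONNX variant to see whether it expects
# past_key_values. load_external_data=False skips any external weight files;
# the graph structure alone is enough to read the input names.
import onnx

model = onnx.load("onnx/decoder_with_past_model.onnx", load_external_data=False)
print([inp.name for inp in model.graph.input])
# Expected (not verified here): input_ids, attention_mask and a set of
# past_key_values.* inputs for the with-past variant; decoder_model.onnx
# should list no past_key_values inputs at all.
```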
quantize_config.json ADDED
@@ -0,0 +1,127 @@
+ {
+   "per_channel": false,
+   "reduce_range": false,
+   "per_model_config": {
+     "decoder_model": {
+       "op_types": [
+         "Equal",
+         "Split",
+         "Transpose",
+         "Shape",
+         "GatherElements",
+         "Concat",
+         "Tile",
+         "Mul",
+         "Unsqueeze",
+         "Cast",
+         "ReduceMean",
+         "Softmax",
+         "Pow",
+         "Gather",
+         "Loop",
+         "Expand",
+         "Reshape",
+         "SequenceEmpty",
+         "Tanh",
+         "Squeeze",
+         "SequenceInsert",
+         "Slice",
+         "Where",
+         "Constant",
+         "SequenceAt",
+         "SplitToSequence",
+         "ConcatFromSequence",
+         "Sqrt",
+         "Neg",
+         "Sub",
+         "Range",
+         "MatMul",
+         "Add",
+         "ConstantOfShape",
+         "Div"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model_merged": {
+       "op_types": [
+         "Equal",
+         "Split",
+         "Transpose",
+         "Shape",
+         "GatherElements",
+         "Concat",
+         "If",
+         "Tile",
+         "Mul",
+         "Unsqueeze",
+         "Cast",
+         "ReduceMean",
+         "Softmax",
+         "Pow",
+         "Gather",
+         "Loop",
+         "Expand",
+         "Reshape",
+         "SequenceEmpty",
+         "Tanh",
+         "Squeeze",
+         "SequenceInsert",
+         "Slice",
+         "Where",
+         "Constant",
+         "SequenceAt",
+         "SplitToSequence",
+         "ConcatFromSequence",
+         "Sqrt",
+         "Neg",
+         "Sub",
+         "Range",
+         "MatMul",
+         "Add",
+         "ConstantOfShape",
+         "Div"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_with_past_model": {
+       "op_types": [
+         "Equal",
+         "Split",
+         "Transpose",
+         "Shape",
+         "GatherElements",
+         "Concat",
+         "Tile",
+         "Mul",
+         "Unsqueeze",
+         "Cast",
+         "ReduceMean",
+         "Softmax",
+         "Pow",
+         "Gather",
+         "Loop",
+         "Expand",
+         "Reshape",
+         "SequenceEmpty",
+         "Tanh",
+         "Squeeze",
+         "SequenceInsert",
+         "Slice",
+         "Where",
+         "Constant",
+         "SequenceAt",
+         "SplitToSequence",
+         "ConcatFromSequence",
+         "Sqrt",
+         "Neg",
+         "Sub",
+         "Range",
+         "MatMul",
+         "Add",
+         "ConstantOfShape",
+         "Div"
+       ],
+       "weight_type": "QInt8"
+     }
+   }
+ }
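quantize_config.json records how the *_quantized.onnx files were produced: dynamic 8-bit weight quantization (QInt8), per-tensor rather than per-channel, without reduced range, applied over the op types listed for each graph. A rough sketch of reproducing those settings with ONNX Runtime's dynamic quantizer; the actual export pipeline (likely the Optimum/Transformers.js conversion scripts) may pass additional options not recorded here:

```python
# Sketch: dynamic weight-only quantization with the settings recorded in
# quantize_config.json (QInt8 weights, per_channel=False, reduce_range=False).
# The original export pipeline may have used further options.
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="onnx/decoder_model.onnx",
    model_output="onnx/decoder_model_quantized.onnx",
    per_channel=False,
    reduce_range=False,
    weight_type=QuantType.QInt8,
)
```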
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
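The tokenizer is a standard GPT-2 byte-level BPE: bos, eos and unk all resolve to `<|endoftext|>` (id 50256, matching config.json), `add_bos_token` is false, and the huge `model_max_length` is the "no limit set" sentinel that Transformers writes; the real context window is `n_positions` = 2048. A small round-trip sketch, assuming a local clone of this repository in the working directory:

```python
# Sketch: load the tokenizer from this repository's files and inspect the
# special tokens. "." assumes the repo has been cloned locally.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".")
print(tok.bos_token_id, tok.eos_token_id, tok.unk_token_id)  # all 50256

ids = tok("Dobrý deň, svet!").input_ids
print(ids)
print(tok.decode(ids))  # decodes back to the original string
```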
vocab.json ADDED
The diff for this file is too large to render. See raw diff