Commit db5810c by Xenova (HF staff)
1 parent: b572a64

Upload folder using huggingface_hub
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "MoritzLaurer/ernie-m-base-mnli-xnli",
+   "act_dropout": 0.0,
+   "architectures": [
+     "ErnieMForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "dtype": "float32",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "entailment",
+     "1": "neutral",
+     "2": "contradiction"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "contradiction": 2,
+     "entailment": 0,
+     "neutral": 1
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "ernie_m",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "paddlenlp_version": null,
+   "problem_type": "single_label_classification",
+   "transformers_version": "4.38.2",
+   "type_vocab_size": 16,
+   "vocab_size": 250002
+ }
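Note: config.json carries over the NLI label mapping (entailment / neutral / contradiction) from the base checkpoint named in _name_or_path. As a minimal sketch of what that classification head is used for, the snippet below runs the base checkpoint as a zero-shot classifier with the Python transformers pipeline; it assumes a transformers release that still ships ErnieM support (the config records 4.38.2) and that sentencepiece is installed.

# Minimal sketch, not part of this commit: zero-shot classification with the
# base checkpoint named in config.json. Assumes transformers ~4.38 (ErnieM
# support) and sentencepiece are installed.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="MoritzLaurer/ernie-m-base-mnli-xnli",
)
result = classifier(
    "The new graphics card sold out within minutes of launch.",
    candidate_labels=["technology", "politics", "sports"],
)
print(result["labels"][0], round(result["scores"][0], 3))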
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:190c1d3ac84b02d879915d9f7d6d42343aaa3bbb99055ac39a0ec43d8abae51c
+ size 1112448881
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a3de25f9dc2b254c9e09729ac8e4a7e2cbfa7bb13a7f0f83162465f6b951c41
+ size 820497204
onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34f31bb4471fa4522e2e052786b01b678afa7d13c361def9e3b948e9b611bdd4
+ size 556446854
onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c28fa7cbdf996a421cbc83887421c1cf907d8dd1df5296ce5f797b2183adb57f
+ size 279281982
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5702bfdddfd22d86e78dba6ac6c7bdb797bd2d202187247f66b69029e940d3a7
+ size 825805092
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c28fa7cbdf996a421cbc83887421c1cf907d8dd1df5296ce5f797b2183adb57f
+ size 279281982
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c44a74933bd0554e948e0d8c11851813dd515933555817877a35e2a2459ca949
+ size 279281980
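Note: the files under onnx/ are precision variants of the same exported graph (fp32, fp16, int8, uint8, q4, bnb4); model_quantized.onnx and model_int8.onnx point at the same LFS object (identical oid and size), so they are the same artifact. Below is a minimal sketch of opening one variant directly with ONNX Runtime and inspecting its graph inputs and outputs; the local path is an assumption (e.g. after downloading the file from this repository).

# Minimal sketch, not part of this commit: open one quantized variant with
# ONNX Runtime and list its graph inputs/outputs. The local path assumes the
# file has already been downloaded from this repository.
import onnxruntime as ort

session = ort.InferenceSession(
    "onnx/model_int8.onnx",
    providers=["CPUExecutionProvider"],
)
print([inp.name for inp in session.get_inputs()])
print([out.name for out in session.get_outputs()])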
quantize_config.json ADDED
@@ -0,0 +1,111 @@
+ {
+   "fp16": {},
+   "q8": {
+     "per_model_config": {
+       "model": {
+         "op_types": [
+           "Add",
+           "Cast",
+           "Concat",
+           "Constant",
+           "ConstantOfShape",
+           "CumSum",
+           "Div",
+           "Erf",
+           "Gather",
+           "Gemm",
+           "MatMul",
+           "Mul",
+           "Pow",
+           "ReduceMean",
+           "Reshape",
+           "Shape",
+           "Softmax",
+           "Sqrt",
+           "Sub",
+           "Tanh",
+           "Transpose",
+           "Unsqueeze"
+         ],
+         "weight_type": "QInt8"
+       }
+     },
+     "per_channel": true,
+     "reduce_range": true
+   },
+   "int8": {
+     "per_model_config": {
+       "model": {
+         "op_types": [
+           "Add",
+           "Cast",
+           "Concat",
+           "Constant",
+           "ConstantOfShape",
+           "CumSum",
+           "Div",
+           "Erf",
+           "Gather",
+           "Gemm",
+           "MatMul",
+           "Mul",
+           "Pow",
+           "ReduceMean",
+           "Reshape",
+           "Shape",
+           "Softmax",
+           "Sqrt",
+           "Sub",
+           "Tanh",
+           "Transpose",
+           "Unsqueeze"
+         ],
+         "weight_type": "QInt8"
+       }
+     },
+     "per_channel": true,
+     "reduce_range": true
+   },
+   "uint8": {
+     "per_model_config": {
+       "model": {
+         "op_types": [
+           "Add",
+           "Cast",
+           "Concat",
+           "Constant",
+           "ConstantOfShape",
+           "CumSum",
+           "Div",
+           "Erf",
+           "Gather",
+           "Gemm",
+           "MatMul",
+           "Mul",
+           "Pow",
+           "ReduceMean",
+           "Reshape",
+           "Shape",
+           "Softmax",
+           "Sqrt",
+           "Sub",
+           "Tanh",
+           "Transpose",
+           "Unsqueeze"
+         ],
+         "weight_type": "QUInt8"
+       }
+     },
+     "per_channel": true,
+     "reduce_range": true
+   },
+   "q4": {
+     "block_size": 32,
+     "is_symmetric": true,
+     "accuracy_level": null
+   },
+   "bnb4": {
+     "block_size": 64,
+     "quant_type": 1
+   }
+ }
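Note: the q8/int8/uint8 entries record dynamic weight-quantization settings (per-channel, reduced range, QInt8/QUInt8 weights) over the listed op types, while q4 and bnb4 record block-wise 4-bit settings. The export script itself is not part of this commit; the sketch below only illustrates how the fields of the "uint8" entry map onto onnxruntime's dynamic quantization API.

# Minimal sketch, not part of this commit: settings equivalent to the "uint8"
# entry above, expressed with onnxruntime's dynamic quantizer. The op list is
# truncated here; the full list is given in quantize_config.json.
from onnxruntime.quantization import QuantType, quantize_dynamic

quantize_dynamic(
    model_input="onnx/model.onnx",
    model_output="onnx/model_uint8.onnx",
    op_types_to_quantize=["MatMul", "Gemm", "Gather", "Add"],  # truncated
    per_channel=True,
    reduce_range=True,
    weight_type=QuantType.QUInt8,
)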
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json ADDED
@@ -0,0 +1,55 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "250001": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "encoding": "utf8",
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "ErnieMTokenizer",
+   "unk_token": "[UNK]"
+ }
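Note: sentencepiece.bpe.model, special_tokens_map.json and tokenizer_config.json together define an ErnieMTokenizer whose special tokens occupy ids 0-3 and 250001. As a minimal sketch (assuming the files above have been downloaded into the current directory and a transformers build with ErnieM support plus sentencepiece is installed), the snippet below loads the tokenizer and checks those ids.

# Minimal sketch, not part of this commit: load the tokenizer files above and
# confirm the special-token ids listed in added_tokens_decoder
# (0=[CLS], 1=[PAD], 2=[SEP], 3=[UNK], 250001=[MASK]).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # directory holding the files above
print(tokenizer.cls_token_id, tokenizer.pad_token_id, tokenizer.sep_token_id)
print(tokenizer.unk_token_id, tokenizer.mask_token_id)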
vocab.txt ADDED
The diff for this file is too large to render.