Xenova committed
Commit 80773e3
Parent: c8b7236

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "lxyuan/distilbert-base-multilingual-cased-sentiments-student",
+  "activation": "gelu",
+  "architectures": [
+    "DistilBertForSequenceClassification"
+  ],
+  "attention_dropout": 0.1,
+  "dim": 768,
+  "dropout": 0.1,
+  "hidden_dim": 3072,
+  "id2label": {
+    "0": "positive",
+    "1": "neutral",
+    "2": "negative"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "negative": 2,
+    "neutral": 1,
+    "positive": 0
+  },
+  "max_position_embeddings": 512,
+  "model_type": "distilbert",
+  "n_heads": 12,
+  "n_layers": 6,
+  "output_past": true,
+  "pad_token_id": 0,
+  "qa_dropout": 0.1,
+  "seq_classif_dropout": 0.2,
+  "sinusoidal_pos_embds": false,
+  "tie_weights_": true,
+  "transformers_version": "4.38.2",
+  "vocab_size": 119547
+}
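
The config above describes a 6-layer, 12-head DistilBERT sequence classifier whose `id2label` map resolves class indices to `positive`, `neutral`, and `negative`. As a minimal sketch of consuming this export from Transformers.js — the `Xenova/distilbert-base-multilingual-cased-sentiments-student` repo id is an assumption inferred from `_name_or_path` and the committer, not stated in the diff:

```ts
// Minimal sketch: running the exported classifier with Transformers.js.
import { pipeline } from '@xenova/transformers';

const classifier = await pipeline(
  'sentiment-analysis',
  'Xenova/distilbert-base-multilingual-cased-sentiments-student', // assumed repo id
);

// id2label maps class indices 0/1/2 to "positive"/"neutral"/"negative".
const result = await classifier('I love this library!');
console.log(result); // e.g. [{ label: 'positive', score: 0.98 }]
```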
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:089dbb9dabce36b1fe8154c8516aeb77b4da12aa3129d17949774de3cfeeb306
+size 541443593
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:204368ebee755b1ee7c78e3777765b5d1f5ddc2441083ef420f0539ae716c83a
+size 395467692
onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9151f52f67b8bc574cb79df836ad8aded66ad1110a80ef977c8670a49adde9af
+size 270832222
onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9570fdd726d952dbaf559ba0b9f38a523554f08a8038d62d9263a90dcbb3cd3e
+size 135953840
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a089a5caf381c68f4a8c789a40ca986362fd0ac4ed9d931786152963757e9aa
+size 398121636
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9570fdd726d952dbaf559ba0b9f38a523554f08a8038d62d9263a90dcbb3cd3e
+size 135953840
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4aed8ce9a4f1ff8e978a75daf10b114984495aa3ecf98138d9ecd8a1829fcf57
+size 135953840
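
Each `onnx/*.onnx` entry above is a Git LFS pointer rather than the model binary itself: a spec `version` line, the SHA-256 `oid` of the real file, and its `size` in bytes (note that `model_int8.onnx` and `model_quantized.onnx` carry the same oid and size, so they are byte-identical). A minimal sketch of reading these three fields — `parseLfsPointer` is a hypothetical helper for illustration, not part of any library:

```ts
// Hypothetical helper: split a Git LFS pointer file into its three fields.
function parseLfsPointer(text: string): { version: string; oid: string; size: number } {
  const fields = new Map<string, string>(
    text
      .trim()
      .split('\n')
      .map((line) => {
        const [key, value] = line.split(' ');
        return [key, value] as [string, string];
      }),
  );
  return {
    version: fields.get('version')!,                // LFS spec URL
    oid: fields.get('oid')!.replace('sha256:', ''), // content hash of the real file
    size: Number(fields.get('size')!),              // size of the real file in bytes
  };
}

// Example: the pointer for onnx/model_uint8.onnx above.
const pointer = parseLfsPointer(
  'version https://git-lfs.github.com/spec/v1\n' +
    'oid sha256:4aed8ce9a4f1ff8e978a75daf10b114984495aa3ecf98138d9ecd8a1829fcf57\n' +
    'size 135953840',
);
console.log(pointer.size); // 135953840
```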
quantize_config.json ADDED
@@ -0,0 +1,117 @@
+{
+  "fp16": {},
+  "q8": {
+    "per_model_config": {
+      "model": {
+        "op_types": [
+          "Add",
+          "Cast",
+          "Concat",
+          "Constant",
+          "Div",
+          "Equal",
+          "Erf",
+          "Expand",
+          "Gather",
+          "Gemm",
+          "MatMul",
+          "Mul",
+          "Pow",
+          "ReduceMean",
+          "Relu",
+          "Reshape",
+          "Shape",
+          "Slice",
+          "Softmax",
+          "Sqrt",
+          "Sub",
+          "Transpose",
+          "Unsqueeze",
+          "Where"
+        ],
+        "weight_type": "QInt8"
+      }
+    },
+    "per_channel": true,
+    "reduce_range": true
+  },
+  "int8": {
+    "per_model_config": {
+      "model": {
+        "op_types": [
+          "Add",
+          "Cast",
+          "Concat",
+          "Constant",
+          "Div",
+          "Equal",
+          "Erf",
+          "Expand",
+          "Gather",
+          "Gemm",
+          "MatMul",
+          "Mul",
+          "Pow",
+          "ReduceMean",
+          "Relu",
+          "Reshape",
+          "Shape",
+          "Slice",
+          "Softmax",
+          "Sqrt",
+          "Sub",
+          "Transpose",
+          "Unsqueeze",
+          "Where"
+        ],
+        "weight_type": "QInt8"
+      }
+    },
+    "per_channel": true,
+    "reduce_range": true
+  },
+  "uint8": {
+    "per_model_config": {
+      "model": {
+        "op_types": [
+          "Add",
+          "Cast",
+          "Concat",
+          "Constant",
+          "Div",
+          "Equal",
+          "Erf",
+          "Expand",
+          "Gather",
+          "Gemm",
+          "MatMul",
+          "Mul",
+          "Pow",
+          "ReduceMean",
+          "Relu",
+          "Reshape",
+          "Shape",
+          "Slice",
+          "Softmax",
+          "Sqrt",
+          "Sub",
+          "Transpose",
+          "Unsqueeze",
+          "Where"
+        ],
+        "weight_type": "QUInt8"
+      }
+    },
+    "per_channel": true,
+    "reduce_range": true
+  },
+  "q4": {
+    "block_size": 32,
+    "is_symmetric": true,
+    "accuracy_level": null
+  },
+  "bnb4": {
+    "block_size": 64,
+    "quant_type": 1
+  }
+}
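
quantize_config.json records how each ONNX variant was produced: `q8`, `int8`, and `uint8` apply per-channel, reduced-range 8-bit weight quantization over the listed op types (QInt8 vs. QUInt8 weights), `q4` is symmetric 4-bit with block size 32, and `bnb4` uses 4-bit blocks of 64. A sketch of selecting a specific variant at load time, assuming the Transformers.js v3 `dtype` option (v2 exposes only a boolean `quantized` flag) and the same assumed repo id as above:

```ts
// Sketch: picking one of the quantized ONNX variants at load time.
// Assumes Transformers.js v3+, where `dtype` selects the model_*.onnx file.
import { pipeline } from '@huggingface/transformers';

const classifier = await pipeline(
  'sentiment-analysis',
  'Xenova/distilbert-base-multilingual-cased-sentiments-student', // assumed repo id
  { dtype: 'q4' }, // or 'fp16' | 'q8' | 'int8' | 'uint8' | 'bnb4'
);

console.log(await classifier('Ce film était fantastique !'));
```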
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+{
+  "cls_token": {
+    "content": "[CLS]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "[MASK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "[PAD]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "[SEP]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "[UNK]",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
The diff for this file is too large to render.
tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
+  "max_length": 512,
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_to_multiple_of": null,
+  "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "[SEP]",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "DistilBertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "[UNK]"
+}
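
The tokenizer config pins a cased multilingual `DistilBertTokenizer` with the standard specials [PAD]/[UNK]/[CLS]/[SEP]/[MASK] at ids 0/100/101/102/103 and a 512-token `model_max_length`, matching `max_position_embeddings` in config.json. A minimal sketch of loading the tokenizer on its own (repo id assumed as above):

```ts
// Sketch: tokenizing text with only the uploaded tokenizer files.
import { AutoTokenizer } from '@xenova/transformers';

const tokenizer = await AutoTokenizer.from_pretrained(
  'Xenova/distilbert-base-multilingual-cased-sentiments-student', // assumed repo id
);

// encode() returns token ids; [CLS] (101) and [SEP] (102) are added automatically.
console.log(tokenizer.encode('¡Hola mundo!'));
```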
vocab.txt ADDED
The diff for this file is too large to render.