Xenova (HF staff) committed
Commit 22f3927
Parent: 45e1667

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,57 @@
+ {
+   "_name_or_path": "facebook/mbart-large-50",
+   "_num_labels": 3,
+   "activation_dropout": 0.0,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": true,
+   "architectures": [
+     "MBartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "classif_dropout": 0.0,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": 200,
+   "max_position_embeddings": 1024,
+   "model_type": "mbart",
+   "normalize_before": true,
+   "normalize_embedding": true,
+   "num_beams": 5,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "scale_embedding": true,
+   "static_position_embeddings": false,
+   "tokenizer_class": "MBart50Tokenizer",
+   "transformers_version": "4.34.0.dev0",
+   "use_cache": true,
+   "vocab_size": 250054
+ }
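For orientation, this is the standard mBART-large architecture: 12 encoder and 12 decoder layers, a model width of 1024, and a ~250k-entry multilingual vocabulary. The values above can be inspected programmatically; a minimal sketch with 🤗 Transformers, pointed at the source checkpoint named in `_name_or_path` (assumes `transformers` is installed):

```python
from transformers import AutoConfig

# Fetch the same architecture settings shown in the diff above.
config = AutoConfig.from_pretrained("facebook/mbart-large-50")

print(config.model_type)   # "mbart"
print(config.d_model)      # 1024
print(config.vocab_size)   # 250054
```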
generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "max_length": 200,
+   "num_beams": 5,
+   "pad_token_id": 1,
+   "transformers_version": "4.34.0.dev0"
+ }
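These generation defaults (5-beam search, max length 200, early stopping) are picked up automatically by `generate()` when no overrides are passed. A minimal sketch against the PyTorch source checkpoint (illustrative only; this repo itself ships the ONNX export):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-50")

inputs = tokenizer("Hello, world!", return_tensors="pt")
# With no kwargs, generate() takes num_beams=5, max_length=200 and
# early_stopping=True from generation_config.json; explicit kwargs override them.
output_ids = model.generate(**inputs, max_length=40)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))
```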
onnx/decoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2baa7b7f1a4730d6ba9cc37b1adc6f180a62dff70fef407e3723395a166a37b
+ size 1836283492
onnx/decoder_model_merged.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97abe2d135a26bd68cb9b7f04ee489d9705069e50728e02f867d222006b86135
+ size 1836803813
onnx/decoder_model_merged_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:488bebddbf209fb891ef64f214b8245e40bd9eac713e8bc05f44167cd3481953
+ size 462910544
onnx/decoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5089532557ead0fa83c272a3c5068e8d98f54971cf405dab46e07b0759aeaee8
+ size 462114989
onnx/decoder_with_past_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbc370b3c77904e19ec218f6a8ca51dd1c8de6e2bf7d73bdbbb580b4f62ca8de
+ size 1735435475
onnx/decoder_with_past_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fba7c7efeca3b01358e9b125e072bb7df90d0e0a282b860629d3581f1e72397b
+ size 436589371
onnx/encoder_model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c8414741722627aab1f827ac951c0d200ff50e625f8c3400e12484f44d4abf6
+ size 1633291266
onnx/encoder_model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f443f2d9c27aea400d8895f07cebec04f436636684ca04c4832c78c29ff7434
+ size 409694916
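The export ships the encoder and decoder as separate graphs: `decoder_model` for the first generation step, `decoder_with_past_model` for cached steps, and `decoder_model_merged` combining both behind `If` nodes (hence the extra `If`/`Less` ops in its quantize config below), each alongside an int8-quantized copy at roughly a quarter of the size. A sketch of running the quantized encoder directly with `onnxruntime`, assuming the files are downloaded locally and the usual Optimum export input names (`input_ids`, `attention_mask`):

```python
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-50")
session = ort.InferenceSession("onnx/encoder_model_quantized.onnx")

enc = tokenizer("Hello, world!", return_tensors="np")
# The encoder produces the final hidden states consumed by the decoder's
# cross-attention.
(last_hidden_state,) = session.run(None, {
    "input_ids": enc["input_ids"].astype(np.int64),
    "attention_mask": enc["attention_mask"].astype(np.int64),
})
print(last_hidden_state.shape)  # (1, seq_len, 1024)
```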
quantize_config.json ADDED
@@ -0,0 +1,125 @@
+ {
+   "per_channel": true,
+   "reduce_range": true,
+   "per_model_config": {
+     "encoder_model": {
+       "op_types": [
+         "Softmax",
+         "Gather",
+         "Add",
+         "Constant",
+         "Equal",
+         "Where",
+         "Sqrt",
+         "Mul",
+         "ConstantOfShape",
+         "Transpose",
+         "Erf",
+         "Expand",
+         "Range",
+         "Reshape",
+         "Concat",
+         "Unsqueeze",
+         "MatMul",
+         "Shape",
+         "Cast",
+         "ReduceMean",
+         "Sub",
+         "Div",
+         "Pow"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_with_past_model": {
+       "op_types": [
+         "Softmax",
+         "Gather",
+         "Add",
+         "Constant",
+         "Equal",
+         "Where",
+         "Sqrt",
+         "Mul",
+         "ConstantOfShape",
+         "Transpose",
+         "Erf",
+         "Expand",
+         "Range",
+         "Reshape",
+         "Concat",
+         "Unsqueeze",
+         "MatMul",
+         "Shape",
+         "Cast",
+         "ReduceMean",
+         "Sub",
+         "Div",
+         "Pow"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model_merged": {
+       "op_types": [
+         "Softmax",
+         "Slice",
+         "Gather",
+         "Add",
+         "Squeeze",
+         "Constant",
+         "Equal",
+         "Less",
+         "Where",
+         "Sqrt",
+         "If",
+         "Mul",
+         "ConstantOfShape",
+         "Transpose",
+         "Erf",
+         "Expand",
+         "Range",
+         "Reshape",
+         "Concat",
+         "Unsqueeze",
+         "MatMul",
+         "Shape",
+         "Cast",
+         "ReduceMean",
+         "Sub",
+         "Div",
+         "Pow"
+       ],
+       "weight_type": "QInt8"
+     },
+     "decoder_model": {
+       "op_types": [
+         "Softmax",
+         "Slice",
+         "Gather",
+         "Add",
+         "Squeeze",
+         "Constant",
+         "Equal",
+         "Less",
+         "Where",
+         "Sqrt",
+         "Mul",
+         "ConstantOfShape",
+         "Transpose",
+         "Erf",
+         "Expand",
+         "Range",
+         "Reshape",
+         "Concat",
+         "Unsqueeze",
+         "MatMul",
+         "Shape",
+         "Cast",
+         "ReduceMean",
+         "Sub",
+         "Div",
+         "Pow"
+       ],
+       "weight_type": "QInt8"
+     }
+   }
+ }
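This file records how the `*_quantized.onnx` files were produced: dynamic int8 weight quantization (`QInt8`), per-channel and with reduced range, over the op types listed per subgraph. The conversion script itself is not part of this commit, but `onnxruntime`'s dynamic quantizer with matching options would look roughly like this sketch (paths illustrative):

```python
from onnxruntime.quantization import QuantType, quantize_dynamic

# Mirrors quantize_config.json: per_channel=true, reduce_range=true, QInt8 weights.
quantize_dynamic(
    model_input="onnx/encoder_model.onnx",
    model_output="onnx/encoder_model_quantized.onnx",
    per_channel=True,
    reduce_range=True,
    weight_type=QuantType.QInt8,
)
```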
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+ size 5069051
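This is the raw SentencePiece model underlying the tokenizer; `tokenizer.json` (added below) is its precompiled fast-tokenizer counterpart. It can be loaded standalone with the `sentencepiece` library, e.g.:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")
# Prints the subword pieces, e.g. ['▁Hello', ',', '▁world', '!'] (exact piece
# boundaries shown for illustration only).
print(sp.encode("Hello, world!", out_type=str))
```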
special_tokens_map.json ADDED
@@ -0,0 +1,63 @@
+ {
+   "additional_special_tokens": [
+     "ar_AR",
+     "cs_CZ",
+     "de_DE",
+     "en_XX",
+     "es_XX",
+     "et_EE",
+     "fi_FI",
+     "fr_XX",
+     "gu_IN",
+     "hi_IN",
+     "it_IT",
+     "ja_XX",
+     "kk_KZ",
+     "ko_KR",
+     "lt_LT",
+     "lv_LV",
+     "my_MM",
+     "ne_NP",
+     "nl_XX",
+     "ro_RO",
+     "ru_RU",
+     "si_LK",
+     "tr_TR",
+     "vi_VN",
+     "zh_CN",
+     "af_ZA",
+     "az_AZ",
+     "bn_IN",
+     "fa_IR",
+     "he_IL",
+     "hr_HR",
+     "id_ID",
+     "ka_GE",
+     "km_KH",
+     "mk_MK",
+     "ml_IN",
+     "mn_MN",
+     "mr_IN",
+     "pl_PL",
+     "ps_AF",
+     "pt_XX",
+     "sv_SE",
+     "sw_KE",
+     "ta_IN",
+     "te_IN",
+     "th_TH",
+     "tl_XX",
+     "uk_UA",
+     "ur_PK",
+     "xh_ZA",
+     "gl_ES",
+     "sl_SI"
+   ],
+   "bos_token": "<s>",
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": "<mask>",
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "unk_token": "<unk>"
+ }
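The 52 `additional_special_tokens` are mBART-50's language codes (the original 25 mBART languages followed by the 27 added for mBART-50). `MBart50Tokenizer` prepends the active source-language code to every encoded sequence; a quick sketch:

```python
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained("facebook/mbart-large-50", src_lang="en_XX")
ids = tokenizer("Hello, world!").input_ids
# The mBART-50 format is [src_lang_code] X [eos], so the code comes first:
print(tokenizer.convert_ids_to_tokens(ids)[0])   # "en_XX"
print(tokenizer.convert_ids_to_tokens(ids)[-1])  # "</s>"
```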
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d58a68c276b56fcc48c165c63f70e5e4d452b4182032a5f7a2d018f4aa1a889
+ size 17109752
tokenizer_config.json ADDED
@@ -0,0 +1,76 @@
+ {
+   "additional_special_tokens": [
+     "ar_AR",
+     "cs_CZ",
+     "de_DE",
+     "en_XX",
+     "es_XX",
+     "et_EE",
+     "fi_FI",
+     "fr_XX",
+     "gu_IN",
+     "hi_IN",
+     "it_IT",
+     "ja_XX",
+     "kk_KZ",
+     "ko_KR",
+     "lt_LT",
+     "lv_LV",
+     "my_MM",
+     "ne_NP",
+     "nl_XX",
+     "ro_RO",
+     "ru_RU",
+     "si_LK",
+     "tr_TR",
+     "vi_VN",
+     "zh_CN",
+     "af_ZA",
+     "az_AZ",
+     "bn_IN",
+     "fa_IR",
+     "he_IL",
+     "hr_HR",
+     "id_ID",
+     "ka_GE",
+     "km_KH",
+     "mk_MK",
+     "ml_IN",
+     "mn_MN",
+     "mr_IN",
+     "pl_PL",
+     "ps_AF",
+     "pt_XX",
+     "sv_SE",
+     "sw_KE",
+     "ta_IN",
+     "te_IN",
+     "th_TH",
+     "tl_XX",
+     "uk_UA",
+     "ur_PK",
+     "xh_ZA",
+     "gl_ES",
+     "sl_SI"
+   ],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "mask_token": {
+     "__type": "AddedToken",
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1024,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "sp_model_kwargs": {},
+   "src_lang": null,
+   "tgt_lang": null,
+   "tokenizer_class": "MBart50Tokenizer",
+   "unk_token": "<unk>"
+ }
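`src_lang` and `tgt_lang` default to `null` here, so callers choose them per use. For translation-style generation, mBART-50 checkpoints force the target-language code as the first generated token. A sketch using the fine-tuned many-to-many sibling checkpoint for illustration (the base `facebook/mbart-large-50` is a pretrained denoiser and needs fine-tuning before its translations are meaningful):

```python
from transformers import MBart50Tokenizer, MBartForConditionalGeneration

checkpoint = "facebook/mbart-large-50-many-to-many-mmt"  # fine-tuned sibling checkpoint
tokenizer = MBart50Tokenizer.from_pretrained(checkpoint, src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained(checkpoint)

inputs = tokenizer("Hello, world!", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"],  # decode into Romanian
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))
```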