Adding ONNX file of this model
Browse files — Beep boop, I am the [ONNX export bot 🤖🏎️](https://huggingface.co/spaces/onnx/export). On behalf of [Gidz](https://huggingface.co/Gidz), I would like to add to this repository the model converted to ONNX.
What is ONNX? It stands for "Open Neural Network Exchange", and is the most commonly used open standard for machine learning interoperability. You can find out more at [onnx.ai](https://onnx.ai/)!
The exported ONNX model can then be consumed by various backends such as TensorRT or TVM, or simply be used in a few lines with 🤗 Optimum through ONNX Runtime — check out how [here](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/models)!
- .gitattributes +4 -0
- onnx/config.json +33 -0
- onnx/decoder_model.onnx +3 -0
- onnx/decoder_model.onnx_data +3 -0
- onnx/decoder_model_merged.onnx +3 -0
- onnx/decoder_model_merged.onnx_data +3 -0
- onnx/decoder_with_past_model.onnx +3 -0
- onnx/decoder_with_past_model.onnx_data +3 -0
- onnx/encoder_model.onnx +3 -0
- onnx/encoder_model.onnx_data +3 -0
- onnx/generation_config.json +9 -0
- onnx/sentencepiece.bpe.model +3 -0
- onnx/special_tokens_map.json +219 -0
- onnx/tokenizer.json +3 -0
- onnx/tokenizer_config.json +24 -0
.gitattributes
CHANGED
@@ -26,3 +26,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
26 |
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
27 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
28 |
tokenizer.json filter=lfs diff=lfs merge=lfs -text
|
29 |
+
onnx/decoder_model_merged.onnx_data filter=lfs diff=lfs merge=lfs -text
|
30 |
+
onnx/encoder_model.onnx_data filter=lfs diff=lfs merge=lfs -text
|
31 |
+
onnx/decoder_with_past_model.onnx_data filter=lfs diff=lfs merge=lfs -text
|
32 |
+
onnx/decoder_model.onnx_data filter=lfs diff=lfs merge=lfs -text
|
onnx/config.json
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "facebook/nllb-200-1.3B",
|
3 |
+
"activation_dropout": 0.0,
|
4 |
+
"activation_function": "relu",
|
5 |
+
"architectures": [
|
6 |
+
"M2M100ForConditionalGeneration"
|
7 |
+
],
|
8 |
+
"attention_dropout": 0.1,
|
9 |
+
"bos_token_id": 0,
|
10 |
+
"d_model": 1024,
|
11 |
+
"decoder_attention_heads": 16,
|
12 |
+
"decoder_ffn_dim": 8192,
|
13 |
+
"decoder_layerdrop": 0,
|
14 |
+
"decoder_layers": 24,
|
15 |
+
"decoder_start_token_id": 2,
|
16 |
+
"dropout": 0.1,
|
17 |
+
"encoder_attention_heads": 16,
|
18 |
+
"encoder_ffn_dim": 8192,
|
19 |
+
"encoder_layerdrop": 0,
|
20 |
+
"encoder_layers": 24,
|
21 |
+
"eos_token_id": 2,
|
22 |
+
"init_std": 0.02,
|
23 |
+
"is_encoder_decoder": true,
|
24 |
+
"max_length": 200,
|
25 |
+
"max_position_embeddings": 1024,
|
26 |
+
"model_type": "m2m_100",
|
27 |
+
"num_hidden_layers": 24,
|
28 |
+
"pad_token_id": 1,
|
29 |
+
"scale_embedding": true,
|
30 |
+
"transformers_version": "4.30.2",
|
31 |
+
"use_cache": true,
|
32 |
+
"vocab_size": 256206
|
33 |
+
}
|
onnx/decoder_model.onnx
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b649254847c204f4654347f544e56a3cb6557dd6a6a078ec2dd259e5b6a05f77
|
3 |
+
size 1200825
|
onnx/decoder_model.onnx_data
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6af4720b90872cfe3a9e02bb482fd490f77fabefadfa0547fb25fc4a49eebc5a
|
3 |
+
size 4521230336
|
onnx/decoder_model_merged.onnx
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e1a47d113a287ade1d55e15763b40e31fba5434bf618d52e2065e1def052ced5
|
3 |
+
size 2200948
|
onnx/decoder_model_merged.onnx_data
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:6af4720b90872cfe3a9e02bb482fd490f77fabefadfa0547fb25fc4a49eebc5a
|
3 |
+
size 4521230336
|
onnx/decoder_with_past_model.onnx
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fa7cc8962482e9f5c416b5af67b455c8d6466b94a2b3bafbef5b62c1049ff9f0
|
3 |
+
size 1031322
|
onnx/decoder_with_past_model.onnx_data
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:fa6ffdc26ddb4f4ad110fe64198fb42a5f85620a02b7d54208040c16f72ec3f7
|
3 |
+
size 4319707136
|
onnx/encoder_model.onnx
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:bce269f881baf808695616a085b740de3c31d8c2ed2185664459dfac09cc29c5
|
3 |
+
size 494826
|
onnx/encoder_model.onnx_data
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:adb1b8a9482a6c28e856d7236aa7a92c228e2b77afb27664ea09a16e8cdce384
|
3 |
+
size 3068567552
|
onnx/generation_config.json
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_from_model_config": true,
|
3 |
+
"bos_token_id": 0,
|
4 |
+
"decoder_start_token_id": 2,
|
5 |
+
"eos_token_id": 2,
|
6 |
+
"max_length": 200,
|
7 |
+
"pad_token_id": 1,
|
8 |
+
"transformers_version": "4.30.2"
|
9 |
+
}
|
onnx/sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
|
3 |
+
size 4852054
|
onnx/special_tokens_map.json
ADDED
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"additional_special_tokens": [
|
3 |
+
"ace_Arab",
|
4 |
+
"ace_Latn",
|
5 |
+
"acm_Arab",
|
6 |
+
"acq_Arab",
|
7 |
+
"aeb_Arab",
|
8 |
+
"afr_Latn",
|
9 |
+
"ajp_Arab",
|
10 |
+
"aka_Latn",
|
11 |
+
"amh_Ethi",
|
12 |
+
"apc_Arab",
|
13 |
+
"arb_Arab",
|
14 |
+
"ars_Arab",
|
15 |
+
"ary_Arab",
|
16 |
+
"arz_Arab",
|
17 |
+
"asm_Beng",
|
18 |
+
"ast_Latn",
|
19 |
+
"awa_Deva",
|
20 |
+
"ayr_Latn",
|
21 |
+
"azb_Arab",
|
22 |
+
"azj_Latn",
|
23 |
+
"bak_Cyrl",
|
24 |
+
"bam_Latn",
|
25 |
+
"ban_Latn",
|
26 |
+
"bel_Cyrl",
|
27 |
+
"bem_Latn",
|
28 |
+
"ben_Beng",
|
29 |
+
"bho_Deva",
|
30 |
+
"bjn_Arab",
|
31 |
+
"bjn_Latn",
|
32 |
+
"bod_Tibt",
|
33 |
+
"bos_Latn",
|
34 |
+
"bug_Latn",
|
35 |
+
"bul_Cyrl",
|
36 |
+
"cat_Latn",
|
37 |
+
"ceb_Latn",
|
38 |
+
"ces_Latn",
|
39 |
+
"cjk_Latn",
|
40 |
+
"ckb_Arab",
|
41 |
+
"crh_Latn",
|
42 |
+
"cym_Latn",
|
43 |
+
"dan_Latn",
|
44 |
+
"deu_Latn",
|
45 |
+
"dik_Latn",
|
46 |
+
"dyu_Latn",
|
47 |
+
"dzo_Tibt",
|
48 |
+
"ell_Grek",
|
49 |
+
"eng_Latn",
|
50 |
+
"epo_Latn",
|
51 |
+
"est_Latn",
|
52 |
+
"eus_Latn",
|
53 |
+
"ewe_Latn",
|
54 |
+
"fao_Latn",
|
55 |
+
"pes_Arab",
|
56 |
+
"fij_Latn",
|
57 |
+
"fin_Latn",
|
58 |
+
"fon_Latn",
|
59 |
+
"fra_Latn",
|
60 |
+
"fur_Latn",
|
61 |
+
"fuv_Latn",
|
62 |
+
"gla_Latn",
|
63 |
+
"gle_Latn",
|
64 |
+
"glg_Latn",
|
65 |
+
"grn_Latn",
|
66 |
+
"guj_Gujr",
|
67 |
+
"hat_Latn",
|
68 |
+
"hau_Latn",
|
69 |
+
"heb_Hebr",
|
70 |
+
"hin_Deva",
|
71 |
+
"hne_Deva",
|
72 |
+
"hrv_Latn",
|
73 |
+
"hun_Latn",
|
74 |
+
"hye_Armn",
|
75 |
+
"ibo_Latn",
|
76 |
+
"ilo_Latn",
|
77 |
+
"ind_Latn",
|
78 |
+
"isl_Latn",
|
79 |
+
"ita_Latn",
|
80 |
+
"jav_Latn",
|
81 |
+
"jpn_Jpan",
|
82 |
+
"kab_Latn",
|
83 |
+
"kac_Latn",
|
84 |
+
"kam_Latn",
|
85 |
+
"kan_Knda",
|
86 |
+
"kas_Arab",
|
87 |
+
"kas_Deva",
|
88 |
+
"kat_Geor",
|
89 |
+
"knc_Arab",
|
90 |
+
"knc_Latn",
|
91 |
+
"kaz_Cyrl",
|
92 |
+
"kbp_Latn",
|
93 |
+
"kea_Latn",
|
94 |
+
"khm_Khmr",
|
95 |
+
"kik_Latn",
|
96 |
+
"kin_Latn",
|
97 |
+
"kir_Cyrl",
|
98 |
+
"kmb_Latn",
|
99 |
+
"kon_Latn",
|
100 |
+
"kor_Hang",
|
101 |
+
"kmr_Latn",
|
102 |
+
"lao_Laoo",
|
103 |
+
"lvs_Latn",
|
104 |
+
"lij_Latn",
|
105 |
+
"lim_Latn",
|
106 |
+
"lin_Latn",
|
107 |
+
"lit_Latn",
|
108 |
+
"lmo_Latn",
|
109 |
+
"ltg_Latn",
|
110 |
+
"ltz_Latn",
|
111 |
+
"lua_Latn",
|
112 |
+
"lug_Latn",
|
113 |
+
"luo_Latn",
|
114 |
+
"lus_Latn",
|
115 |
+
"mag_Deva",
|
116 |
+
"mai_Deva",
|
117 |
+
"mal_Mlym",
|
118 |
+
"mar_Deva",
|
119 |
+
"min_Latn",
|
120 |
+
"mkd_Cyrl",
|
121 |
+
"plt_Latn",
|
122 |
+
"mlt_Latn",
|
123 |
+
"mni_Beng",
|
124 |
+
"khk_Cyrl",
|
125 |
+
"mos_Latn",
|
126 |
+
"mri_Latn",
|
127 |
+
"zsm_Latn",
|
128 |
+
"mya_Mymr",
|
129 |
+
"nld_Latn",
|
130 |
+
"nno_Latn",
|
131 |
+
"nob_Latn",
|
132 |
+
"npi_Deva",
|
133 |
+
"nso_Latn",
|
134 |
+
"nus_Latn",
|
135 |
+
"nya_Latn",
|
136 |
+
"oci_Latn",
|
137 |
+
"gaz_Latn",
|
138 |
+
"ory_Orya",
|
139 |
+
"pag_Latn",
|
140 |
+
"pan_Guru",
|
141 |
+
"pap_Latn",
|
142 |
+
"pol_Latn",
|
143 |
+
"por_Latn",
|
144 |
+
"prs_Arab",
|
145 |
+
"pbt_Arab",
|
146 |
+
"quy_Latn",
|
147 |
+
"ron_Latn",
|
148 |
+
"run_Latn",
|
149 |
+
"rus_Cyrl",
|
150 |
+
"sag_Latn",
|
151 |
+
"san_Deva",
|
152 |
+
"sat_Beng",
|
153 |
+
"scn_Latn",
|
154 |
+
"shn_Mymr",
|
155 |
+
"sin_Sinh",
|
156 |
+
"slk_Latn",
|
157 |
+
"slv_Latn",
|
158 |
+
"smo_Latn",
|
159 |
+
"sna_Latn",
|
160 |
+
"snd_Arab",
|
161 |
+
"som_Latn",
|
162 |
+
"sot_Latn",
|
163 |
+
"spa_Latn",
|
164 |
+
"als_Latn",
|
165 |
+
"srd_Latn",
|
166 |
+
"srp_Cyrl",
|
167 |
+
"ssw_Latn",
|
168 |
+
"sun_Latn",
|
169 |
+
"swe_Latn",
|
170 |
+
"swh_Latn",
|
171 |
+
"szl_Latn",
|
172 |
+
"tam_Taml",
|
173 |
+
"tat_Cyrl",
|
174 |
+
"tel_Telu",
|
175 |
+
"tgk_Cyrl",
|
176 |
+
"tgl_Latn",
|
177 |
+
"tha_Thai",
|
178 |
+
"tir_Ethi",
|
179 |
+
"taq_Latn",
|
180 |
+
"taq_Tfng",
|
181 |
+
"tpi_Latn",
|
182 |
+
"tsn_Latn",
|
183 |
+
"tso_Latn",
|
184 |
+
"tuk_Latn",
|
185 |
+
"tum_Latn",
|
186 |
+
"tur_Latn",
|
187 |
+
"twi_Latn",
|
188 |
+
"tzm_Tfng",
|
189 |
+
"uig_Arab",
|
190 |
+
"ukr_Cyrl",
|
191 |
+
"umb_Latn",
|
192 |
+
"urd_Arab",
|
193 |
+
"uzn_Latn",
|
194 |
+
"vec_Latn",
|
195 |
+
"vie_Latn",
|
196 |
+
"war_Latn",
|
197 |
+
"wol_Latn",
|
198 |
+
"xho_Latn",
|
199 |
+
"ydd_Hebr",
|
200 |
+
"yor_Latn",
|
201 |
+
"yue_Hant",
|
202 |
+
"zho_Hans",
|
203 |
+
"zho_Hant",
|
204 |
+
"zul_Latn"
|
205 |
+
],
|
206 |
+
"bos_token": "<s>",
|
207 |
+
"cls_token": "<s>",
|
208 |
+
"eos_token": "</s>",
|
209 |
+
"mask_token": {
|
210 |
+
"content": "<mask>",
|
211 |
+
"lstrip": true,
|
212 |
+
"normalized": true,
|
213 |
+
"rstrip": false,
|
214 |
+
"single_word": false
|
215 |
+
},
|
216 |
+
"pad_token": "<pad>",
|
217 |
+
"sep_token": "</s>",
|
218 |
+
"unk_token": "<unk>"
|
219 |
+
}
|
onnx/tokenizer.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:8ac789ad7dabea44d41537822d48c516ba358374c51813e2cba78c006e150c94
|
3 |
+
size 17331224
|
onnx/tokenizer_config.json
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"additional_special_tokens": null,
|
3 |
+
"bos_token": "<s>",
|
4 |
+
"clean_up_tokenization_spaces": true,
|
5 |
+
"cls_token": "<s>",
|
6 |
+
"eos_token": "</s>",
|
7 |
+
"legacy_behaviour": false,
|
8 |
+
"mask_token": {
|
9 |
+
"__type": "AddedToken",
|
10 |
+
"content": "<mask>",
|
11 |
+
"lstrip": true,
|
12 |
+
"normalized": true,
|
13 |
+
"rstrip": false,
|
14 |
+
"single_word": false
|
15 |
+
},
|
16 |
+
"model_max_length": 1024,
|
17 |
+
"pad_token": "<pad>",
|
18 |
+
"sep_token": "</s>",
|
19 |
+
"sp_model_kwargs": {},
|
20 |
+
"src_lang": null,
|
21 |
+
"tgt_lang": null,
|
22 |
+
"tokenizer_class": "NllbTokenizer",
|
23 |
+
"unk_token": "<unk>"
|
24 |
+
}
|