Text2Text Generation
Transformers
PyTorch
English
Kinyarwanda
m2m_100
Inference Endpoints
Kleber committed on
Commit 56bd77f · 1 Parent(s): d25a107

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,34 @@
+{
+  "_name_or_path": "checkpoint-53168",
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "M2M100ForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 8192,
+  "decoder_layerdrop": 0,
+  "decoder_layers": 24,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 8192,
+  "encoder_layerdrop": 0,
+  "encoder_layers": 24,
+  "eos_token_id": 2,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_length": 200,
+  "max_position_embeddings": 1024,
+  "model_type": "m2m_100",
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.31.0",
+  "use_cache": true,
+  "vocab_size": 256206
+}
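These hyper-parameters describe a 24-layer encoder/decoder M2M100 architecture (the NLLB-200 family reuses the `m2m_100` model type in Transformers). A minimal sketch of rebuilding the architecture from this file alone, assuming only `transformers` and a local copy of `config.json`:

```python
from transformers import M2M100Config, M2M100ForConditionalGeneration

# Read the hyper-parameters shown above from a local copy of config.json.
config = M2M100Config.from_json_file("config.json")

# Instantiate the (randomly initialised) architecture; real weights come from
# pytorch_model.bin when loading the full checkpoint with from_pretrained().
model = M2M100ForConditionalGeneration(config)

print(config.model_type)                           # m2m_100
print(sum(p.numel() for p in model.parameters()))  # ~1.37B params (5.48 GB / 4 bytes per float32)
```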
generation_config.json ADDED
@@ -0,0 +1,8 @@
+{
+  "bos_token_id": 0,
+  "decoder_start_token_id": 2,
+  "eos_token_id": 2,
+  "max_length": 200,
+  "pad_token_id": 1,
+  "transformers_version": "4.31.0"
+}
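These defaults mirror the token ids in `config.json` and are what `model.generate()` falls back to when no per-call overrides are given. A small sketch, assuming the file sits in the current directory:

```python
from transformers import GenerationConfig

# Reads ./generation_config.json from a local checkpoint directory.
gen_config = GenerationConfig.from_pretrained(".")

# generate() will stop at </s> (id 2), pad with id 1, and cap output
# at 200 tokens unless these values are overridden per call.
print(gen_config.max_length, gen_config.eos_token_id, gen_config.pad_token_id)
```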
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88d7098c6d88733cea062740fbbf6f691ff82c156e4e34c1abebb1297cd6f3b6
+size 10965715027
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38820a77c622a03563be28191b1c502830a8dc5eaddb395fcdfaffe07029e49a
+size 5482902982
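The binary entries in this commit are Git LFS pointer files, not the payloads themselves: three lines giving the spec version, the SHA-256 of the real blob, and its size in bytes (~5.5 GB here for pytorch_model.bin). A hedged sketch of checking a downloaded blob against its pointer; the file names are hypothetical:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Compare a downloaded blob against the oid/size recorded in its LFS pointer."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        # Stream in 1 MiB chunks: the weight file is several GB.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    size_ok = Path(blob_path).stat().st_size == expected_size
    return digest.hexdigest() == expected_oid and size_ok

# e.g. verify_lfs_pointer("pytorch_model.bin.pointer", "pytorch_model.bin")
```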
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e95e409639f9c9d88073775f84c96a6033aff88b2547cc693ba9c7e7536cd4f
+size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e76e819a0e81bcdfbc2387da3e713f3ec92d981087df2402fe03a386d40c70b
+size 627
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14bb8dfb35c0ffdea7bc01e56cea38b9e3d5efcdcb9c251d6b40538e1aab555a
+size 4852054
special_tokens_map.json ADDED
@@ -0,0 +1,220 @@
+{
+  "additional_special_tokens": [
+    "ace_Arab",
+    "ace_Latn",
+    "acm_Arab",
+    "acq_Arab",
+    "aeb_Arab",
+    "afr_Latn",
+    "ajp_Arab",
+    "aka_Latn",
+    "amh_Ethi",
+    "apc_Arab",
+    "arb_Arab",
+    "ars_Arab",
+    "ary_Arab",
+    "arz_Arab",
+    "asm_Beng",
+    "ast_Latn",
+    "awa_Deva",
+    "ayr_Latn",
+    "azb_Arab",
+    "azj_Latn",
+    "bak_Cyrl",
+    "bam_Latn",
+    "ban_Latn",
+    "bel_Cyrl",
+    "bem_Latn",
+    "ben_Beng",
+    "bho_Deva",
+    "bjn_Arab",
+    "bjn_Latn",
+    "bod_Tibt",
+    "bos_Latn",
+    "bug_Latn",
+    "bul_Cyrl",
+    "cat_Latn",
+    "ceb_Latn",
+    "ces_Latn",
+    "cjk_Latn",
+    "ckb_Arab",
+    "crh_Latn",
+    "cym_Latn",
+    "dan_Latn",
+    "deu_Latn",
+    "dik_Latn",
+    "dyu_Latn",
+    "dzo_Tibt",
+    "ell_Grek",
+    "eng_Latn",
+    "epo_Latn",
+    "est_Latn",
+    "eus_Latn",
+    "ewe_Latn",
+    "fao_Latn",
+    "pes_Arab",
+    "fij_Latn",
+    "fin_Latn",
+    "fon_Latn",
+    "fra_Latn",
+    "fur_Latn",
+    "fuv_Latn",
+    "gla_Latn",
+    "gle_Latn",
+    "glg_Latn",
+    "grn_Latn",
+    "guj_Gujr",
+    "hat_Latn",
+    "hau_Latn",
+    "heb_Hebr",
+    "hin_Deva",
+    "hne_Deva",
+    "hrv_Latn",
+    "hun_Latn",
+    "hye_Armn",
+    "ibo_Latn",
+    "ilo_Latn",
+    "ind_Latn",
+    "isl_Latn",
+    "ita_Latn",
+    "jav_Latn",
+    "jpn_Jpan",
+    "kab_Latn",
+    "kac_Latn",
+    "kam_Latn",
+    "kan_Knda",
+    "kas_Arab",
+    "kas_Deva",
+    "kat_Geor",
+    "knc_Arab",
+    "knc_Latn",
+    "kaz_Cyrl",
+    "kbp_Latn",
+    "kea_Latn",
+    "khm_Khmr",
+    "kik_Latn",
+    "kin_Latn",
+    "kir_Cyrl",
+    "kmb_Latn",
+    "kon_Latn",
+    "kor_Hang",
+    "kmr_Latn",
+    "lao_Laoo",
+    "lvs_Latn",
+    "lij_Latn",
+    "lim_Latn",
+    "lin_Latn",
+    "lit_Latn",
+    "lmo_Latn",
+    "ltg_Latn",
+    "ltz_Latn",
+    "lua_Latn",
+    "lug_Latn",
+    "luo_Latn",
+    "lus_Latn",
+    "mag_Deva",
+    "mai_Deva",
+    "mal_Mlym",
+    "mar_Deva",
+    "min_Latn",
+    "mkd_Cyrl",
+    "plt_Latn",
+    "mlt_Latn",
+    "mni_Beng",
+    "khk_Cyrl",
+    "mos_Latn",
+    "mri_Latn",
+    "zsm_Latn",
+    "mya_Mymr",
+    "nld_Latn",
+    "nno_Latn",
+    "nob_Latn",
+    "npi_Deva",
+    "nso_Latn",
+    "nus_Latn",
+    "nya_Latn",
+    "oci_Latn",
+    "gaz_Latn",
+    "ory_Orya",
+    "pag_Latn",
+    "pan_Guru",
+    "pap_Latn",
+    "pol_Latn",
+    "por_Latn",
+    "prs_Arab",
+    "pbt_Arab",
+    "quy_Latn",
+    "ron_Latn",
+    "run_Latn",
+    "rus_Cyrl",
+    "sag_Latn",
+    "san_Deva",
+    "sat_Beng",
+    "scn_Latn",
+    "shn_Mymr",
+    "sin_Sinh",
+    "slk_Latn",
+    "slv_Latn",
+    "smo_Latn",
+    "sna_Latn",
+    "snd_Arab",
+    "som_Latn",
+    "sot_Latn",
+    "spa_Latn",
+    "als_Latn",
+    "srd_Latn",
+    "srp_Cyrl",
+    "ssw_Latn",
+    "sun_Latn",
+    "swe_Latn",
+    "swh_Latn",
+    "szl_Latn",
+    "tam_Taml",
+    "tat_Cyrl",
+    "tel_Telu",
+    "tgk_Cyrl",
+    "tgl_Latn",
+    "tha_Thai",
+    "tir_Ethi",
+    "taq_Latn",
+    "taq_Tfng",
+    "tpi_Latn",
+    "tsn_Latn",
+    "tso_Latn",
+    "tuk_Latn",
+    "tum_Latn",
+    "tur_Latn",
+    "twi_Latn",
+    "tzm_Tfng",
+    "uig_Arab",
+    "ukr_Cyrl",
+    "umb_Latn",
+    "urd_Arab",
+    "uzn_Latn",
+    "vec_Latn",
+    "vie_Latn",
+    "war_Latn",
+    "wol_Latn",
+    "xho_Latn",
+    "ydd_Hebr",
+    "yor_Latn",
+    "yue_Hant",
+    "zho_Hans",
+    "zho_Hant",
+    "zul_Latn"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
+
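The `additional_special_tokens` block registers the 202 FLORES-200 language codes as atomic tokens, so each code, including `eng_Latn` (English) and `kin_Latn` (Kinyarwanda), maps to a single vocabulary id that can steer the translation direction. A sketch of looking those ids up; the repo id is a placeholder for wherever this checkpoint is hosted:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("your-username/your-model")  # placeholder repo id

# Language codes are single special tokens, never split into subwords.
kin_id = tokenizer.convert_tokens_to_ids("kin_Latn")
eng_id = tokenizer.convert_tokens_to_ids("eng_Latn")
print(kin_id, eng_id)
```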
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e316b82de11d0f951f370943b3c438311629547285129b0b81dadabd01bca665
+size 17331176
tokenizer_config.json ADDED
@@ -0,0 +1,25 @@
+{
+  "additional_special_tokens": null,
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 1024,
+  "name_or_path": "facebook/nllb-200-distilled-600M",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "special_tokens_map_file": null,
+  "src_lang": null,
+  "tgt_lang": null,
+  "tokenizer_class": "NllbTokenizer",
+  "unk_token": "<unk>"
+}
+
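`tokenizer_class` confirms this checkpoint uses the `NllbTokenizer` inherited from facebook/nllb-200-distilled-600M, with `src_lang`/`tgt_lang` left unset so the caller picks the direction at load time. A hedged end-to-end sketch for English to Kinyarwanda inference; the repo id is again a placeholder:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

repo = "your-username/your-model"  # placeholder: wherever this checkpoint lives
tokenizer = AutoTokenizer.from_pretrained(repo, src_lang="eng_Latn", tgt_lang="kin_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

inputs = tokenizer("How are you today?", return_tensors="pt")

# Force the decoder to start with the Kinyarwanda language token.
outputs = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("kin_Latn"),
    max_length=200,  # matches generation_config.json
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```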
trainer_state.json ADDED
@@ -0,0 +1,276 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.0,
+  "global_step": 19806,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.05,
+      "learning_rate": 4.87377562354842e-05,
+      "loss": 0.5877,
+      "step": 500
+    },
+    {
+      "epoch": 0.1,
+      "learning_rate": 4.74755124709684e-05,
+      "loss": 0.6317,
+      "step": 1000
+    },
+    {
+      "epoch": 0.15,
+      "learning_rate": 4.6213268706452596e-05,
+      "loss": 0.6456,
+      "step": 1500
+    },
+    {
+      "epoch": 0.2,
+      "learning_rate": 4.4951024941936785e-05,
+      "loss": 0.6473,
+      "step": 2000
+    },
+    {
+      "epoch": 0.25,
+      "learning_rate": 4.368878117742099e-05,
+      "loss": 0.6499,
+      "step": 2500
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 4.242653741290518e-05,
+      "loss": 0.6602,
+      "step": 3000
+    },
+    {
+      "epoch": 0.35,
+      "learning_rate": 4.116429364838938e-05,
+      "loss": 0.6699,
+      "step": 3500
+    },
+    {
+      "epoch": 0.4,
+      "learning_rate": 3.9902049883873574e-05,
+      "loss": 0.6624,
+      "step": 4000
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 3.8639806119357776e-05,
+      "loss": 0.651,
+      "step": 4500
+    },
+    {
+      "epoch": 0.5,
+      "learning_rate": 3.7377562354841965e-05,
+      "loss": 0.6709,
+      "step": 5000
+    },
+    {
+      "epoch": 0.56,
+      "learning_rate": 3.611531859032616e-05,
+      "loss": 0.6582,
+      "step": 5500
+    },
+    {
+      "epoch": 0.61,
+      "learning_rate": 3.485307482581036e-05,
+      "loss": 0.6698,
+      "step": 6000
+    },
+    {
+      "epoch": 0.66,
+      "learning_rate": 3.359083106129456e-05,
+      "loss": 0.6522,
+      "step": 6500
+    },
+    {
+      "epoch": 0.71,
+      "learning_rate": 3.2328587296778754e-05,
+      "loss": 0.6863,
+      "step": 7000
+    },
+    {
+      "epoch": 0.76,
+      "learning_rate": 3.1066343532262956e-05,
+      "loss": 0.6602,
+      "step": 7500
+    },
+    {
+      "epoch": 0.81,
+      "learning_rate": 2.9804099767747152e-05,
+      "loss": 0.6466,
+      "step": 8000
+    },
+    {
+      "epoch": 0.86,
+      "learning_rate": 2.8541856003231344e-05,
+      "loss": 0.7011,
+      "step": 8500
+    },
+    {
+      "epoch": 0.91,
+      "learning_rate": 2.727961223871554e-05,
+      "loss": 0.677,
+      "step": 9000
+    },
+    {
+      "epoch": 0.96,
+      "learning_rate": 2.601736847419974e-05,
+      "loss": 0.6581,
+      "step": 9500
+    },
+    {
+      "epoch": 1.0,
+      "eval_bleu": 30.9028,
+      "eval_chrf++": 57.3001,
+      "eval_gen_len": 27.1266,
+      "eval_loss": 1.247955083847046,
+      "eval_runtime": 1316.7682,
+      "eval_samples_per_second": 3.718,
+      "eval_spbleu": 43.0506,
+      "eval_steps_per_second": 0.744,
+      "eval_ter": 59.7925,
+      "step": 9903
+    },
+    {
+      "epoch": 1.01,
+      "learning_rate": 2.4755124709683934e-05,
+      "loss": 0.6144,
+      "step": 10000
+    },
+    {
+      "epoch": 1.06,
+      "learning_rate": 2.3492880945168133e-05,
+      "loss": 0.4286,
+      "step": 10500
+    },
+    {
+      "epoch": 1.11,
+      "learning_rate": 2.223063718065233e-05,
+      "loss": 0.4329,
+      "step": 11000
+    },
+    {
+      "epoch": 1.16,
+      "learning_rate": 2.0968393416136524e-05,
+      "loss": 0.4324,
+      "step": 11500
+    },
+    {
+      "epoch": 1.21,
+      "learning_rate": 1.9706149651620723e-05,
+      "loss": 0.4403,
+      "step": 12000
+    },
+    {
+      "epoch": 1.26,
+      "learning_rate": 1.844390588710492e-05,
+      "loss": 0.4353,
+      "step": 12500
+    },
+    {
+      "epoch": 1.31,
+      "learning_rate": 1.7181662122589115e-05,
+      "loss": 0.4312,
+      "step": 13000
+    },
+    {
+      "epoch": 1.36,
+      "learning_rate": 1.5919418358073314e-05,
+      "loss": 0.4212,
+      "step": 13500
+    },
+    {
+      "epoch": 1.41,
+      "learning_rate": 1.4657174593557507e-05,
+      "loss": 0.4158,
+      "step": 14000
+    },
+    {
+      "epoch": 1.46,
+      "learning_rate": 1.3394930829041705e-05,
+      "loss": 0.4061,
+      "step": 14500
+    },
+    {
+      "epoch": 1.51,
+      "learning_rate": 1.2132687064525902e-05,
+      "loss": 0.4202,
+      "step": 15000
+    },
+    {
+      "epoch": 1.57,
+      "learning_rate": 1.0870443300010098e-05,
+      "loss": 0.4448,
+      "step": 15500
+    },
+    {
+      "epoch": 1.62,
+      "learning_rate": 9.608199535494297e-06,
+      "loss": 0.4351,
+      "step": 16000
+    },
+    {
+      "epoch": 1.67,
+      "learning_rate": 8.345955770978492e-06,
+      "loss": 0.4072,
+      "step": 16500
+    },
+    {
+      "epoch": 1.72,
+      "learning_rate": 7.083712006462688e-06,
+      "loss": 0.4157,
+      "step": 17000
+    },
+    {
+      "epoch": 1.77,
+      "learning_rate": 5.821468241946885e-06,
+      "loss": 0.4338,
+      "step": 17500
+    },
+    {
+      "epoch": 1.82,
+      "learning_rate": 4.559224477431081e-06,
+      "loss": 0.4212,
+      "step": 18000
+    },
+    {
+      "epoch": 1.87,
+      "learning_rate": 3.2969807129152782e-06,
+      "loss": 0.411,
+      "step": 18500
+    },
+    {
+      "epoch": 1.92,
+      "learning_rate": 2.0347369483994747e-06,
+      "loss": 0.4179,
+      "step": 19000
+    },
+    {
+      "epoch": 1.97,
+      "learning_rate": 7.724931838836716e-07,
+      "loss": 0.4255,
+      "step": 19500
+    },
+    {
+      "epoch": 2.0,
+      "eval_bleu": 31.3408,
+      "eval_chrf++": 57.4539,
+      "eval_gen_len": 27.0633,
+      "eval_loss": 1.3426777124404907,
+      "eval_runtime": 1310.3387,
+      "eval_samples_per_second": 3.736,
+      "eval_spbleu": 43.1979,
+      "eval_steps_per_second": 0.748,
+      "eval_ter": 59.0383,
+      "step": 19806
+    }
+  ],
+  "max_steps": 19806,
+  "num_train_epochs": 2,
+  "total_flos": 8.428883485392896e+16,
+  "trial_name": null,
+  "trial_params": null
+}
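`log_history` interleaves training-loss entries (one every 500 steps) with one evaluation entry per epoch; BLEU rose from 30.90 to 31.34 between epochs while eval loss also rose, which is worth keeping in mind when picking a checkpoint. A small sketch for separating the two kinds of records, assuming a local copy of trainer_state.json:

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; per-epoch evaluation entries carry "eval_bleu".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_bleu" in e]

for e in eval_logs:
    print(f"epoch {e['epoch']}: BLEU {e['eval_bleu']}, chrF++ {e['eval_chrf++']}, "
          f"eval loss {e['eval_loss']:.4f}")
```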
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef1aa6540cbe88561036b418b0031f6e178a7ac67e834c2ad5e6a5eab47c9017
+size 4091