tiedeman committed
Commit abf3a2e
1 Parent(s): 13b61ec

Initial commit

.gitattributes CHANGED
@@ -29,3 +29,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.spm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,526 @@
---
language:
- da
- fo
- gmq
- is
- nb
- nn
- "no"
- sv

tags:
- translation
- opus-mt-tc

license: cc-by-4.0
model-index:
- name: opus-mt-tc-big-gmq-gmq
  results:
  - task:
      name: Translation isl-swe
      type: translation
      args: isl-swe
    dataset:
      name: europeana2021
      type: europeana2021
      args: isl-swe
    metrics:
    - name: BLEU
      type: bleu
      value: 22.2
    - name: chr-F
      type: chrf
      value: 0.45562
  - task:
      name: Translation nob-isl
      type: translation
      args: nob-isl
    dataset:
      name: europeana2021
      type: europeana2021
      args: nob-isl
    metrics:
    - name: BLEU
      type: bleu
      value: 29.7
    - name: chr-F
      type: chrf
      value: 0.54171
  - task:
      name: Translation nob-swe
      type: translation
      args: nob-swe
    dataset:
      name: europeana2021
      type: europeana2021
      args: nob-swe
    metrics:
    - name: BLEU
      type: bleu
      value: 54.0
    - name: chr-F
      type: chrf
      value: 0.73891
  - task:
      name: Translation dan-isl
      type: translation
      args: dan-isl
    dataset:
      name: flores101-devtest
      type: flores_101
      args: dan isl devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 22.2
    - name: chr-F
      type: chrf
      value: 0.50227
  - task:
      name: Translation dan-nob
      type: translation
      args: dan-nob
    dataset:
      name: flores101-devtest
      type: flores_101
      args: dan nob devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 28.6
    - name: chr-F
      type: chrf
      value: 0.58445
  - task:
      name: Translation dan-swe
      type: translation
      args: dan-swe
    dataset:
      name: flores101-devtest
      type: flores_101
      args: dan swe devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 38.5
    - name: chr-F
      type: chrf
      value: 0.65000
  - task:
      name: Translation isl-dan
      type: translation
      args: isl-dan
    dataset:
      name: flores101-devtest
      type: flores_101
      args: isl dan devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 27.2
    - name: chr-F
      type: chrf
      value: 0.53630
  - task:
      name: Translation isl-nob
      type: translation
      args: isl-nob
    dataset:
      name: flores101-devtest
      type: flores_101
      args: isl nob devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 20.5
    - name: chr-F
      type: chrf
      value: 0.49434
  - task:
      name: Translation isl-swe
      type: translation
      args: isl-swe
    dataset:
      name: flores101-devtest
      type: flores_101
      args: isl swe devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 26.0
    - name: chr-F
      type: chrf
      value: 0.53373
  - task:
      name: Translation nob-dan
      type: translation
      args: nob-dan
    dataset:
      name: flores101-devtest
      type: flores_101
      args: nob dan devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 31.7
    - name: chr-F
      type: chrf
      value: 0.59657
  - task:
      name: Translation nob-isl
      type: translation
      args: nob-isl
    dataset:
      name: flores101-devtest
      type: flores_101
      args: nob isl devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 18.9
    - name: chr-F
      type: chrf
      value: 0.47432
  - task:
      name: Translation nob-swe
      type: translation
      args: nob-swe
    dataset:
      name: flores101-devtest
      type: flores_101
      args: nob swe devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 31.3
    - name: chr-F
      type: chrf
      value: 0.60030
  - task:
      name: Translation swe-dan
      type: translation
      args: swe-dan
    dataset:
      name: flores101-devtest
      type: flores_101
      args: swe dan devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 39.0
    - name: chr-F
      type: chrf
      value: 0.64340
  - task:
      name: Translation swe-isl
      type: translation
      args: swe-isl
    dataset:
      name: flores101-devtest
      type: flores_101
      args: swe isl devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 21.7
    - name: chr-F
      type: chrf
      value: 0.49590
  - task:
      name: Translation swe-nob
      type: translation
      args: swe-nob
    dataset:
      name: flores101-devtest
      type: flores_101
      args: swe nob devtest
    metrics:
    - name: BLEU
      type: bleu
      value: 28.9
    - name: chr-F
      type: chrf
      value: 0.58336
  - task:
      name: Translation dan-nob
      type: translation
      args: dan-nob
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: dan-nob
    metrics:
    - name: BLEU
      type: bleu
      value: 78.2
    - name: chr-F
      type: chrf
      value: 0.87556
  - task:
      name: Translation dan-swe
      type: translation
      args: dan-swe
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: dan-swe
    metrics:
    - name: BLEU
      type: bleu
      value: 72.5
    - name: chr-F
      type: chrf
      value: 0.83556
  - task:
      name: Translation nno-nob
      type: translation
      args: nno-nob
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: nno-nob
    metrics:
    - name: BLEU
      type: bleu
      value: 78.9
    - name: chr-F
      type: chrf
      value: 0.88349
  - task:
      name: Translation nob-dan
      type: translation
      args: nob-dan
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: nob-dan
    metrics:
    - name: BLEU
      type: bleu
      value: 73.9
    - name: chr-F
      type: chrf
      value: 0.85345
  - task:
      name: Translation nob-nno
      type: translation
      args: nob-nno
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: nob-nno
    metrics:
    - name: BLEU
      type: bleu
      value: 55.2
    - name: chr-F
      type: chrf
      value: 0.74571
  - task:
      name: Translation nob-swe
      type: translation
      args: nob-swe
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: nob-swe
    metrics:
    - name: BLEU
      type: bleu
      value: 73.9
    - name: chr-F
      type: chrf
      value: 0.84747
  - task:
      name: Translation swe-dan
      type: translation
      args: swe-dan
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: swe-dan
    metrics:
    - name: BLEU
      type: bleu
      value: 72.6
    - name: chr-F
      type: chrf
      value: 0.83392
  - task:
      name: Translation swe-nob
      type: translation
      args: swe-nob
    dataset:
      name: tatoeba-test-v2021-08-07
      type: tatoeba_mt
      args: swe-nob
    metrics:
    - name: BLEU
      type: bleu
      value: 76.3
    - name: chr-F
      type: chrf
      value: 0.85815
---
# opus-mt-tc-big-gmq-gmq

## Table of Contents
- [Model Details](#model-details)
- [Uses](#uses)
- [Risks, Limitations and Biases](#risks-limitations-and-biases)
- [How to Get Started With the Model](#how-to-get-started-with-the-model)
- [Training](#training)
- [Evaluation](#evaluation)
- [Citation Information](#citation-information)
- [Acknowledgements](#acknowledgements)

## Model Details

Neural machine translation model for translating from North Germanic languages (gmq) to North Germanic languages (gmq).

This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to PyTorch using the transformers library by Hugging Face. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train).

**Model Description:**
- **Developed by:** Language Technology Research Group at the University of Helsinki
- **Model Type:** Translation (transformer-big)
- **Release:** 2022-07-29
- **License:** CC-BY-4.0
- **Language(s):**
  - Source Language(s): dan fao isl nno nob nor swe
  - Target Language(s): dan isl nno nob nor swe
  - Valid Target Language Labels: >>dan<< >>isl<< >>nno<< >>nob<< >>nor<< >>swe<<
- **Original Model:** [opusTCv20210807_transformer-big_2022-07-29.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-gmq/opusTCv20210807_transformer-big_2022-07-29.zip)
- **Resources for more information:**
  - [OPUS-MT-train GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train)
  - More information about released models for this language pair: [OPUS-MT gmq-gmq README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/gmq-gmq/README.md)
  - [More information about MarianNMT models in the transformers library](https://huggingface.co/docs/transformers/model_doc/marian)
  - [Tatoeba Translation Challenge](https://github.com/Helsinki-NLP/Tatoeba-Challenge/)

This is a multilingual translation model with multiple target languages. A sentence-initial language token is required in the form of `>>id<<` (id = valid target language ID), e.g. `>>dan<<`.
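
For illustration only, prepending that token to a raw source sentence can be done with a one-line helper (the helper name is ours, not part of the model):

```python
# Hypothetical helper: prepend the required target-language token
# (one of: dan, isl, nno, nob, nor, swe) to a raw source sentence.
def tag_for_target(sentence: str, target_lang: str) -> str:
    return f">>{target_lang}<< {sentence}"

print(tag_for_target("Jeg er bange for kakerlakker.", "swe"))
# >>swe<< Jeg er bange for kakerlakker.
```
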
## Uses

This model can be used for translation and text-to-text generation.

## Risks, Limitations and Biases

**CONTENT WARNING: Readers should be aware that the model is trained on various public data sets that may contain content that is disturbing, offensive, and can propagate historical and current stereotypes.**

Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)).

## How to Get Started With the Model

A short code example:

```python
from transformers import MarianMTModel, MarianTokenizer

src_text = [
    ">>fao<< Jeg er bange for kakerlakker.",
    ">>nob<< Vladivostok är en stad i Ryssland."
]

model_name = "Helsinki-NLP/opus-mt-tc-big-gmq-gmq"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)
translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))

for t in translated:
    print(tokenizer.decode(t, skip_special_tokens=True))

# expected output:
# Tað eru uml.
# Vladivostok er en by i Russland.
```

You can also use OPUS-MT models with the transformers pipelines, for example:

```python
from transformers import pipeline
pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-gmq-gmq")
print(pipe(">>fao<< Jeg er bange for kakerlakker."))

# expected output: Tað eru uml.
```

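Because the target language is selected by the sentence-initial `>>id<<` token rather than by the pipeline configuration, the same pipeline object can translate into any supported target language. A small illustration (outputs not verified here):

```python
# Same pipeline as above, different target-language tokens.
print(pipe(">>isl<< Vladivostok är en stad i Ryssland."))
print(pipe(">>nno<< Vladivostok är en stad i Ryssland."))
```
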
## Training

- **Data:** opusTCv20210807 ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge))
- **Pre-processing:** SentencePiece (spm32k,spm32k); a tokenization sketch follows this list
- **Model Type:** transformer-big
- **Original MarianNMT Model:** [opusTCv20210807_transformer-big_2022-07-29.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-gmq/opusTCv20210807_transformer-big_2022-07-29.zip)
- **Training Scripts:** [GitHub Repo](https://github.com/Helsinki-NLP/OPUS-MT-train)

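As a minimal sketch of what the spm32k pre-processing does, the `source.spm` model shipped in this repository can be loaded directly with the `sentencepiece` package (the path and sample sentence are illustrative):

```python
import sentencepiece as spm

# Load the source-side SentencePiece model from this repository.
sp = spm.SentencePieceProcessor(model_file="source.spm")

# Segment a raw sentence into subword pieces, as done before training.
pieces = sp.encode("Vladivostok er en by i Russland.", out_type=str)
print(pieces)             # subword pieces, e.g. ['▁Vladivostok', '▁er', ...]
print(sp.decode(pieces))  # pieces map back to the original string
```
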
## Evaluation

* test set translations: [opusTCv20210807_transformer-big_2022-07-29.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-gmq/opusTCv20210807_transformer-big_2022-07-29.test.txt)
* test set scores: [opusTCv20210807_transformer-big_2022-07-29.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-gmq/opusTCv20210807_transformer-big_2022-07-29.eval.txt)
* benchmark results: [benchmark_results.txt](benchmark_results.txt)
* benchmark output: [benchmark_translations.zip](benchmark_translations.zip)

| langpair | testset | chr-F | BLEU | #sent | #words |
|----------|---------|-------|------|-------|--------|
| dan-nob | tatoeba-test-v2021-08-07 | 0.87556 | 78.2 | 1299 | 9620 |
| dan-swe | tatoeba-test-v2021-08-07 | 0.83556 | 72.5 | 1549 | 10060 |
| nno-nob | tatoeba-test-v2021-08-07 | 0.88349 | 78.9 | 467 | 3129 |
| nob-dan | tatoeba-test-v2021-08-07 | 0.85345 | 73.9 | 1299 | 9794 |
| nob-nno | tatoeba-test-v2021-08-07 | 0.74571 | 55.2 | 466 | 3141 |
| nob-swe | tatoeba-test-v2021-08-07 | 0.84747 | 73.9 | 563 | 3698 |
| swe-dan | tatoeba-test-v2021-08-07 | 0.83392 | 72.6 | 1549 | 10239 |
| swe-nob | tatoeba-test-v2021-08-07 | 0.85815 | 76.3 | 563 | 3708 |
| isl-swe | europeana2021 | 0.45562 | 22.2 | 563 | 10293 |
| nob-isl | europeana2021 | 0.54171 | 29.7 | 538 | 9932 |
| nob-swe | europeana2021 | 0.73891 | 54.0 | 538 | 9885 |
| dan-isl | flores101-devtest | 0.50227 | 22.2 | 1012 | 22834 |
| dan-nob | flores101-devtest | 0.58445 | 28.6 | 1012 | 23873 |
| dan-swe | flores101-devtest | 0.65000 | 38.5 | 1012 | 23121 |
| isl-dan | flores101-devtest | 0.53630 | 27.2 | 1012 | 24638 |
| isl-nob | flores101-devtest | 0.49434 | 20.5 | 1012 | 23873 |
| isl-swe | flores101-devtest | 0.53373 | 26.0 | 1012 | 23121 |
| nob-dan | flores101-devtest | 0.59657 | 31.7 | 1012 | 24638 |
| nob-isl | flores101-devtest | 0.47432 | 18.9 | 1012 | 22834 |
| nob-swe | flores101-devtest | 0.60030 | 31.3 | 1012 | 23121 |
| swe-dan | flores101-devtest | 0.64340 | 39.0 | 1012 | 24638 |
| swe-isl | flores101-devtest | 0.49590 | 21.7 | 1012 | 22834 |
| swe-nob | flores101-devtest | 0.58336 | 28.9 | 1012 | 23873 |

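The chr-F and BLEU scores in the table are produced by the OPUS-MT evaluation pipeline; comparable numbers can be computed with `sacrebleu`, roughly as follows (file names are placeholders):

```python
import sacrebleu

# One hypothesis and one reference per line, in the same order.
with open("hypotheses.txt", encoding="utf-8") as f:
    hyps = [line.rstrip("\n") for line in f]
with open("references.txt", encoding="utf-8") as f:
    refs = [line.rstrip("\n") for line in f]

bleu = sacrebleu.corpus_bleu(hyps, [refs])   # corpus-level BLEU
chrf = sacrebleu.corpus_chrf(hyps, [refs])   # chr-F
# Recent sacrebleu versions report chr-F on a 0-100 scale,
# while the table above uses 0-1.
print(f"BLEU = {bleu.score:.1f}, chr-F = {chrf.score / 100:.5f}")
```
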
## Citation Information

* Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (please cite if you use this model)

```bibtex
@inproceedings{tiedemann-thottingal-2020-opus,
    title = "{OPUS}-{MT} {--} Building open translation services for the World",
    author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh},
    booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation",
    month = nov,
    year = "2020",
    address = "Lisboa, Portugal",
    publisher = "European Association for Machine Translation",
    url = "https://aclanthology.org/2020.eamt-1.61",
    pages = "479--480",
}

@inproceedings{tiedemann-2020-tatoeba,
    title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}",
    author = {Tiedemann, J{\"o}rg},
    booktitle = "Proceedings of the Fifth Conference on Machine Translation",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.wmt-1.139",
    pages = "1174--1182",
}
```

## Acknowledgements

The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union's Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland.

## Model conversion info

* transformers version: 4.16.2
* OPUS-MT git hash: 8b9f0b0
* port time: Fri Aug 12 13:30:22 EEST 2022
* port machine: LM0-400-22516.local
benchmark_results.txt ADDED
@@ -0,0 +1,47 @@
isl-swe europeana2021 0.45562 22.2 563 10293
nob-isl europeana2021 0.54171 29.7 538 9932
nob-swe europeana2021 0.73891 54.0 538 9885
dan-isl flores101-dev 0.50526 22.6 997 21857
dan-nob flores101-dev 0.58258 29.0 997 23157
dan-swe flores101-dev 0.64830 38.5 997 22417
isl-dan flores101-dev 0.53960 27.5 997 23685
isl-nob flores101-dev 0.49502 20.9 997 23157
isl-swe flores101-dev 0.53173 26.1 997 22417
nob-dan flores101-dev 0.59954 32.4 997 23685
nob-isl flores101-dev 0.48147 19.7 997 21857
nob-swe flores101-dev 0.59719 30.6 997 22417
swe-dan flores101-dev 0.64216 38.8 997 23685
swe-isl flores101-dev 0.49997 22.4 997 21857
swe-nob flores101-dev 0.57853 28.7 997 23157
dan-isl flores101-devtest 0.50227 22.2 1012 22834
dan-nob flores101-devtest 0.58445 28.6 1012 23873
dan-swe flores101-devtest 0.65000 38.5 1012 23121
isl-dan flores101-devtest 0.53630 27.2 1012 24638
isl-nob flores101-devtest 0.49434 20.5 1012 23873
isl-swe flores101-devtest 0.53373 26.0 1012 23121
nob-dan flores101-devtest 0.59657 31.7 1012 24638
nob-isl flores101-devtest 0.47432 18.9 1012 22834
nob-swe flores101-devtest 0.60030 31.3 1012 23121
swe-dan flores101-devtest 0.64340 39.0 1012 24638
swe-isl flores101-devtest 0.49590 21.7 1012 22834
swe-nob flores101-devtest 0.58336 28.9 1012 23873
dan-swe tatoeba-test-v2020-07-28 0.83565 72.6 1550 10082
nno-nob tatoeba-test-v2020-07-28 0.88417 79.1 474 3167
nob-nno tatoeba-test-v2020-07-28 0.74642 55.3 474 3184
nob-swe tatoeba-test-v2020-07-28 0.85123 74.5 560 3661
swe-dan tatoeba-test-v2020-07-28 0.83413 72.7 1550 10261
swe-nob tatoeba-test-v2020-07-28 0.86040 76.7 560 3672
dan-swe tatoeba-test-v2021-03-30 0.83565 72.6 1550 10082
nno-nob tatoeba-test-v2021-03-30 0.88607 79.2 500 3314
nob-nno tatoeba-test-v2021-03-30 0.74623 55.3 488 3321
nob-swe tatoeba-test-v2021-03-30 0.84518 73.4 570 3756
swe-dan tatoeba-test-v2021-03-30 0.83413 72.7 1550 10261
swe-nob tatoeba-test-v2021-03-30 0.85670 76.2 570 3765
dan-nob tatoeba-test-v2021-08-07 0.87556 78.2 1299 9620
dan-swe tatoeba-test-v2021-08-07 0.83556 72.5 1549 10060
nno-nob tatoeba-test-v2021-08-07 0.88349 78.9 467 3129
nob-dan tatoeba-test-v2021-08-07 0.85345 73.9 1299 9794
nob-nno tatoeba-test-v2021-08-07 0.74571 55.2 466 3141
nob-swe tatoeba-test-v2021-08-07 0.84747 73.9 563 3698
swe-dan tatoeba-test-v2021-08-07 0.83392 72.6 1549 10239
swe-nob tatoeba-test-v2021-08-07 0.85815 76.3 563 3708
benchmark_translations.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0dc88510fb37cbed988cbc7f0eb7314d465c88f373bd448d05b36f7f46cf0722
size 4304097
config.json ADDED
@@ -0,0 +1,45 @@
{
  "activation_dropout": 0.0,
  "activation_function": "relu",
  "architectures": [
    "MarianMTModel"
  ],
  "attention_dropout": 0.0,
  "bad_words_ids": [
    [
      34890
    ]
  ],
  "bos_token_id": 0,
  "classifier_dropout": 0.0,
  "d_model": 1024,
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 4096,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "decoder_start_token_id": 34890,
  "decoder_vocab_size": 34891,
  "dropout": 0.1,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 4096,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "eos_token_id": 25972,
  "forced_eos_token_id": 25972,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "max_length": 512,
  "max_position_embeddings": 1024,
  "model_type": "marian",
  "normalize_embedding": false,
  "num_beams": 4,
  "num_hidden_layers": 6,
  "pad_token_id": 34890,
  "scale_embedding": true,
  "share_encoder_decoder_embeddings": true,
  "static_position_embeddings": true,
  "torch_dtype": "float16",
  "transformers_version": "4.18.0.dev0",
  "use_cache": true,
  "vocab_size": 34891
}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cf5e0d09da97d87c603652440c65504c7ca9d2dfed3cbdde3a7dda32d4b5426b
size 495789763
source.spm ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0c9800926dcbcc092f5d297ca2b8c3af64afc58877287cce0dd7beb94ad2feb9
size 802542
special_tokens_map.json ADDED
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
target.spm ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3f06e9450b1536ecf625fb6e8daa1f7ce9bde964240138b1a5d6df233c9ab3fe
size 802737
tokenizer_config.json ADDED
@@ -0,0 +1 @@
{"source_lang": "gmq", "target_lang": "gmq", "unk_token": "<unk>", "eos_token": "</s>", "pad_token": "<pad>", "model_max_length": 512, "sp_model_kwargs": {}, "separate_vocabs": false, "special_tokens_map_file": null, "name_or_path": "marian-models/opusTCv20210807_transformer-big_2022-07-29/gmq-gmq", "tokenizer_class": "MarianTokenizer"}
vocab.json ADDED
The diff for this file is too large to render.