tiedeman committed on
Commit 73fedce
1 Parent(s): 7bb802e

Initial commit

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.spm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,241 @@
+ ---
+ language:
+ - da
+ - gmq
+ - is
+ - nb
+ - no
+ - ru
+ - sv
+ - uk
+ - zle
+
+ tags:
+ - translation
+
+ license: cc-by-4.0
+ model-index:
+ - name: opus-mt-tc-big-gmq-zle
+   results:
+   - task:
+       name: Translation dan-rus
+       type: translation
+       args: dan-rus
+     dataset:
+       name: flores101-devtest
+       type: flores_101
+       args: dan rus devtest
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 25.6
+   - task:
+       name: Translation dan-ukr
+       type: translation
+       args: dan-ukr
+     dataset:
+       name: flores101-devtest
+       type: flores_101
+       args: dan ukr devtest
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 25.5
+   - task:
+       name: Translation nob-rus
+       type: translation
+       args: nob-rus
+     dataset:
+       name: flores101-devtest
+       type: flores_101
+       args: nob rus devtest
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 22.1
+   - task:
+       name: Translation nob-ukr
+       type: translation
+       args: nob-ukr
+     dataset:
+       name: flores101-devtest
+       type: flores_101
+       args: nob ukr devtest
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 21.6
+   - task:
+       name: Translation swe-rus
+       type: translation
+       args: swe-rus
+     dataset:
+       name: flores101-devtest
+       type: flores_101
+       args: swe rus devtest
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 25.8
+   - task:
+       name: Translation swe-ukr
+       type: translation
+       args: swe-ukr
+     dataset:
+       name: flores101-devtest
+       type: flores_101
+       args: swe ukr devtest
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 25.7
+   - task:
+       name: Translation dan-rus
+       type: translation
+       args: dan-rus
+     dataset:
+       name: tatoeba-test-v2021-08-07
+       type: tatoeba_mt
+       args: dan-rus
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 53.9
+   - task:
+       name: Translation nob-rus
+       type: translation
+       args: nob-rus
+     dataset:
+       name: tatoeba-test-v2021-08-07
+       type: tatoeba_mt
+       args: nob-rus
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 45.8
+   - task:
+       name: Translation swe-rus
+       type: translation
+       args: swe-rus
+     dataset:
+       name: tatoeba-test-v2021-08-07
+       type: tatoeba_mt
+       args: swe-rus
+     metrics:
+     - name: BLEU
+       type: bleu
+       value: 45.9
+ ---
+ # opus-mt-tc-big-gmq-zle
+
+ Neural machine translation model for translating from North Germanic languages (gmq) to East Slavic languages (zle).
+
+ This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models were originally trained with [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++, and then converted to PyTorch using the Hugging Face transformers library. Training data is taken from [OPUS](https://opus.nlpl.eu/) and the training pipelines follow the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train).
+
+ * Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (please cite them if you use this model)
+
+ ```bibtex
+ @inproceedings{tiedemann-thottingal-2020-opus,
+     title = "{OPUS}-{MT} {--} Building open translation services for the World",
+     author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh},
+     booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation",
+     month = nov,
+     year = "2020",
+     address = "Lisboa, Portugal",
+     publisher = "European Association for Machine Translation",
+     url = "https://aclanthology.org/2020.eamt-1.61",
+     pages = "479--480",
+ }
+
+ @inproceedings{tiedemann-2020-tatoeba,
+     title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}",
+     author = {Tiedemann, J{\"o}rg},
+     booktitle = "Proceedings of the Fifth Conference on Machine Translation",
+     month = nov,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/2020.wmt-1.139",
+     pages = "1174--1182",
+ }
+ ```
+
+ ## Model info
+
+ * Release: 2022-03-23
+ * source language(s): dan isl nob nor swe
+ * target language(s): rus ukr
+ * valid target language labels: >>rus<< >>ukr<<
+ * model: transformer-big
+ * data: opusTCv20210807+pbt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge))
+ * tokenization: SentencePiece (spm32k,spm32k)
+ * original model: [opusTCv20210807+pbt_transformer-big_2022-03-23.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-zle/opusTCv20210807+pbt_transformer-big_2022-03-23.zip)
+ * more information about released models: [OPUS-MT gmq-zle README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/gmq-zle/README.md)
+ * more information about the model: [MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)
+
+ This is a multilingual translation model with multiple target languages. A sentence-initial language token of the form `>>id<<` (where `id` is a valid target language ID, e.g. `>>rus<<`) is required; a minimal helper sketch follows below.
+
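+ For illustration, prepending the token can be wrapped in a small helper (a hypothetical convenience function, not part of the model's API):
+
+ ```python
+ # hypothetical helper: prepend the required target-language token to each sentence
+ def add_target_token(sentences, lang="rus"):
+     return [f">>{lang}<< {s}" for s in sentences]
+
+ print(add_target_token(["Det er allerede torsdag i morgen."]))
+ # ['>>rus<< Det er allerede torsdag i morgen.']
+ ```
+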
+ ## Usage
+
+ A short code example:
+
+ ```python
+ from transformers import MarianMTModel, MarianTokenizer
+
+ src_text = [
+     ">>bel<< Det er allerede torsdag i morgen.",
+     ">>ukr<< Tom lekte katt och råtta med Mary."
+ ]
+
+ model_name = "pytorch-models/opus-mt-tc-big-gmq-zle"
+ tokenizer = MarianTokenizer.from_pretrained(model_name)
+ model = MarianMTModel.from_pretrained(model_name)
+ translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True))
+
+ for t in translated:
+     print(tokenizer.decode(t, skip_special_tokens=True))
+
+ # expected output:
+ # Гэта ўжо чацвер заўтра.
+ # Том грав кішку і щура з Марією.
+ ```
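+
+ Equivalently, the per-sentence decode loop can be replaced by a single `batch_decode` call (a minimal variant of the example above):
+
+ ```python
+ # decode all generated hypotheses at once instead of looping
+ print(tokenizer.batch_decode(translated, skip_special_tokens=True))
+ ```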
+
+ You can also use OPUS-MT models with the transformers pipelines, for example:
+
+ ```python
+ from transformers import pipeline
+
+ pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-gmq-zle")
+ print(pipe(">>bel<< Det er allerede torsdag i morgen."))
+
+ # expected output: Гэта ўжо чацвер заўтра.
+ ```
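+
+ The pipeline also accepts a batch of sentences (a short sketch; `translation_text` is the standard output key of the transformers translation pipeline):
+
+ ```python
+ outputs = pipe([
+     ">>rus<< Det er allerede torsdag i morgen.",
+     ">>ukr<< Tom lekte katt och råtta med Mary.",
+ ])
+ print([o["translation_text"] for o in outputs])
+ ```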
+
+ ## Benchmarks
+
+ * test set translations: [opusTCv20210807+pbt_transformer-big_2022-03-23.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-zle/opusTCv20210807+pbt_transformer-big_2022-03-23.test.txt)
+ * test set scores: [opusTCv20210807+pbt_transformer-big_2022-03-23.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/gmq-zle/opusTCv20210807+pbt_transformer-big_2022-03-23.eval.txt)
+ * benchmark results: [benchmark_results.txt](benchmark_results.txt)
+ * benchmark output: [benchmark_translations.zip](benchmark_translations.zip)
+
+ | langpair | testset | chr-F | BLEU | #sent | #words |
+ |----------|---------|-------|------|-------|--------|
+ | dan-rus | tatoeba-test-v2021-08-07 | 0.72627 | 53.9 | 1713 | 10480 |
+ | nob-rus | tatoeba-test-v2021-08-07 | 0.66881 | 45.8 | 1277 | 10659 |
+ | swe-rus | tatoeba-test-v2021-08-07 | 0.66248 | 45.9 | 1282 | 7659 |
+ | dan-rus | flores101-devtest | 0.53271 | 25.6 | 1012 | 23295 |
+ | dan-ukr | flores101-devtest | 0.54273 | 25.5 | 1012 | 22810 |
+ | nob-rus | flores101-devtest | 0.50426 | 22.1 | 1012 | 23295 |
+ | nob-ukr | flores101-devtest | 0.51156 | 21.6 | 1012 | 22810 |
+ | swe-rus | flores101-devtest | 0.53226 | 25.8 | 1012 | 23295 |
+ | swe-ukr | flores101-devtest | 0.54257 | 25.7 | 1012 | 22810 |
+
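+ The reported scores can be recomputed from the linked test-set translations with sacrebleu (a sketch; `hyps.txt` and `refs.txt` are placeholder file names for the system and reference translations, one sentence per line):
+
+ ```python
+ import sacrebleu
+
+ # read system hypotheses and references, one sentence per line
+ with open("hyps.txt") as f:
+     hyps = [line.strip() for line in f]
+ with open("refs.txt") as f:
+     refs = [line.strip() for line in f]
+
+ print(sacrebleu.corpus_bleu(hyps, [refs]).score)  # BLEU
+ print(sacrebleu.corpus_chrf(hyps, [refs]).score)  # chr-F
+ ```
+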
+ ## Acknowledgements
+
+ The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and by the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland.
+
+ ## Model conversion info
+
+ * transformers version: 4.16.2
+ * OPUS-MT git hash: 1bdabf7
+ * port time: Thu Mar 24 02:08:53 EET 2022
+ * port machine: LM0-400-22516.local
benchmark_results.txt ADDED
@@ -0,0 +1,27 @@
+ dan-bel flores101-dev 0.38828 9.2 997 23996
+ dan-rus flores101-dev 0.53475 26.0 997 22657
+ dan-ukr flores101-dev 0.53315 24.6 997 21841
+ isl-bel flores101-dev 0.29287 5.1 997 23996
+ isl-rus flores101-dev 0.38585 13.9 997 22657
+ isl-ukr flores101-dev 0.36700 11.4 997 21841
+ nob-bel flores101-dev 0.37191 8.3 997 23996
+ nob-rus flores101-dev 0.50038 22.1 997 22657
+ nob-ukr flores101-dev 0.50474 20.8 997 21841
+ swe-bel flores101-dev 0.38670 9.3 997 23996
+ swe-rus flores101-dev 0.53279 26.0 997 22657
+ swe-ukr flores101-dev 0.53440 24.7 997 21841
+ dan-bel flores101-devtest 0.39705 9.9 1012 24829
+ dan-rus flores101-devtest 0.53271 25.6 1012 23295
+ dan-ukr flores101-devtest 0.54273 25.5 1012 22810
+ isl-bel flores101-devtest 0.29058 5.4 1012 24829
+ isl-rus flores101-devtest 0.37416 12.6 1012 23295
+ isl-ukr flores101-devtest 0.36508 11.5 1012 22810
+ nob-bel flores101-devtest 0.38315 8.6 1012 24829
+ nob-rus flores101-devtest 0.50426 22.1 1012 23295
+ nob-ukr flores101-devtest 0.51156 21.6 1012 22810
+ swe-bel flores101-devtest 0.39250 9.6 1012 24829
+ swe-rus flores101-devtest 0.53226 25.8 1012 23295
+ swe-ukr flores101-devtest 0.54257 25.7 1012 22810
+ dan-rus tatoeba-test-v2021-08-07 0.72627 53.9 1713 10480
+ nob-rus tatoeba-test-v2021-08-07 0.66881 45.8 1277 10659
+ swe-rus tatoeba-test-v2021-08-07 0.66248 45.9 1282 7659
benchmark_translations.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dab8282aeba3070b74172814ea0e3f0c8cc8e1f1236826449fea6cb43744235
+ size 5031235
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "MarianMTModel"
+   ],
+   "attention_dropout": 0.0,
+   "bad_words_ids": [
+     [
+       61372
+     ]
+   ],
+   "bos_token_id": 0,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 61372,
+   "decoder_vocab_size": 61373,
+   "dropout": 0.1,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 24342,
+   "forced_eos_token_id": 24342,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_length": 512,
+   "max_position_embeddings": 1024,
+   "model_type": "marian",
+   "normalize_embedding": false,
+   "num_beams": 4,
+   "num_hidden_layers": 6,
+   "pad_token_id": 61372,
+   "scale_embedding": true,
+   "share_encoder_decoder_embeddings": true,
+   "static_position_embeddings": true,
+   "torch_dtype": "float16",
+   "transformers_version": "4.18.0.dev0",
+   "use_cache": true,
+   "vocab_size": 61373
+ }
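
The generation-relevant defaults in this configuration (beam size, maximum length, vocabulary size) can be inspected programmatically; a minimal sketch using the published Hub ID, assuming transformers is installed:

```python
from transformers import AutoConfig

# load the config shown above from the Hub and print decoding defaults
config = AutoConfig.from_pretrained("Helsinki-NLP/opus-mt-tc-big-gmq-zle")
print(config.num_beams, config.max_length, config.vocab_size)  # 4 512 61373
```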
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23e6aae16db8e63d9defdbe9c00168015ee40b9e4d0efa49eefb608cf7f0ec78
+ size 604312963
source.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cd1d66c501e588e5deb2a9dfda8cf5370c870a5755087b676e04b5336ff1bb0
+ size 807151
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
target.spm ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a62fbc31052711d155dc46c2ce9a27f6987d4c3bdefdb721f9d70bcdf2cc54ce
+ size 994651
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"source_lang": "gmq", "target_lang": "zle", "unk_token": "<unk>", "eos_token": "</s>", "pad_token": "<pad>", "model_max_length": 512, "sp_model_kwargs": {}, "separate_vocabs": false, "special_tokens_map_file": null, "name_or_path": "marian-models/opusTCv20210807+pbt_transformer-big_2022-03-23/gmq-zle", "tokenizer_class": "MarianTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff