silsever committed on
Commit
3d677f0
1 Parent(s): cee0d51

Upload 8 files

Browse files
Files changed (8) hide show
  1. README.md +67 -0
  2. config.json +44 -0
  3. pytorch_model.bin +3 -0
  4. source.spm +0 -0
  5. special_tokens_map.json +5 -0
  6. target.spm +0 -0
  7. tokenizer_config.json +13 -0
  8. vocab.json +0 -0
README.md ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ language:
3
+ - en
4
+ - de
5
+ tags:
6
+ - translation
7
+ - opus-mt
8
+ license: cc-by-4.0
9
+ model-index:
10
+ - name: opus-mt-tc-big-eng-deu
11
+ results:
12
+ - task:
13
+ name: Translation eng-deu
14
+ type: translation
15
+ args: eng-deu
16
+ dataset:
17
+ name: Tatoeba-test.eng-deu
18
+ type: tatoeba_mt
19
+ args: eng-deu
20
+ metrics:
21
+ - name: BLEU
22
+ type: bleu
23
+ value: 45.7
24
+ ---
25
+
26
+ # Opus Tatoeba English-German
27
+
28
+ *This model was obtained by running the script [convert_marian_to_pytorch.py](https://github.com/huggingface/transformers/blob/master/src/transformers/models/marian/convert_marian_to_pytorch.py) - [Instruction available here](https://github.com/huggingface/transformers/tree/main/scripts/tatoeba). The original models were trained by [Jörg Tiedemann](https://blogs.helsinki.fi/tiedeman/) using the [MarianNMT](https://marian-nmt.github.io/) library. See all available `MarianMTModel` models on the profile of the [Helsinki NLP](https://huggingface.co/Helsinki-NLP) group.
29
+
30
+ This is the conversion of the checkpoint [opus+bt-2021-04-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-deu/opus+bt-2021-04-13.zip).*
31
+
32
+
33
+
34
+ ---
35
+
36
+ ### eng-deu
37
+
38
+ * source language name: English
39
+ * target language name: German
40
+ * OPUS readme: [README.md](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-deu/README.md)
41
+
42
+ * model: transformer-align
43
+ * source language code: en
44
+ * target language code: de
45
+ * dataset: opus+bt
46
+ * release date: 2021-02-22
47
+ * pre-processing: normalization + SentencePiece (spm32k,spm32k)
48
+ * download original weights: [opus+bt-2021-04-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-deu/opus+bt-2021-04-13.zip)
49
+ * Test set translations data: [opus+bt-2021-04-13.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-deu/opus+bt-2021-04-13.test.txt)
50
+ * test set scores file: [opus+bt-2021-04-13.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-deu/opus+bt-2021-04-13.eval.txt)
51
+ * Benchmarks
52
+ |Test set|BLEU|chr-F|
53
+ |---|---|---|
54
+ |newssyscomb2009.eng-deu|22.8|0.538|
55
+ |news-test2008.eng-deu|23.7|0.533|
56
+ |newstest2009.eng-deu|22.6|0.532|
57
+ |newstest2010.eng-deu|25.5|0.552|
58
+ |newstest2011.eng-deu|22.6|0.527|
59
+ |newstest2012.eng-deu|23.4|0.530|
60
+ |newstest2013.eng-deu|27.1|0.556|
61
+ |newstest2014-deen.eng-deu|29.6|0.599|
62
+ |newstest2015-ende.eng-deu|31.6|0.600|
63
+ |newstest2016-ende.eng-deu|37.2|0.644|
64
+ |newstest2017-ende.eng-deu|30.6|0.595|
65
+ |newstest2018-ende.eng-deu|45.6|0.696|
66
+ |newstest2019-ende.eng-deu|41.3|0.659|
67
+ |Tatoeba-test.eng-deu|45.7|0.654|
config.json ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "activation_dropout": 0.0,
3
+ "activation_function": "swish",
4
+ "architectures": [
5
+ "MarianMTModel"
6
+ ],
7
+ "attention_dropout": 0.0,
8
+ "bad_words_ids": [
9
+ [
10
+ 65000
11
+ ]
12
+ ],
13
+ "bos_token_id": 0,
14
+ "d_model": 512,
15
+ "decoder_attention_heads": 8,
16
+ "decoder_ffn_dim": 2048,
17
+ "decoder_layerdrop": 0.0,
18
+ "decoder_layers": 6,
19
+ "decoder_start_token_id": 65000,
20
+ "decoder_vocab_size": 65001,
21
+ "dropout": 0.1,
22
+ "encoder_attention_heads": 8,
23
+ "encoder_ffn_dim": 2048,
24
+ "encoder_layerdrop": 0.0,
25
+ "encoder_layers": 6,
26
+ "eos_token_id": 0,
27
+ "forced_eos_token_id": 0,
28
+ "init_std": 0.02,
29
+ "is_encoder_decoder": true,
30
+ "max_length": 512,
31
+ "max_position_embeddings": 512,
32
+ "model_type": "marian",
33
+ "normalize_embedding": false,
34
+ "num_beams": 6,
35
+ "num_hidden_layers": 6,
36
+ "pad_token_id": 65000,
37
+ "scale_embedding": true,
38
+ "share_encoder_decoder_embeddings": true,
39
+ "static_position_embeddings": true,
40
+ "torch_dtype": "float16",
41
+ "transformers_version": "4.26.0.dev0",
42
+ "use_cache": true,
43
+ "vocab_size": 65001
44
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:732239b875a9428a7cc5191e22c4b44d7edbc3867d202cdd74b1481350e587a5
3
+ size 221612169
source.spm ADDED
Binary file (794 kB). View file
 
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "eos_token": "</s>",
3
+ "pad_token": "<pad>",
4
+ "unk_token": "<unk>"
5
+ }
target.spm ADDED
Binary file (819 kB). View file
 
tokenizer_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "eos_token": "</s>",
3
+ "model_max_length": 512,
4
+ "name_or_path": "opus+bt-2021-04-13",
5
+ "pad_token": "<pad>",
6
+ "separate_vocabs": false,
7
+ "source_lang": "en",
8
+ "sp_model_kwargs": {},
9
+ "special_tokens_map_file": "opus+bt-2021-04-13/special_tokens_map.json",
10
+ "target_lang": "de",
11
+ "tokenizer_class": "MarianTokenizer",
12
+ "unk_token": "<unk>"
13
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff