valhalla committed
Commit 337a4a6 (1 parent: 4b9ea3a)
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "M2M100ForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 0,
+   "d_model": 24,
+   "decoder_attention_heads": 2,
+   "decoder_ffn_dim": 16,
+   "decoder_layerdrop": 0.05,
+   "decoder_layers": 2,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 2,
+   "encoder_ffn_dim": 16,
+   "encoder_layerdrop": 0.05,
+   "encoder_layers": 2,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "max_length": 200,
+   "max_position_embeddings": 1024,
+   "model_type": "m2m_100",
+   "num_beams": 5,
+   "num_hidden_layers": 2,
+   "pad_token_id": 1,
+   "scale_embedding": true,
+   "transformers_version": "4.4.0.dev0",
+   "use_cache": true,
+   "vocab_size": 128112
+ }
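This config describes a tiny M2M100 model (d_model 24, 2 encoder/decoder layers, 2 attention heads per side), sized for testing rather than translation quality. A minimal loading sketch, assuming transformers >= 4.4 is installed; the repo id below is a hypothetical placeholder, since this commit does not name the repository:

from transformers import M2M100Config, M2M100ForConditionalGeneration

repo_id = "<namespace>/<repo>"  # hypothetical placeholder for this model repository

config = M2M100Config.from_pretrained(repo_id)
model = M2M100ForConditionalGeneration.from_pretrained(repo_id)

# The vocab_size (128112) x d_model (24) embedding table dominates
# the ~12.9 MB checkpoint; the transformer layers themselves are tiny.
print(model.num_parameters())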
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:628a23aecff4d778b998793ddaaeabb848c6eb815e2036283aaec915c41396a2
+ size 12917483
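pytorch_model.bin is stored through Git LFS, so the diff records only a pointer (spec version, sha256 oid, byte size), not the weights themselves. A quick integrity check once the real file has been pulled, comparing against the oid from this pointer:

import hashlib

with open("pytorch_model.bin", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

# Must match the oid recorded in the LFS pointer above.
assert digest == "628a23aecff4d778b998793ddaaeabb848c6eb815e2036283aaec915c41396a2", digest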
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8f7c76ed2a5e0822be39f0a4f95a55eb19c78f4593ce609e2edbc2aea4d380a
+ size 2423393
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "additional_special_tokens": ["__af__", "__am__", "__ar__", "__ast__", "__az__", "__ba__", "__be__", "__bg__", "__bn__", "__br__", "__bs__", "__ca__", "__ceb__", "__cs__", "__cy__", "__da__", "__de__", "__el__", "__en__", "__es__", "__et__", "__fa__", "__ff__", "__fi__", "__fr__", "__fy__", "__ga__", "__gd__", "__gl__", "__gu__", "__ha__", "__he__", "__hi__", "__hr__", "__ht__", "__hu__", "__hy__", "__id__", "__ig__", "__ilo__", "__is__", "__it__", "__ja__", "__jv__", "__ka__", "__kk__", "__km__", "__kn__", "__ko__", "__lb__", "__lg__", "__ln__", "__lo__", "__lt__", "__lv__", "__mg__", "__mk__", "__ml__", "__mn__", "__mr__", "__ms__", "__my__", "__ne__", "__nl__", "__no__", "__ns__", "__oc__", "__or__", "__pa__", "__pl__", "__ps__", "__pt__", "__ro__", "__ru__", "__sd__", "__si__", "__sk__", "__sl__", "__so__", "__sq__", "__sr__", "__ss__", "__su__", "__sv__", "__sw__", "__ta__", "__th__", "__tl__", "__tn__", "__tr__", "__uk__", "__ur__", "__uz__", "__vi__", "__wo__", "__xh__", "__yi__", "__yo__", "__zh__", "__zu__"]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"src_lang": null, "tgt_lang": null, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "special_tokens_map_file": "m2m_100_1.2B_v2/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "m2m_100_1.2B_v2"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff