yassine benlaria committed
Commit 8368ded
Parent(s): c765ef4
Training in progress, epoch 0
Files changed:
- added_tokens.json +9 -0
- config.json +37 -0
- pytorch_model.bin +3 -0
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +116 -0
- tokenizer_config.json +125 -0
- training_args.bin +3 -0
- vocab.json +0 -0
added_tokens.json
ADDED
@@ -0,0 +1,9 @@
+{
+  "__liv__": 128104,
+  "__sma__": 128106,
+  "__sme__": 128107,
+  "__smj__": 128110,
+  "__smn__": 128108,
+  "__sms__": 128109,
+  "__vro__": 128105
+}
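These nine lines register seven new language-code tokens (Livonian, Võro, and five Sámi languages) on top of the base M2M100 vocabulary. A minimal sketch for confirming that a local copy of this checkpoint resolves the codes to the IDs above (the checkpoint path is a placeholder):

```python
from transformers import M2M100Tokenizer

# Placeholder path; point this at a local clone of the repo.
tok = M2M100Tokenizer.from_pretrained("path/to/this-checkpoint")

# Each code should map to the ID recorded in added_tokens.json.
for code in ["__liv__", "__vro__", "__sma__", "__sme__",
             "__smn__", "__sms__", "__smj__"]:
    print(code, tok.convert_tokens_to_ids(code))
# Expected: __liv__ 128104, __vro__ 128105, __sma__ 128106, ...
```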
config.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "facebook/m2m100_418M",
+  "activation_dropout": 0.0,
+  "activation_function": "relu",
+  "architectures": [
+    "M2M100ForConditionalGeneration"
+  ],
+  "attention_dropout": 0.1,
+  "bos_token_id": 0,
+  "d_model": 1024,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 4096,
+  "decoder_layerdrop": 0.05,
+  "decoder_layers": 12,
+  "decoder_start_token_id": 2,
+  "dropout": 0.1,
+  "early_stopping": true,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 4096,
+  "encoder_layerdrop": 0.05,
+  "encoder_layers": 12,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "max_length": 200,
+  "max_position_embeddings": 1024,
+  "model_type": "m2m_100",
+  "num_beams": 5,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "scale_embedding": true,
+  "torch_dtype": "float32",
+  "transformers_version": "4.32.0",
+  "use_cache": true,
+  "vocab_size": 128154
+}
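The configuration is otherwise the stock facebook/m2m100_418M setup (12+12 layers, d_model 1024); the notable change is vocab_size, enlarged to 128154 to accommodate the extended vocabulary. The embedding matrix must be resized to match before training; a sketch under the assumption that the standard Transformers API was used:

```python
from transformers import M2M100ForConditionalGeneration

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

# Grow the (tied) input/output embeddings to the size recorded in
# config.json; new rows are randomly initialized and learned during
# fine-tuning.
model.resize_token_embeddings(128154)
assert model.config.vocab_size == 128154
```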
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79455a6e17a91c2e35a326f1a160bfe01a7850fc2fd1400441899d51fb7a3d30
+size 1935967745
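As usual on the Hub, the commit stores only a Git LFS pointer file (spec v1); the ~1.9 GB weight file itself lives in LFS storage, addressed by its SHA-256. A sketch for verifying a downloaded payload against the pointer's oid:

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256 (how LFS computes the oid)."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

expected = "79455a6e17a91c2e35a326f1a160bfe01a7850fc2fd1400441899d51fb7a3d30"
assert sha256_of("pytorch_model.bin") == expected
```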
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8f7c76ed2a5e0822be39f0a4f95a55eb19c78f4593ce609e2edbc2aea4d380a
+size 2423393
special_tokens_map.json
ADDED
@@ -0,0 +1,116 @@
+{
+  "additional_special_tokens": [
+    "__liv__",
+    "__vro__",
+    "__sma__",
+    "__sme__",
+    "__smn__",
+    "__sms__",
+    "__smj__",
+    "__af__",
+    "__am__",
+    "__ar__",
+    "__ast__",
+    "__az__",
+    "__ba__",
+    "__be__",
+    "__bg__",
+    "__bn__",
+    "__br__",
+    "__bs__",
+    "__ca__",
+    "__ceb__",
+    "__cs__",
+    "__cy__",
+    "__da__",
+    "__de__",
+    "__el__",
+    "__en__",
+    "__es__",
+    "__et__",
+    "__fa__",
+    "__ff__",
+    "__fi__",
+    "__fr__",
+    "__fy__",
+    "__ga__",
+    "__gd__",
+    "__gl__",
+    "__gu__",
+    "__ha__",
+    "__he__",
+    "__hi__",
+    "__hr__",
+    "__ht__",
+    "__hu__",
+    "__hy__",
+    "__id__",
+    "__ig__",
+    "__ilo__",
+    "__is__",
+    "__it__",
+    "__ja__",
+    "__jv__",
+    "__ka__",
+    "__kk__",
+    "__km__",
+    "__kn__",
+    "__ko__",
+    "__lb__",
+    "__lg__",
+    "__ln__",
+    "__lo__",
+    "__lt__",
+    "__lv__",
+    "__mg__",
+    "__mk__",
+    "__ml__",
+    "__mn__",
+    "__mr__",
+    "__ms__",
+    "__my__",
+    "__ne__",
+    "__nl__",
+    "__no__",
+    "__ns__",
+    "__oc__",
+    "__or__",
+    "__pa__",
+    "__pl__",
+    "__ps__",
+    "__pt__",
+    "__ro__",
+    "__ru__",
+    "__sd__",
+    "__si__",
+    "__sk__",
+    "__sl__",
+    "__so__",
+    "__sq__",
+    "__sr__",
+    "__ss__",
+    "__su__",
+    "__sv__",
+    "__sw__",
+    "__ta__",
+    "__th__",
+    "__tl__",
+    "__tn__",
+    "__tr__",
+    "__uk__",
+    "__ur__",
+    "__uz__",
+    "__vi__",
+    "__wo__",
+    "__xh__",
+    "__yi__",
+    "__yo__",
+    "__zh__",
+    "__zu__"
+  ],
+  "bos_token": "<s>",
+  "eos_token": "</s>",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
+}
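The seven new codes are listed ahead of the 100 stock M2M100 codes. A hedged sketch of how such a map could have been produced before fine-tuning (the actual preparation script is not part of this commit):

```python
from transformers import M2M100Tokenizer

tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

new_codes = ["__liv__", "__vro__", "__sma__", "__sme__",
             "__smn__", "__sms__", "__smj__"]

# Register the merged list so SentencePiece never splits the new codes.
# Note: the exact IDs they receive (128104-128110 here) depend on how the
# base vocabulary's reserved "madeup" slots are handled.
tok.add_special_tokens(
    {"additional_special_tokens": new_codes + tok.additional_special_tokens}
)
tok.save_pretrained("m2m100-extended")  # writes the JSON files in this commit
```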
tokenizer_config.json
ADDED
@@ -0,0 +1,125 @@
+{
+  "additional_special_tokens": [
+    "__liv__",
+    "__vro__",
+    "__sma__",
+    "__sme__",
+    "__smn__",
+    "__sms__",
+    "__smj__",
+    "__af__",
+    "__am__",
+    "__ar__",
+    "__ast__",
+    "__az__",
+    "__ba__",
+    "__be__",
+    "__bg__",
+    "__bn__",
+    "__br__",
+    "__bs__",
+    "__ca__",
+    "__ceb__",
+    "__cs__",
+    "__cy__",
+    "__da__",
+    "__de__",
+    "__el__",
+    "__en__",
+    "__es__",
+    "__et__",
+    "__fa__",
+    "__ff__",
+    "__fi__",
+    "__fr__",
+    "__fy__",
+    "__ga__",
+    "__gd__",
+    "__gl__",
+    "__gu__",
+    "__ha__",
+    "__he__",
+    "__hi__",
+    "__hr__",
+    "__ht__",
+    "__hu__",
+    "__hy__",
+    "__id__",
+    "__ig__",
+    "__ilo__",
+    "__is__",
+    "__it__",
+    "__ja__",
+    "__jv__",
+    "__ka__",
+    "__kk__",
+    "__km__",
+    "__kn__",
+    "__ko__",
+    "__lb__",
+    "__lg__",
+    "__ln__",
+    "__lo__",
+    "__lt__",
+    "__lv__",
+    "__mg__",
+    "__mk__",
+    "__ml__",
+    "__mn__",
+    "__mr__",
+    "__ms__",
+    "__my__",
+    "__ne__",
+    "__nl__",
+    "__no__",
+    "__ns__",
+    "__oc__",
+    "__or__",
+    "__pa__",
+    "__pl__",
+    "__ps__",
+    "__pt__",
+    "__ro__",
+    "__ru__",
+    "__sd__",
+    "__si__",
+    "__sk__",
+    "__sl__",
+    "__so__",
+    "__sq__",
+    "__sr__",
+    "__ss__",
+    "__su__",
+    "__sv__",
+    "__sw__",
+    "__ta__",
+    "__th__",
+    "__tl__",
+    "__tn__",
+    "__tr__",
+    "__uk__",
+    "__ur__",
+    "__uz__",
+    "__vi__",
+    "__wo__",
+    "__xh__",
+    "__yi__",
+    "__yo__",
+    "__zh__",
+    "__zu__"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "</s>",
+  "language_codes": "m2m100",
+  "model_max_length": 1024,
+  "num_madeup_words": 8,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "src_lang": null,
+  "tgt_lang": null,
+  "tokenizer_class": "M2M100Tokenizer",
+  "tokenizer_file": null,
+  "unk_token": "<unk>"
+}
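src_lang and tgt_lang are left null, so the language pair is chosen at call time. Because the new codes are not in the tokenizer's built-in language map, forcing the target via the raw token ID is the safer route. A usage sketch (the checkpoint path is a placeholder, and support for any given direction depends on the training data):

```python
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

path = "path/to/this-checkpoint"  # placeholder
tok = M2M100Tokenizer.from_pretrained(path)
model = M2M100ForConditionalGeneration.from_pretrained(path)

tok.src_lang = "fi"  # stock codes work via the usual attribute
inputs = tok("Hyvää huomenta!", return_tensors="pt")

# The new codes are unknown to tok.get_lang_id(), so force the target
# language by token ID instead.
out = model.generate(
    **inputs,
    forced_bos_token_id=tok.convert_tokens_to_ids("__sme__"),
)
print(tok.batch_decode(out, skip_special_tokens=True))
```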
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5288dba7c255397397617df08c9ceacc4a3c83e2c861a1578ea96fd1f2bc94d9
+size 4155
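training_args.bin is the pickled TrainingArguments object the Trainer saves alongside checkpoints; at ~4 kB it holds hyperparameters, not weights. A sketch for inspecting it (unpickling needs transformers importable, ideally the 4.32.0 that wrote it):

```python
import torch

# Older torch versions unpickle by default; torch >= 2.6 requires
# weights_only=False for arbitrary pickled objects like this.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs,
      args.per_device_train_batch_size)
```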
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff