Upload 4 files
- sentencepiece.bpe.model +3 -0
- special_tokens_map.json +1 -0
- tokenizer_config.json +1 -0
- vocab.json +0 -0
sentencepiece.bpe.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bfa7dae3b7caabbdfd569cc48edc55283b31a6a56a3befb699bc7b9526961f77
+size 2585112
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "additional_special_tokens": ["__en__", "__ha__", "__is__", "__ja__", "__cs__", "__ru__", "__zh__", "__de__"]}
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"src_lang": "en", "tgt_lang": null, "bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>", "sp_model_kwargs": {}, "language_codes": "wmt21", "num_madeup_words": 0, "additional_special_tokens": ["__en__", "__ha__", "__is__", "__ja__", "__cs__", "__ru__", "__zh__", "__de__"], "tokenizer_class": "M2M100Tokenizer"}
vocab.json
ADDED
The diff for this file is too large to render.
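
Taken together, the four uploaded files make up a complete tokenizer directory. A minimal sketch of loading them with the transformers library, assuming the files have been downloaded to a local directory (the path below is a placeholder, not part of this commit):

from transformers import M2M100Tokenizer  # tokenizer_class declared in tokenizer_config.json

# Placeholder path standing in for wherever sentencepiece.bpe.model, vocab.json,
# special_tokens_map.json and tokenizer_config.json were saved.
tokenizer = M2M100Tokenizer.from_pretrained("./uploaded-tokenizer")

# src_lang defaults to "en" per tokenizer_config.json; the WMT21-style language
# codes (__en__, __ha__, __is__, __ja__, __cs__, __ru__, __zh__, __de__) are
# registered as additional special tokens.
batch = tokenizer("Hello, world!", return_tensors="pt")
print(tokenizer.get_lang_id("de"))  # id of the __de__ token, usable as a target-language marker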