{
  "additional_special_tokens": [
    "<s>NOTUSED",
    "</s>NOTUSED"
  ],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": {
    "__type": "AddedToken",
    "content": "<mask>",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "max_len": 512,
  "model_max_length": 512,
  "name_or_path": "./sentencepiece_tokenizers_morphemes_v4/SentencePieceTokenizerMorphemesExcluded_PubMed_Abstracts_CHARS_lowercased_fixed_utf8_V4/",
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "sp_model_kwargs": {},
  "special_tokens_map_file": "./sentencepiece_tokenizers_morphemes_v4/SentencePieceTokenizerMorphemesExcluded_PubMed_Abstracts_CHARS_lowercased_fixed_utf8_V4/special_tokens_map.json",
  "tokenizer_class": "CamembertTokenizer",
  "unk_token": "<unk>"
}
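A minimal sketch of how a tokenizer_config.json like this is typically consumed, assuming the transformers and sentencepiece packages are installed and the directory named in "name_or_path" (with its SentencePiece model and special_tokens_map.json) is still present; the path below is copied from the config and only resolves in the original environment.

    # Minimal sketch: load the tokenizer described by this tokenizer_config.json.
    # Assumes the local directory from "name_or_path" still exists next to its
    # sentencepiece model file; otherwise point from_pretrained at your own copy.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(
        "./sentencepiece_tokenizers_morphemes_v4/"
        "SentencePieceTokenizerMorphemesExcluded_PubMed_Abstracts_CHARS_lowercased_fixed_utf8_V4/"
    )

    # "tokenizer_class" is CamembertTokenizer, so special tokens follow the
    # RoBERTa/CamemBERT convention: <s> ... </s> plus <pad>, <unk>, <mask>.
    encoded = tokenizer("protein kinase inhibitors", truncation=True, max_length=512)
    print(encoded["input_ids"])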