{ "added_tokens_decoder": { "256": { "content": "[CLS]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "257": { "content": "[SEP]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "258": { "content": "[BOW]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "259": { "content": "[EOW]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "260": { "content": "[PAD]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "261": { "content": "[MASK]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "additional_special_tokens": [ "[BOW]", "[EOW]" ], "clean_up_tokenization_spaces": true, "do_basic_tokenize": true, "do_lower_case": true, "cls_token": "[CLS]", "sep_token": "[SEP]", "mask_token": "[MASK]", "pad_token": "[PAD]", "max_word_length": 50, "model_max_length": 512, "never_split": null, "strip_accents": null, "tokenize_chinese_chars": true, "tokenizer_class": "CharacterBertTokenizer" }