roberta-small-hi-char / tokenizer_config.json
{"errors": "replace", "bos_token": "<s>", "eos_token": "</s>", "sep_token": "[SEP]", "cls_token": "[CLS]", "unk_token": "[UNK]", "pad_token": "[PAD]", "mask_token": "[MASK]", "add_prefix_space": false, "trim_offsets": true, "do_lower_case": false, "tokenize_chinese_chars": true, "strip_accents": false, "never_split": ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"], "model_max_length": 128, "do_basic_tokenize": true, "special_tokens_map_file": "/home/jupyter/.cache/huggingface/transformers/4ecfda4a8d883cb8d97588a93c58a7c6afe25671a084d4dbd3dae56533179782.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "roberta-small-japanese-aozora-char-custom", "tokenizer_class": "RobertaTokenizer"}