{
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_basic_tokenize": true,
  "do_lower_case": false,
  "full_tokenizer_file": null,
  "mask_token": "[MASK]",
  "max_len": 512,
  "model_max_length": 512,
  "never_split": [
"+ูุง", | |
"ุณ+", | |
"[ู ุณุชุฎุฏู ]", | |
"+ู", | |
"+ูู ", | |
"+ุฉ", | |
"+ู", | |
"ูู+", | |
"[ุจุฑูุฏ]", | |
"[ุฑุงุจุท]", | |
"+ู", | |
"+ูู", | |
"+ุง", | |
"+ุงุช", | |
"+ู", | |
"ุจ+", | |
"+ูุง", | |
"+ูู", | |
"+ูู ", | |
"ู+", | |
"+ูู", | |
"+ูู ุง", | |
"ู+", | |
"+ูู ุง", | |
"+ุงู", | |
"+ุช", | |
"+ูู", | |
"ู+", | |
"+ูุง", | |
"ุงู+", | |
"ู+" | |
  ],
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
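
A minimal sketch of how a BERT tokenizer picks up this config, assuming it sits in a local model directory alongside the matching vocab.txt (the directory path below is a hypothetical placeholder). The never_split entries let the segmentation affixes (e.g. "+ها", "ال+") and the placeholder tokens "[مستخدم]" (user), "[بريد]" (email), and "[رابط]" (link) pass through basic tokenization without being split on "+" or the brackets:

# sketch.py -- illustrative only; "./arabic-bert" is an assumed local path
# containing this tokenizer_config.json and its matching vocab.txt.
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("./arabic-bert")

# Tokens listed in never_split bypass both punctuation splitting and
# WordPiece, so pre-segmented affixes and the placeholders stay whole.
print(tokenizer.tokenize("[مستخدم] شارك [رابط]"))

Without these never_split entries, the basic tokenizer would treat "+", "[", and "]" as punctuation and break the affixes and placeholders into pieces before WordPiece ever ran.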