GPT0.35B-ja-tokenizer-unigram-v1-CultulaX-default-filtered-ja-part-00000-00002-2000step / tokenizer_config.json
{
  "add_prefix_space": true,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<CLS>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<SEP>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "<EOD>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "7": {
      "content": "<MASK>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<EOD>"
  ],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<CLS>",
  "eos_token": "</s>",
  "extra_ids": 0,
  "legacy": true,
  "mask_token": "<MASK>",
  "model_max_length": 2048,
  "pad_token": "<pad>",
  "sep_token": "<SEP>",
  "sp_model_kwargs": {},
  "split_special_tokens": true,
  "tokenizer_class": "T5Tokenizer",
  "unk_token": "<unk>"
}
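
For reference, a minimal loading sketch with the Hugging Face transformers library, assuming this tokenizer_config.json sits in a local directory together with the SentencePiece model file; the directory path and the sample sentence are placeholders, not part of this repository.

from transformers import AutoTokenizer

# Placeholder path: point this at the directory (or Hub repo id) that
# contains tokenizer_config.json and the SentencePiece model file.
# tokenizer_class is "T5Tokenizer", so the sentencepiece package must be installed.
tokenizer = AutoTokenizer.from_pretrained("path/to/tokenizer")

# Encode a Japanese sample sentence, respecting model_max_length = 2048.
text = "これは日本語トークナイザの簡単な動作確認です。"
ids = tokenizer(text, max_length=2048, truncation=True)["input_ids"]
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))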