{ "add_prefix_space": false, "added_tokens_decoder": { "0": { "content": "<|endoftext|>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false, "special": true }, "10000": { "content": "[R]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10001": { "content": "[X]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10002": { "content": "[S]", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10003": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10004": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10005": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10006": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true }, "10007": { "content": "", "lstrip": false, "normalized": false, "rstrip": false, "single_word": false, "special": true } }, "bos_token": "<|endoftext|>", "clean_up_tokenization_spaces": true, "eos_token": "<|endoftext|>", "model_max_length": 1024, "tokenizer_class": "GPT2Tokenizer", "unk_token": "<|endoftext|>" }