{"unk_token": "", "bos_token": "", "eos_token": "", "add_prefix_space": true, "errors": "replace", "sep_token": "", "cls_token": "", "pad_token": "", "mask_token": "", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "/home/nlp/shon711/fast-coref/hard_training/ft_ontonotes_lingmess_100_0_25/model", "tokenizer_class": "RobertaTokenizer"}