lcube-base / tokenizer_config.json
{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": true, "errors": "replace", "special_tokens_map_file": "../data/models/backbones/lbox-open-gpt-small-50k/special_tokens_map.json", "name_or_path": "../data/models/backbones/lbox-open-gpt-small-50k", "tokenizer_class": "GPT2Tokenizer"}