esm2_t6_8M_UR50D / tokenizer.json
{
  "version": "1.0",
  "truncation": null,
  "padding": null,
  "added_tokens": [],
  "normalizer": null,
  "pre_tokenizer": {
    "type": "BertPreTokenizer"
  },
  "post_processor": {
    "type": "TemplateProcessing",
    "single": [
      {
        "SpecialToken": {
          "id": "<cls>",
          "type_id": 0
        }
      },
      {
        "Sequence": {
          "id": "A",
          "type_id": 0
        }
      }
    ],
    "pair": [
      {
        "Sequence": {
          "id": "A",
          "type_id": 0
        }
      },
      {
        "Sequence": {
          "id": "B",
          "type_id": 1
        }
      }
    ],
    "special_tokens": {
      "<cls>": {
        "id": "<cls>",
        "ids": [
          0
        ],
        "tokens": [
          "<cls>"
        ]
      }
    }
  },
  "decoder": {
    "type": "WordPiece",
    "prefix": "",
    "cleanup": true
  },
  "model": {
    "type": "WordPiece",
    "unk_token": "<unk>",
    "continuing_subword_prefix": "",
    "max_input_chars_per_word": 10000000000,
    "vocab": {
      "<cls>": 0,
      "<pad>": 1,
      "<eos>": 2,
      "<unk>": 3,
      "L": 4,
      "A": 5,
      "G": 6,
      "V": 7,
      "S": 8,
      "E": 9,
      "R": 10,
      "T": 11,
      "I": 12,
      "D": 13,
      "P": 14,
      "K": 15,
      "Q": 16,
      "N": 17,
      "F": 18,
      "Y": 19,
      "M": 20,
      "H": 21,
      "W": 22,
      "C": 23,
      "X": 24,
      "B": 25,
      "U": 26,
      "Z": 27,
      "O": 28,
      ".": 29,
      "-": 30,
      "<null_1>": 31,
      "<mask>": 32
    }
  }
}
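
The JSON above is a complete configuration for the Hugging Face `tokenizers` library: a character-level WordPiece model over the amino-acid alphabet (empty continuing-subword prefix, so each residue letter is its own token), with a TemplateProcessing post-processor that prepends <cls> to single sequences. What follows is a minimal usage sketch in Python; the local file path "tokenizer.json" and the example peptide "MKTV" are assumptions for illustration, not part of this file.

from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Single sequences are wrapped by the "single" template: <cls> is
# prepended and every amino-acid letter becomes one token.
enc = tok.encode("MKTV")
print(enc.tokens)    # ['<cls>', 'M', 'K', 'T', 'V']
print(enc.ids)       # [0, 20, 15, 11, 7]  (ids per the vocab above)

# Sequence pairs follow the "pair" template: the second sequence gets
# type_id 1 and, as this template is written, no <cls> is added.
enc = tok.encode("MK", "TV")
print(enc.tokens)    # ['M', 'K', 'T', 'V']
print(enc.type_ids)  # [0, 0, 1, 1]

Note that truncation and padding are both null in this file, so any length limits or padding to a batch size would have to be enabled at load time (for example via the library's enable_truncation and enable_padding methods).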