usv3_usdc_predictor_0 / tokenizer.json
{
  "version": "1.0",
  "truncation": {
    "direction": "Right",
    "max_length": 256,
    "strategy": "LongestFirst",
    "stride": 0
  },
  "padding": null,
  "added_tokens": [
    {
      "id": 0,
      "content": "<s>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 1,
      "content": "<pad>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 2,
      "content": "</s>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 3,
      "content": "<unk>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    },
    {
      "id": 4,
      "content": "<mask>",
      "single_word": false,
      "lstrip": false,
      "rstrip": false,
      "normalized": false,
      "special": true
    }
  ],
  "normalizer": null,
  "pre_tokenizer": {
    "type": "WhitespaceSplit"
  },
  "post_processor": null,
  "decoder": null,
  "model": {
    "type": "WordLevel",
    "vocab": {
      "<s>": 0,
      "<pad>": 1,
      "</s>": 2,
      "<unk>": 3,
      "<mask>": 4,
      "SWAP": 5,
      "7": 6,
      "9": 7,
      "45": 8,
      "15": 9,
      "44": 10,
      "14": 11,
      "35": 12,
      "33": 13,
      "8": 14,
      "20": 15,
      "43": 16,
      "34": 17,
      "32": 18,
      "6": 19,
      "46": 20,
      "16": 21,
      "17": 22,
      "11": 23,
      "19": 24,
      "21": 25,
      "36": 26,
      "42": 27,
      "5": 28,
      "37": 29,
      "25": 30,
      "30": 31,
      "13": 32,
      "12": 33,
      "29": 34,
      "31": 35,
      "18": 36,
      "39": 37,
      "40": 38,
      "26": 39,
      "4": 40,
      "28": 41,
      "23": 42,
      "24": 43,
      "38": 44,
      "22": 45,
      "41": 46,
      "10": 47,
      "27": 48,
      "47": 49,
      "3": 50,
      "48": 51,
      "2": 52,
      "49": 53,
      "1": 54,
      "50": 55,
      "BURN": 56,
      "MINT": 57
    },
    "unk_token": "<unk>"
  }
}
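
The config above is a standard `tokenizers` serialization: a WordLevel model over a 58-token vocabulary (five special tokens, the event tokens SWAP/MINT/BURN, and the numeric strings "1" through "50"), WhitespaceSplit pre-tokenization, right-side truncation at 256 tokens, and no normalizer, padding, post-processor, or decoder. The sketch below shows one way to load and exercise it with the Hugging Face `tokenizers` library; the local path "tokenizer.json" and the sample input string are assumptions for illustration, not part of this repository.

# A minimal sketch, assuming the config above is saved locally as
# "tokenizer.json" and the `tokenizers` package is installed.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# WhitespaceSplit pre-tokenization: the input is split on whitespace and each
# piece is looked up in the WordLevel vocab; anything outside the vocab falls
# back to the unk_token "<unk>" (id 3). No special tokens are added because
# post_processor is null. The input string here is a hypothetical example.
enc = tok.encode("SWAP 7 MINT 99")
print(enc.tokens)  # expected: ['SWAP', '7', 'MINT', '<unk>']
print(enc.ids)     # expected: [5, 6, 57, 3]

# Truncation is preconfigured ("max_length": 256, "direction": "Right"), so
# sequences longer than 256 tokens are cut from the right end automatically.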