vinucmer-1mer-2k / tokenizer.json
Commit e8054df: training RoBERTa structure with 4,786,611 samples, 24,054 test samples, vocab size 20, 3 hidden layers, hidden size 256, 4 attention heads, MLM probability 0.15, 10 processes, max length 512, train/test split 0.005, minimum sub-sequence length 50, maximum sub-sequence length 2,000, seed 42.
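The hyperparameters in the commit message map directly onto a RoBERTa masked-language-model configuration. A minimal sketch using the transformers RobertaConfig API, assuming values the commit does not state (the intermediate size and RoBERTa's position-embedding offset are assumptions):

```python
# Minimal sketch of the model config implied by the commit message.
# intermediate_size and the +2 position offset are assumptions, not
# taken from the commit message.
from transformers import RobertaConfig, RobertaForMaskedLM

config = RobertaConfig(
    vocab_size=20,                    # 15 IUPAC nucleotide codes + 5 special tokens
    hidden_size=256,
    num_hidden_layers=3,
    num_attention_heads=4,
    intermediate_size=1024,           # assumption: conventional 4 x hidden_size
    max_position_embeddings=512 + 2,  # assumption: max length 512 plus RoBERTa's pad offset
    pad_token_id=4,                   # <PAD> in the vocab below
    bos_token_id=3,                   # <CLS>
    eos_token_id=1,                   # <SEP>
)
model = RobertaForMaskedLM(config)
```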
{
"version": "1.0",
"truncation": {
"direction": "Right",
"max_length": 512,
"strategy": "LongestFirst",
"stride": 0
},
"padding": null,
"added_tokens": [
{
"id": 0,
"content": "<UNK>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 1,
"content": "<SEP>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 2,
"content": "<MASK>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 3,
"content": "<CLS>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
},
{
"id": 4,
"content": "<PAD>",
"single_word": false,
"lstrip": false,
"rstrip": false,
"normalized": false,
"special": true
}
],
"normalizer": null,
"pre_tokenizer": {
"type": "Whitespace"
},
"post_processor": {
"type": "RobertaProcessing",
"sep": [
"<SEP>",
1
],
"cls": [
"<CLS>",
3
],
"trim_offsets": true,
"add_prefix_space": true
},
"decoder": null,
"model": {
"type": "BPE",
"dropout": null,
"unk_token": "<UNK>",
"continuing_subword_prefix": null,
"end_of_word_suffix": null,
"fuse_unk": false,
"byte_fallback": false,
"vocab": {
"<UNK>": 0,
"<SEP>": 1,
"<MASK>": 2,
"<CLS>": 3,
"<PAD>": 4,
"A": 5,
"B": 6,
"C": 7,
"D": 8,
"G": 9,
"H": 10,
"K": 11,
"M": 12,
"N": 13,
"R": 14,
"S": 15,
"T": 16,
"V": 17,
"W": 18,
"Y": 19
},
"merges": []
}
}
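Because the pre-tokenizer is Whitespace and the BPE model has an empty merge list, this is effectively a 1-mer (single-nucleotide) tokenizer: input must be whitespace-separated bases, and no multi-character tokens can be produced. A minimal sketch of loading and using the file with the Hugging Face tokenizers library; the input sequence is illustrative:

```python
# Minimal sketch: load this file and encode a short nucleotide sequence.
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")

# Bases must be whitespace-separated, since the Whitespace pre-tokenizer
# does the splitting and the empty merge list yields only single characters.
enc = tok.encode("A C G T N")
print(enc.tokens)  # ['<CLS>', 'A', 'C', 'G', 'T', 'N', '<SEP>'] via RobertaProcessing
print(enc.ids)     # [3, 5, 7, 9, 16, 13, 1]
```

The RobertaProcessing post-processor wraps every encoding in `<CLS>` ... `<SEP>`, and the truncation block caps encodings at 512 tokens from the right, matching the max length in the commit message.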