Updated model with improved training and evaluation. Test and validation data are included as pickle files. Older legacy files were removed to avoid confusion.
f4e3085
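Since the test and validation splits ship as pickle files, they can be loaded directly with Python's pickle module. A minimal sketch follows; the filenames are hypothetical, as the commit does not name the actual pickle paths.

import pickle

# Hypothetical filenames -- substitute the actual pickle paths from this repo.
with open("val_data.pkl", "rb") as f:
    val_data = pickle.load(f)

with open("test_data.pkl", "rb") as f:
    test_data = pickle.load(f)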
{
  "added_tokens_decoder": {
    "0": {
      "content": "<s>NOTUSED",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>NOTUSED",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32004": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<s>NOTUSED",
    "</s>NOTUSED"
  ],
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "sp_model_kwargs": {},
  "tokenizer_class": "CamembertTokenizer",
  "unk_token": "<unk>"
}
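This config declares a CamembertTokenizer whose special tokens match the added_tokens_decoder entries above (<s>/</s> at ids 5/6, <mask> at id 32004 with lstrip enabled). A minimal sketch of loading it with transformers, assuming the config sits in a local directory or Hub repo; the repo id below is a placeholder, not the actual model name.

from transformers import AutoTokenizer

# "your-org/your-model" is a placeholder -- point this at the directory
# or Hub repo that contains this tokenizer_config.json.
tokenizer = AutoTokenizer.from_pretrained("your-org/your-model")

# The tokenizer wraps input with <s> (id 5) and </s> (id 6),
# matching bos_token/eos_token in the config above.
ids = tokenizer("Le camembert est délicieux.")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))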