RichardErkhov committed on
Commit
101dcab
1 parent: fdda391

uploaded model

Files changed (1)
  1. tokenizer_config.json +49 -0
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50257": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50258": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50259": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "[CLS]",
+  "clean_up_tokenization_spaces": true,
+  "eos_token": "<|endoftext|>",
+  "max_length": 384,
+  "model_max_length": 1024,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "stride": 128,
+  "tokenizer_class": "GPT2Tokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "only_second",
+  "unk_token": "<|endoftext|>"
+}
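
The stored values (max_length 384, stride 128, truncation_strategy "only_second") are typical of a question-answering setup where a short question is paired with a long context. Below is a minimal sketch of how this config behaves once loaded with the transformers library; "path/to/checkpoint" is a placeholder for the checkpoint directory containing this tokenizer_config.json (the diff does not name a repo id), and the truncation arguments are passed explicitly to mirror the stored values rather than assuming the saved defaults apply on every call.

    # A minimal sketch, assuming a local checkpoint directory that contains
    # this tokenizer_config.json; "path/to/checkpoint" is a placeholder,
    # not a repo id named anywhere in the diff.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("path/to/checkpoint")

    # Special tokens resolve per the config above.
    print(tok.bos_token, tok.eos_token, tok.pad_token, tok.sep_token)
    # [CLS] <|endoftext|> [PAD] [SEP]

    # Passing max_length/stride/truncation explicitly to mirror the stored
    # values: "only_second" truncates only the second segment (the context),
    # and stride=128 makes the overflowing windows overlap by 128 tokens.
    enc = tok(
        "a short question",
        "a long context passage " * 100,
        max_length=384,
        stride=128,
        truncation="only_second",
        return_overflowing_tokens=True,
    )
    print(len(enc["input_ids"]))  # number of overlapping context windows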