Erantr1 committed
Commit
74290b0
1 Parent(s): 459ebd6

Upload tokenizer

Files changed (2)
  1. tokenizer.json +5 -4
  2. tokenizer_config.json +43 -1
tokenizer.json CHANGED
@@ -9,7 +9,7 @@
   "single_word": false,
   "lstrip": false,
   "rstrip": false,
-  "normalized": false,
+  "normalized": true,
   "special": true
 },
 {
@@ -18,7 +18,7 @@
   "single_word": false,
   "lstrip": false,
   "rstrip": false,
-  "normalized": false,
+  "normalized": true,
   "special": true
 },
 {
@@ -27,7 +27,7 @@
   "single_word": false,
   "lstrip": false,
   "rstrip": false,
-  "normalized": false,
+  "normalized": true,
   "special": true
 },
 {
@@ -36,7 +36,7 @@
   "single_word": false,
   "lstrip": false,
   "rstrip": false,
-  "normalized": false,
+  "normalized": true,
   "special": true
 },
 {
@@ -83,6 +83,7 @@
   "end_of_word_suffix": "",
   "fuse_unk": false,
   "byte_fallback": false,
+  "ignore_merges": false,
   "vocab": {
     "<s>": 0,
     "<pad>": 1,
tokenizer_config.json CHANGED
@@ -1,5 +1,47 @@
 {
   "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50264": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
   "bos_token": "<s>",
   "clean_up_tokenization_spaces": true,
   "cls_token": "<s>",
@@ -7,7 +49,7 @@
   "errors": "replace",
   "mask_token": "<mask>",
   "model_max_length": 512,
-  "pad_token": "<pad>",
+  "pad_token": "</s>",
   "sep_token": "</s>",
   "tokenizer_class": "RobertaTokenizer",
   "trim_offsets": true,