ollieturnbull committed on
Commit 774da27
1 Parent(s): fa34385

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +9 -0
  2. tokenizer.json +73 -0
  3. tokenizer_config.json +16 -0
special_tokens_map.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "pad_token": {
+     "content": "<PAD>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
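This map declares a single special token, the <PAD> padding token. A minimal sketch of the equivalent in-code definition, assuming the Hugging Face tokenizers library these files target:

from tokenizers import AddedToken

# <PAD> as declared in special_tokens_map.json:
# lstrip/rstrip False -> no surrounding whitespace is stripped when matching,
# normalized False    -> matched against the raw, un-normalized text,
# single_word False   -> may also match when embedded in a longer word.
pad_token = AddedToken(
    "<PAD>",
    lstrip=False,
    rstrip=False,
    normalized=False,
    single_word=False,
)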
tokenizer.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "version": "1.0",
+   "truncation": null,
+   "padding": null,
+   "added_tokens": [
+     {
+       "id": 0,
+       "content": "<PAD>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     }
+   ],
+   "normalizer": null,
+   "pre_tokenizer": {
+     "type": "ByteLevel",
+     "add_prefix_space": false,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "post_processor": {
+     "type": "ByteLevel",
+     "add_prefix_space": true,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "decoder": {
+     "type": "ByteLevel",
+     "add_prefix_space": true,
+     "trim_offsets": true,
+     "use_regex": true
+   },
+   "model": {
+     "type": "BPE",
+     "dropout": null,
+     "unk_token": null,
+     "continuing_subword_prefix": null,
+     "end_of_word_suffix": null,
+     "fuse_unk": false,
+     "byte_fallback": false,
+     "vocab": {
+       "<PAD>": 0,
+       "A": 1,
+       "R": 2,
+       "N": 3,
+       "D": 4,
+       "B": 5,
+       "C": 6,
+       "E": 7,
+       "Q": 8,
+       "Z": 9,
+       "G": 10,
+       "H": 11,
+       "I": 12,
+       "L": 13,
+       "K": 14,
+       "M": 15,
+       "F": 16,
+       "P": 17,
+       "S": 18,
+       "T": 19,
+       "W": 20,
+       "Y": 21,
+       "V": 22,
+       "1": 23,
+       "2": 24,
+       "3": 25
+     },
+     "merges": []
+   }
+ }
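The vocabulary is single characters only (what looks like the amino-acid alphabet, including the ambiguity codes B and Z, plus the digits 1–3) and the merge list is empty, so the BPE model emits one token per character. A minimal sketch of using the file directly with the tokenizers library; the input sequence is only an illustrative example:

from tokenizers import Tokenizer

# Load the standalone tokenizer definition uploaded in this commit.
tokenizer = Tokenizer.from_file("tokenizer.json")

# With "merges": [] each character maps to its own vocabulary entry.
encoding = tokenizer.encode("ARNDC")   # example sequence, not taken from the repo
print(encoding.tokens)                 # ['A', 'R', 'N', 'D', 'C']
print(encoding.ids)                    # [1, 2, 3, 4, 6]

# Round-trip back to text through the ByteLevel decoder.
print(tokenizer.decode(encoding.ids))  # 'ARNDC'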
tokenizer_config.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<PAD>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": true,
+   "model_max_length": 512,
+   "pad_token": "<PAD>",
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
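With all three files in place, the tokenizer loads through transformers as a PreTrainedTokenizerFast. A minimal sketch, assuming the files have been downloaded into a local ./tokenizer/ directory (placeholder path):

from transformers import AutoTokenizer

# Placeholder path: the three uploaded JSON files are assumed to sit here.
tokenizer = AutoTokenizer.from_pretrained("./tokenizer")

print(tokenizer.pad_token, tokenizer.pad_token_id)  # <PAD> 0
print(tokenizer.model_max_length)                   # 512

# Padding fills with id 0, as declared in tokenizer_config.json.
batch = tokenizer(["ARNDC", "GG"], padding=True)
print(batch["input_ids"])       # [[1, 2, 3, 4, 6], [10, 10, 0, 0, 0]]
print(batch["attention_mask"])  # [[1, 1, 1, 1, 1], [1, 1, 0, 0, 0]]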