lecraquito committed on
Commit
cf529ad
1 Parent(s): 6d82fb2

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +1 -0
  2. tokenizer.json +118 -0
  3. tokenizer_config.json +5 -0
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {}
tokenizer.json ADDED
@@ -0,0 +1,118 @@
+ {
+ "version": "1.0",
+ "truncation": null,
+ "padding": null,
+ "added_tokens": [],
+ "normalizer": null,
+ "pre_tokenizer": null,
+ "post_processor": null,
+ "decoder": null,
+ "model": {
+ "type": "BPE",
+ "dropout": null,
+ "unk_token": null,
+ "continuing_subword_prefix": null,
+ "end_of_word_suffix": null,
+ "fuse_unk": false,
+ "byte_fallback": false,
+ "vocab": {
+ "\n": 0,
+ " ": 1,
+ "!": 2,
+ "\"": 3,
+ "#": 4,
+ "$": 5,
+ "%": 6,
+ "&": 7,
+ "'": 8,
+ "(": 9,
+ ")": 10,
+ "*": 11,
+ "+": 12,
+ ",": 13,
+ "-": 14,
+ ".": 15,
+ "/": 16,
+ "0": 17,
+ "1": 18,
+ "2": 19,
+ "3": 20,
+ "4": 21,
+ "5": 22,
+ "6": 23,
+ "7": 24,
+ "8": 25,
+ "9": 26,
+ ":": 27,
+ ";": 28,
+ "<": 29,
+ "=": 30,
+ ">": 31,
+ "?": 32,
+ "@": 33,
+ "A": 34,
+ "B": 35,
+ "C": 36,
+ "D": 37,
+ "E": 38,
+ "F": 39,
+ "G": 40,
+ "H": 41,
+ "I": 42,
+ "J": 43,
+ "K": 44,
+ "L": 45,
+ "M": 46,
+ "N": 47,
+ "O": 48,
+ "P": 49,
+ "Q": 50,
+ "R": 51,
+ "S": 52,
+ "T": 53,
+ "U": 54,
+ "V": 55,
+ "W": 56,
+ "X": 57,
+ "Y": 58,
+ "Z": 59,
+ "[": 60,
+ "\\": 61,
+ "]": 62,
+ "^": 63,
+ "_": 64,
+ "`": 65,
+ "a": 66,
+ "b": 67,
+ "c": 68,
+ "d": 69,
+ "e": 70,
+ "f": 71,
+ "g": 72,
+ "h": 73,
+ "i": 74,
+ "j": 75,
+ "k": 76,
+ "l": 77,
+ "m": 78,
+ "n": 79,
+ "o": 80,
+ "p": 81,
+ "q": 82,
+ "r": 83,
+ "s": 84,
+ "t": 85,
+ "u": 86,
+ "v": 87,
+ "w": 88,
+ "x": 89,
+ "y": 90,
+ "z": 91,
+ "{": 92,
+ "|": 93,
+ "}": 94,
+ "~": 95
+ },
+ "merges": []
+ }
+ }
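The tokenizer.json above describes a bare character-level BPE model: a 96-entry vocabulary (newline, space, and the printable ASCII characters "!" through "~"), an empty merges list, and no normalizer, pre-tokenizer, post-processor, or special tokens, so every input is simply split into single characters. A minimal sketch of inspecting it with the `tokenizers` library follows; the file path is an assumption, pointing at a local copy of the file in this commit:

```python
from tokenizers import Tokenizer

# Path assumed: a local copy of the tokenizer.json uploaded in this commit.
tok = Tokenizer.from_file("tokenizer.json")

# With an empty merges list and no pre-tokenizer, the BPE model just
# falls back to single characters.
enc = tok.encode("Hello")
print(enc.tokens)  # ['H', 'e', 'l', 'l', 'o']
print(enc.ids)     # [41, 70, 77, 77, 80], matching the vocab entries above
```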
tokenizer_config.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "clean_up_tokenization_spaces": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "tokenizer_class": "PreTrainedTokenizerFast"
+ }
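tokenizer_config.json registers the files as a PreTrainedTokenizerFast with effectively no length limit (model_max_length is the transformers "very large integer" sentinel). A sketch of loading the three uploaded files through transformers, assuming they have been downloaded into a local directory named ./tokenizer (the directory name is illustrative, not part of the commit):

```python
from transformers import AutoTokenizer

# Directory name is an assumption; it only needs to contain the three files
# from this commit: tokenizer.json, tokenizer_config.json, special_tokens_map.json.
tok = AutoTokenizer.from_pretrained("./tokenizer")

# Character-level ids: space is id 1, letters follow the vocab above.
print(tok("Hello world")["input_ids"])
# [41, 70, 77, 77, 80, 1, 88, 80, 83, 77, 69]
```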