GinnM committed on
Commit c3d3f40
1 Parent(s): 5c054bd

Upload tokenizer

special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "<cls>",
  "eos_token": "<eos>",
  "mask_token": "<mask>",
  "pad_token": "<pad>",
  "unk_token": "<unk>"
}
tokenization_proprime.py ADDED
@@ -0,0 +1,139 @@
import os
from typing import List, Optional
from pathlib import Path
from transformers.tokenization_utils import PreTrainedTokenizer
from transformers.utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}


def load_vocab_file(vocab_file):
    # One token per line; entries are stripped so trailing whitespace is ignored.
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]


class ProPrimeTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for protein sequences: one token per residue plus special tokens."""

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        unk_token="<unk>",
        cls_token="<cls>",
        pad_token="<pad>",
        mask_token="<mask>",
        eos_token="<eos>",
        **kwargs,
    ):
        if vocab_file is None:
            vocab_file = Path(__file__).parent / "vocab.txt"
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        super().__init__(
            unk_token=unk_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            eos_token=eos_token,
            **kwargs,
        )

        # TODO: all the tokens are added? But they are also part of the vocab... bit strange.
        # None of them are special, but they all need special splitting.
        self.unique_no_split_tokens = self.all_tokens
        self._update_trie(self.unique_no_split_tokens)

    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab(self):
        base_vocab = self._token_to_id.copy()
        base_vocab.update(self.added_tokens_encoder)
        return base_vocab

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(
        self,
        token_ids_0: List,
        token_ids_1: Optional[List] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                List of ids of the second sequence.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None):
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt"
        )
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return len(self.all_tokens)


ProPrimeTokenizer.register_for_auto_class("AutoTokenizer")
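Not part of the commit, but for orientation: a minimal usage sketch of the tokenizer above. The Hub repo id "GinnM/ProPrime" is a placeholder for wherever these files actually live, and trust_remote_code=True is needed because the class ships as custom code wired up through auto_map.

from transformers import AutoTokenizer

# Placeholder repo id -- substitute the actual Hub path of this repository.
tok = AutoTokenizer.from_pretrained("GinnM/ProPrime", trust_remote_code=True)

# Every vocab token is registered as a no-split token, so a raw amino-acid
# string is split into one token per residue, then wrapped in <cls> ... <eos>.
enc = tok("MKTAYIAKQR")
print(enc["input_ids"])
print(tok.convert_ids_to_tokens(enc["input_ids"]))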
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "<cls>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<eos>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "32": {
      "content": "<mask>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "auto_map": {
    "AutoTokenizer": [
      "tokenization_proprime.ProPrimeTokenizer",
      null
    ]
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "<cls>",
  "eos_token": "<eos>",
  "mask_token": "<mask>",
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<pad>",
  "tokenizer_class": "ProPrimeTokenizer",
  "unk_token": "<unk>"
}
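Two notes on the config above: the very large model_max_length is the transformers sentinel for "no maximum length set", and the null second entry in auto_map means no fast tokenizer is provided. As a small consistency sketch (assuming the files from this commit sit in the current working directory), the ids in added_tokens_decoder should line up with the 0-based row positions in vocab.txt:

from tokenization_proprime import ProPrimeTokenizer

tok = ProPrimeTokenizer(vocab_file="vocab.txt")
assert tok.token_to_id("<cls>") == 0
assert tok.token_to_id("<pad>") == 1
assert tok.token_to_id("<eos>") == 2
assert tok.token_to_id("<unk>") == 3
assert tok.token_to_id("<mask>") == 32  # last row of vocab.txt
print(tok.vocab_size)  # 33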
vocab.txt ADDED
@@ -0,0 +1,33 @@
<cls>
<pad>
<eos>
<unk>
L
A
G
V
S
E
R
T
I
D
P
K
Q
N
F
Y
M
H
W
C
X
B
U
Z
O
.
-
<null_1>
<mask>
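For orientation, the 33 rows above map to token ids 0-32 in file order: four leading special tokens, the 20 standard amino-acid letters, the extended codes X, B, U, Z and O, the gap characters "." and "-", then <null_1> and <mask>. A quick way to inspect the mapping, assuming vocab.txt is local:

tokens = [line.strip() for line in open("vocab.txt")]
print(dict(enumerate(tokens)))  # {0: '<cls>', 1: '<pad>', ..., 32: '<mask>'}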