enijkamp committed
Commit f319d91
1 parent: d4dc9dd

tokenizer fix

Files changed (1):
  1. tokenization_codegen25.py (+11, -13)
tokenization_codegen25.py CHANGED
@@ -59,18 +59,18 @@ def tiktoken_tokenizer(base="gpt2", pad_token=None, add_special=True):
         ]
         return fim_tokens
 
-    def include_codegen2_tokens():
+    def include_additional_tokens():
         tokens = []
         tokens += [f"<dummy_{i}>" for i in range(4)]
         tokens.append("<sep>")  # 50317
         tokens.append("<eom>")  # 50318
         tokens += [f"<mask_{i}>" for i in reversed(range(1, 51199-50318+1))]
-        return tokens
+        return tokens
 
     add_whitespaces = include_whitespace(n_min=2, n_max=32)
     add_tabs = include_tabs(n_min=2, n_max=10)
     fim_tokens = include_fim_tokens()
-    codegen2_tokens = include_codegen2_tokens()
+    additional_tokens = include_additional_tokens()
 
     tokenizer = tiktoken.get_encoding(base)
 
@@ -90,9 +90,9 @@ def tiktoken_tokenizer(base="gpt2", pad_token=None, add_special=True):
     for sp in fim_tokens:
         special_tokens[sp] = idx
         idx += 1
-    for sp in codegen2_tokens:
+    for sp in additional_tokens:
         special_tokens[sp] = idx
-        idx += 1
+        idx += 1
 
     if pad_token and pad_token not in tokenizer._special_tokens and pad_token not in special_tokens:
         special_tokens[pad_token] = idx
@@ -115,7 +115,7 @@ def tiktoken_tokenizer(base="gpt2", pad_token=None, add_special=True):
 
 class CodeGen25Tokenizer(PreTrainedTokenizer):
     """
-    Construct a CodeGen2.5 tokenizer. Based on byte-level Byte-Pair-Encoding.
+    Construct a CodeGen25 tokenizer. Based on byte-level Byte-Pair-Encoding.
     Args:
         vocab_file (`str`):
             Path to the vocabulary file.
@@ -133,6 +133,8 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
     ):
         pad_token_added = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
         eos_token_added = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+        self.add_eos_token = add_eos_token
+        self.encoder = tiktoken_tokenizer(base="gpt2", pad_token=pad_token, add_special=add_special_tokens)
         super().__init__(
             pad_token=pad_token_added,
             eos_token=eos_token_added,
@@ -140,8 +142,6 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
             add_special_tokens=add_special_tokens,
             **kwargs,
         )
-        self.add_eos_token = add_eos_token
-        self.encoder = tiktoken_tokenizer(base="gpt2", pad_token=pad_token, add_special=add_special_tokens)
 
     @property
     def vocab_size(self):
@@ -150,7 +150,7 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
 
     def get_vocab(self):
         """Returns vocab as a dict"""
-        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
+        vocab = {self.encoder.decode_single_token_bytes(i): i for i in range(self.vocab_size)}
         return vocab
 
     def _tokenize(self, text, **kwargs):
@@ -168,9 +168,7 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
         """Converts an index (integer) in a token (str) using the vocab."""
         return self.encoder.decode_single_token_bytes(index).decode("utf-8")
 
-    def _decode(self, token_ids: Union[int, List[int]], skip_special_tokens: bool = False, **kwargs):
-        if isinstance(token_ids, int):
-            token_ids = [token_ids]
+    def _decode(self, token_ids: List[int], skip_special_tokens: bool = False, **kwargs):
         if skip_special_tokens:
             token_ids = [t for t in token_ids if t not in self.all_special_ids]
         return self.encoder.decode(token_ids)
@@ -244,4 +242,4 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
 
     # has no vocab file
     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
-        return ()
+        return ()
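
The most consequential change is the ordering inside __init__: self.add_eos_token and self.encoder are now assigned before super().__init__() is called. In newer transformers releases, PreTrainedTokenizer.__init__ can call back into the subclass (for example while resolving special and added tokens), and those calls fail if the tiktoken encoder does not exist yet. A minimal sketch of the pattern, assuming this is the motivation; BaseTokenizer below is an illustrative stand-in, not the real transformers base class:

# Illustrative sketch of the init-order fix; BaseTokenizer stands in for
# PreTrainedTokenizer and is not the real transformers implementation.
class BaseTokenizer:
    def __init__(self, **kwargs):
        # Newer base-class constructors may consult the vocab, e.g. while
        # registering special tokens.
        _ = self.get_vocab()


class FixedTokenizer(BaseTokenizer):
    def __init__(self):
        self.encoder = {"a": 0}  # state is set up first, as in this commit
        super().__init__()

    def get_vocab(self):
        return dict(self.encoder)


class BrokenTokenizer(BaseTokenizer):
    def __init__(self):
        super().__init__()  # get_vocab() runs before self.encoder exists
        self.encoder = {"a": 0}

    def get_vocab(self):
        return dict(self.encoder)


FixedTokenizer()    # constructs fine
# BrokenTokenizer() # AttributeError: 'BrokenTokenizer' object has no attribute 'encoder'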
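
get_vocab also switches from self._convert_id_to_token(i), which decodes each id to UTF-8 text, to the raw bytes returned by self.encoder.decode_single_token_bytes(i), so the keys of the returned dict are now bytes rather than str. A plausible motivation (the commit message only says "tokenizer fix") is that byte-level BPE tokens are not all valid UTF-8 on their own, so decoding every id as text can raise UnicodeDecodeError partway through the vocabulary. A small standalone check, assuming tiktoken is installed:

import tiktoken

enc = tiktoken.get_encoding("gpt2")

def is_standalone_utf8(b: bytes) -> bool:
    try:
        b.decode("utf-8")
        return True
    except UnicodeDecodeError:
        return False

# Ids whose byte sequence is not valid UTF-8 by itself; these are the ids
# that would make a text-keyed vocab comprehension raise.
not_utf8 = [i for i in range(enc.n_vocab) if not is_standalone_utf8(enc.decode_single_token_bytes(i))]
print(f"{len(not_utf8)} of {enc.n_vocab} gpt2 tokens are not standalone UTF-8")

# Bytes-keyed vocab, mirroring the new get_vocab:
vocab = {enc.decode_single_token_bytes(i): i for i in range(enc.n_vocab)}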