enijkamp committed on
Commit c87ed5d
1 Parent(s): d6734d6

Undo breaking change for now

Files changed (1):
  1. tokenization_codegen25.py  +10 -10
tokenization_codegen25.py CHANGED
@@ -4,7 +4,7 @@
 # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/Apache-2.0
 """Tokenization classes for CodeGen2.5."""
 
-from typing import List, Optional, Union
+from typing import List, Optional
 
 from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
 from transformers.utils import logging
@@ -59,18 +59,18 @@ def tiktoken_tokenizer(base="gpt2", pad_token=None, add_special=True):
         ]
         return fim_tokens
 
-    def include_additional_tokens():
+    def include_codegen2_tokens():
         tokens = []
         tokens += [f"<dummy_{i}>" for i in range(4)]
         tokens.append("<sep>")  # 50317
         tokens.append("<eom>")  # 50318
         tokens += [f"<mask_{i}>" for i in reversed(range(1, 51199-50318+1))]
-        return tokens
+        return tokens
 
     add_whitespaces = include_whitespace(n_min=2, n_max=32)
     add_tabs = include_tabs(n_min=2, n_max=10)
     fim_tokens = include_fim_tokens()
-    additional_tokens = include_additional_tokens()
+    codegen2_tokens = include_codegen2_tokens()
 
     tokenizer = tiktoken.get_encoding(base)
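The renamed helper rebuilds the CodeGen2-era special tokens; the inline comments pin <sep> at id 50317 and <eom> at 50318, and the <mask_*> tokens fill the ids up to 51199 in descending order. As a quick sanity check of that layout (a standalone sketch, not part of the commit):

# Standalone sketch: the token layout produced by include_codegen2_tokens().
tokens = []
tokens += [f"<dummy_{i}>" for i in range(4)]
tokens.append("<sep>")   # intended id 50317
tokens.append("<eom>")   # intended id 50318
tokens += [f"<mask_{i}>" for i in reversed(range(1, 51199 - 50318 + 1))]

assert len(tokens) == 887          # 4 dummies + <sep> + <eom> + 881 masks
assert tokens[6] == "<mask_881>"   # masks are appended in descending order...
assert tokens[-1] == "<mask_1>"    # ...ending at <mask_1>, intended id 51199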
 
@@ -90,9 +90,9 @@ def tiktoken_tokenizer(base="gpt2", pad_token=None, add_special=True):
     for sp in fim_tokens:
         special_tokens[sp] = idx
         idx += 1
-    for sp in additional_tokens:
+    for sp in codegen2_tokens:
         special_tokens[sp] = idx
-        idx += 1
+        idx += 1
 
     if pad_token and pad_token not in tokenizer._special_tokens and pad_token not in special_tokens:
         special_tokens[pad_token] = idx
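The removed and restored idx += 1 lines read identically here because the page rendering strips leading whitespace; presumably only the indentation changed. The placement matters: only an increment inside the loop gives every token in codegen2_tokens a distinct id. A small illustration (hypothetical token list and start id, not from the commit):

# Illustration: the indentation of idx += 1 decides whether
# special tokens receive distinct ids.
tokens = ["<sep>", "<eom>", "<mask_2>", "<mask_1>"]

idx = 50317                  # hypothetical starting id
inside = {}
for sp in tokens:
    inside[sp] = idx
    idx += 1                 # inside the loop: consecutive, distinct ids
assert sorted(inside.values()) == [50317, 50318, 50319, 50320]

idx = 50317
outside = {}
for sp in tokens:
    outside[sp] = idx
idx += 1                     # outside the loop: all four tokens collide
assert set(outside.values()) == {50317}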
@@ -115,7 +115,7 @@ def tiktoken_tokenizer(base="gpt2", pad_token=None, add_special=True):
 
 class CodeGen25Tokenizer(PreTrainedTokenizer):
     """
-    Construct a CodeGen25 tokenizer. Based on byte-level Byte-Pair-Encoding.
+    Construct a CodeGen2.5 tokenizer. Based on byte-level Byte-Pair-Encoding.
     Args:
         vocab_file (`str`):
             Path to the vocabulary file.
@@ -133,8 +133,6 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
     ):
         pad_token_added = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
         eos_token_added = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
-        self.add_eos_token = add_eos_token
-        self.encoder = tiktoken_tokenizer(base="gpt2", pad_token=pad_token, add_special=add_special_tokens)
         super().__init__(
             pad_token=pad_token_added,
             eos_token=eos_token_added,
@@ -142,6 +140,8 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
             add_special_tokens=add_special_tokens,
             **kwargs,
         )
+        self.add_eos_token = add_eos_token
+        self.encoder = tiktoken_tokenizer(base="gpt2", pad_token=pad_token, add_special=add_special_tokens)
 
     @property
     def vocab_size(self):
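These two hunks restore the original ordering: add_eos_token and encoder are assigned after super().__init__() rather than before it. The ordering can matter because some transformers releases have PreTrainedTokenizer.__init__ call back into subclass methods such as get_vocab(), which in this class reads self.encoder; which ordering is safe therefore depends on the installed version. A minimal, library-free sketch of the hazard (illustration only; Base stands in for such a PreTrainedTokenizer version):

# Minimal sketch of the callback-ordering hazard this commit sidesteps.
class Base:
    def __init__(self):
        self.vocab = self.get_vocab()      # callback into the subclass

class Child(Base):
    def __init__(self):
        super().__init__()                 # callback runs before encoder exists...
        self.encoder = {"a": 0}

    def get_vocab(self):
        return dict(self.encoder)          # ...so this raises AttributeError

try:
    Child()
except AttributeError as exc:
    print("callback ran too early:", exc)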
@@ -150,7 +150,7 @@ class CodeGen25Tokenizer(PreTrainedTokenizer):
 
     def get_vocab(self):
         """Returns vocab as a dict"""
-        vocab = {self.encoder.decode_single_token_bytes(i): i for i in range(self.vocab_size)}
+        vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
        return vocab
 
     def _tokenize(self, text, **kwargs):
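The restored get_vocab() builds the dict through the tokenizer's own _convert_id_to_token rather than tiktoken's decode_single_token_bytes. The difference: decode_single_token_bytes returns bytes, so the removed comprehension produced a bytes-keyed dict, while get_vocab() is conventionally expected to map str tokens to ids. A quick check against tiktoken (assuming it is installed):

# Sketch: tiktoken decodes single tokens to bytes, not str.
import tiktoken

enc = tiktoken.get_encoding("gpt2")
raw = enc.decode_single_token_bytes(100)
print(type(raw))                              # <class 'bytes'>
print(raw.decode("utf-8", errors="replace"))  # str form, roughly what
                                              # _convert_id_to_token would return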
 