Commit c554f4f (parent: c32a512), committed by aapot

Update tokenizer
EasyLM/models/llama/llama_model.py CHANGED
@@ -20,9 +20,10 @@ import einops
 
 import sentencepiece as spm
 from transformers import AutoTokenizer
+from transformers.convert_slow_tokenizer import import_protobuf
 from transformers.configuration_utils import PretrainedConfig
 from transformers.utils import logging
-from transformers.tokenization_utils import PreTrainedTokenizer
+from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
 from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput
 from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
 from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging
@@ -355,7 +356,7 @@ class LLaMAConfig(PretrainedConfig):
                 truncation_side=truncation_side,
             )
         else:
-            tokenizer = LLaMATokenizer(
+            tokenizer = LlamaTokenizer(
                 vocab_file=config.vocab_file,
                 add_bos_token=config.add_bos_token,
                 add_eos_token=config.add_eos_token,
@@ -1176,18 +1177,83 @@ class FlaxLLaMAForCausalLM(FlaxLLaMAPreTrainedModel):
 VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model"}
 
 PRETRAINED_VOCAB_FILES_MAP = {}
+PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {}
+SPIECE_UNDERLINE = "▁"
 
 
-class LLaMATokenizer(PreTrainedTokenizer):
+class LlamaTokenizer(PreTrainedTokenizer):
     """
-    Construct a LLaMA tokenizer. Based on byte-level Byte-Pair-Encoding.
+    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
+    no padding token in the original model.
+
     Args:
         vocab_file (`str`):
             Path to the vocabulary file.
+        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
+            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+            token instead.
+        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
+            The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token.
+        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
+            The end of sequence token.
+        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
+            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
+            attention mechanisms or loss computation.
+        sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
+            Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+            SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+            to set:
+
+            - `enable_sampling`: Enable subword regularization.
+            - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+              - `nbest_size = {0,1}`: No sampling is performed.
+              - `nbest_size > 1`: samples from the nbest_size results.
+              - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+                using forward-filtering-and-backward-sampling algorithm.
+
+            - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+              BPE-dropout.
+
+        add_bos_token (`bool`, *optional*, defaults to `True`):
+            Whether or not to add an `bos_token` at the start of sequences.
+        add_eos_token (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an `eos_token` at the end of sequences.
+        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
+            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
+            extra spaces.
+        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
+            Whether or not the default system prompt for Llama should be used.
+        spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
+            Whether or not to add spaces between special tokens.
+        legacy (`bool`, *optional*):
+            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
+            and #25224 which includes fixes to properly handle tokens that appear after special tokens. A simple
+            example:
+
+            - `legacy=True`:
+            ```python
+            >>> from transformers import T5Tokenizer
+
+            >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=True)
+            >>> tokenizer.encode("Hello <extra_id_0>.")
+            [8774, 32099, 3, 5, 1]
+            ```
+            - `legacy=False`:
+            ```python
+            >>> from transformers import T5Tokenizer
+
+            >>> tokenizer = T5Tokenizer.from_pretrained("t5-base", legacy=False)
+            >>> tokenizer.encode("Hello <extra_id_0>.")  # the extra space `[3]` is no longer here
+            [8774, 32099, 5, 1]
+            ```
+            Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
+
     """
 
     vocab_files_names = VOCAB_FILES_NAMES
     pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
+    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
     model_input_names = ["input_ids", "attention_mask"]
 
     def __init__(
@@ -1196,44 +1262,91 @@ class LLaMATokenizer(PreTrainedTokenizer):
         unk_token="<unk>",
         bos_token="<s>",
         eos_token="</s>",
+        pad_token=None,
         sp_model_kwargs: Optional[Dict[str, Any]] = None,
         add_bos_token=False,
         add_eos_token=False,
+        clean_up_tokenization_spaces=False,
+        use_default_system_prompt=False,
+        spaces_between_special_tokens=False,
+        legacy=None,
         **kwargs,
     ):
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
+        bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
+        eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
+        unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
+        pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
+
+        if legacy is None:
+            logger.warning_once(
+                f"You are using the default legacy behaviour of the {self.__class__}. This is"
+                " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
+                " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
+                " means, and thoroughly read the reason why this was added as explained in"
+                " https://github.com/huggingface/transformers/pull/24565"
+            )
+            legacy = True
+
+        self.legacy = legacy
         self.vocab_file = vocab_file
         self.add_bos_token = add_bos_token
         self.add_eos_token = add_eos_token
-        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.use_default_system_prompt = use_default_system_prompt
+        self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
 
-        with tempfile.NamedTemporaryFile() as tfile:
-            with open_file(self.vocab_file, 'rb') as fin:
-                tfile.write(fin.read())
-                tfile.flush()
-                tfile.seek(0)
-            self.sp_model.Load(tfile.name)
-        """ Initialisation"""
-        self.add_special_tokens(dict(
-            unk_token=unk_token,
+        super().__init__(
             bos_token=bos_token,
             eos_token=eos_token,
-        ))
-        self.pad_token_id = self.unk_token_id
+            unk_token=unk_token,
+            pad_token=pad_token,
+            add_bos_token=add_bos_token,
+            add_eos_token=add_eos_token,
+            sp_model_kwargs=self.sp_model_kwargs,
+            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
+            use_default_system_prompt=use_default_system_prompt,
+            spaces_between_special_tokens=spaces_between_special_tokens,
+            legacy=legacy,
+            **kwargs,
+        )
 
     @property
-    def vocab_size(self):
-        """Returns vocab size"""
-        return self.sp_model.get_piece_size()
+    def unk_token_length(self):
+        return len(self.sp_model.encode(str(self.unk_token)))
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.get_spm_processor
+    def get_spm_processor(self, from_slow=False):
+        tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        if self.legacy or from_slow:  # no dependency on protobuf
+            tokenizer.Load(self.vocab_file)
+            return tokenizer
+
+        with open(self.vocab_file, "rb") as f:
+            sp_model = f.read()
+            model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
+            model = model_pb2.ModelProto.FromString(sp_model)
+            normalizer_spec = model_pb2.NormalizerSpec()
+            normalizer_spec.add_dummy_prefix = False
+            model.normalizer_spec.MergeFrom(normalizer_spec)
+            sp_model = model.SerializeToString()
+            tokenizer.LoadFromSerializedProto(sp_model)
+        return tokenizer
 
-    @property
-    def bos_token_id(self) -> Optional[int]:
-        return self.sp_model.bos_id()
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["sp_model"] = None
+        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+        return state
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
 
     @property
-    def eos_token_id(self) -> Optional[int]:
-        return self.sp_model.eos_id()
+    def vocab_size(self):
+        """Returns vocab size"""
+        return self.sp_model.get_piece_size()
 
     def get_vocab(self):
         """Returns vocab as a dict"""
@@ -1241,9 +1354,40 @@ class LLaMATokenizer(PreTrainedTokenizer):
         vocab.update(self.added_tokens_encoder)
         return vocab
 
-    def _tokenize(self, text):
-        """Returns a tokenized string."""
-        return self.sp_model.encode(text, out_type=str)
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize
+    def tokenize(self, text: "TextInput", add_special_tokens=False, **kwargs) -> List[str]:
+        """
+        Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
+        first token is special.
+        """
+        if self.legacy or len(text) == 0:
+            return super().tokenize(text, **kwargs)
+
+        tokens = super().tokenize(SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " "), **kwargs)
+
+        if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
+            tokens = tokens[1:]
+        return tokens
+
+    # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize
+    def _tokenize(self, text, **kwargs):
+        """
+        Returns a tokenized string.
+
+        We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
+        SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
+        `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
+        `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
+        `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
+        """
+        tokens = self.sp_model.encode(text, out_type=str)
+        if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
+            return tokens
+
+        # 1. Encode string + prefix ex: "<unk> Hey"
+        tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
+        # 2. Remove self.unk_token from ['<','unk','>', '▁Hey']
+        return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
 
     def _convert_token_to_id(self, token):
         """Converts a token (str) in an id using the vocab."""
@@ -1256,13 +1400,17 @@ class LLaMATokenizer(PreTrainedTokenizer):
 
     def convert_tokens_to_string(self, tokens):
         """Converts a sequence of tokens (string) in a single string."""
+        # since we manually add the prefix space, we have to remove it when decoding
+        if tokens[0].startswith(SPIECE_UNDERLINE):
+            tokens[0] = tokens[0][1:]
+
         current_sub_tokens = []
         out_string = ""
         prev_is_special = False
-        for token in tokens:
+        for i, token in enumerate(tokens):
             # make sure that special tokens are not decoded using sentencepiece model
             if token in self.all_special_tokens:
-                if not prev_is_special:
+                if not prev_is_special and i != 0 and self.legacy:
                     out_string += " "
                 out_string += self.sp_model.decode(current_sub_tokens) + token
                 prev_is_special = True
@@ -1271,14 +1419,16 @@ class LLaMATokenizer(PreTrainedTokenizer):
                 current_sub_tokens.append(token)
                 prev_is_special = False
         out_string += self.sp_model.decode(current_sub_tokens)
-        return out_string.strip()
+        return out_string
 
     def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
         """
         Save the vocabulary and special tokens file to a directory.
+
         Args:
             save_directory (`str`):
                 The directory in which to save the vocabulary.
+
         Returns:
             `Tuple(str)`: Paths to the files saved.
         """
@@ -1299,18 +1449,13 @@ class LLaMATokenizer(PreTrainedTokenizer):
         return (out_vocab_file,)
 
     def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
-        if self.add_bos_token:
-            bos_token_ids = [self.bos_token_id]
-        else:
-            bos_token_ids = []
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
 
-        output = bos_token_ids + token_ids_0
+        output = bos_token_id + token_ids_0 + eos_token_id
 
         if token_ids_1 is not None:
-            output = output + token_ids_1
-
-        if self.add_eos_token:
-            output = output + [self.eos_token_id]
+            output = output + bos_token_id + token_ids_1 + eos_token_id
 
         return output
 
@@ -1320,6 +1465,7 @@ class LLaMATokenizer(PreTrainedTokenizer):
         """
         Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
         special tokens using the tokenizer `prepare_for_model` method.
+
         Args:
             token_ids_0 (`List[int]`):
                 List of IDs.
@@ -1327,6 +1473,7 @@ class LLaMATokenizer(PreTrainedTokenizer):
                 Optional second list of IDs for sequence pairs.
             already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                 Whether or not the token list is already formatted with special tokens for the model.
+
         Returns:
             `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
         """
@@ -1335,26 +1482,49 @@ class LLaMATokenizer(PreTrainedTokenizer):
                 token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
             )
 
+        bos_token_id = [1] if self.add_bos_token else []
+        eos_token_id = [1] if self.add_eos_token else []
+
         if token_ids_1 is None:
-            return [1] + ([0] * len(token_ids_0)) + [1]
-        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+            return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
+        return (
+            bos_token_id
+            + ([0] * len(token_ids_0))
+            + eos_token_id
+            + bos_token_id
+            + ([0] * len(token_ids_1))
+            + eos_token_id
+        )
 
     def create_token_type_ids_from_sequences(
         self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
     ) -> List[int]:
         """
-        Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
-        use of token type ids, therefore a list of zeros is returned.
+        Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
+        sequence pair mask has the following format:
+
+        ```
+        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+        | first sequence    | second sequence |
+        ```
+
+        if token_ids_1 is None, only returns the first portion of the mask (0s).
+
         Args:
             token_ids_0 (`List[int]`):
-                List of IDs.
+                List of ids.
             token_ids_1 (`List[int]`, *optional*):
                 Optional second list of IDs for sequence pairs.
+
         Returns:
-            `List[int]`: List of zeros.
+            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
         """
-        eos = [self.eos_token_id]
+        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+        eos_token_id = [self.eos_token_id] if self.add_eos_token else []
 
-        if token_ids_1 is None:
-            return len(token_ids_0 + eos) * [0]
-        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
+        output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
+
+        if token_ids_1 is not None:
+            output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
+
+        return output
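In short, the old ad-hoc LLaMATokenizer is replaced with the upstream-style LlamaTokenizer: special tokens become AddedToken objects, the SentencePiece model is loaded through get_spm_processor (with the legacy/non-legacy split from transformers PR #24565), and build_inputs_with_special_tokens and get_special_tokens_mask now honour add_bos_token and add_eos_token for every segment instead of appending EOS at most once at the end. A minimal sketch of the new special-token behaviour (the import path is hypothetical; it assumes the updated module and this repo's tokenizer.model are available locally):

```python
# Hypothetical import path; assumes the updated EasyLM module is importable
# and tokenizer.model from this repo is in the working directory.
from EasyLM.models.llama.llama_model import LlamaTokenizer

# legacy=False exercises the new non-legacy path (needs protobuf installed
# for the serialized-model rewrite in get_spm_processor).
tok = LlamaTokenizer(vocab_file="tokenizer.model",
                     add_bos_token=True, add_eos_token=True, legacy=False)

# Each segment is now wrapped as [bos] + ids + [eos]; the old code appended
# eos at most once, after the final segment.
pair = tok.build_inputs_with_special_tokens([10, 11], [12])
assert pair == [tok.bos_token_id, 10, 11, tok.eos_token_id,
                tok.bos_token_id, 12, tok.eos_token_id]

# The special-tokens mask follows add_bos_token/add_eos_token instead of
# unconditionally assuming both bos and eos are present.
assert tok.get_special_tokens_mask([10, 11]) == [1, 0, 0, 1]
```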
tokenizer.model CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a5f2cc257a9b7f1031b398e29a604ed41bd02aa8f130f325bb78e24b125b6950
-size 1400560
+oid sha256:1980c00aa3cb5455177a39efa3e60e7b8887ee89c3f7b8950719592a08ad9456
+size 1400411
tokenizer.vocab CHANGED
The diff for this file is too large to render. See raw diff
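Even though the tokenizer.vocab diff is not rendered, the retrained vocabulary can be inspected locally from tokenizer.model. A short sketch, assuming the files from this commit have been downloaded into the working directory:

```python
import sentencepiece as spm

# Load the retrained SentencePiece model shipped in this commit.
sp = spm.SentencePieceProcessor()
sp.Load("tokenizer.model")

# The vocab size is unchanged by this commit (64256, set in
# train_sentencepiece.py); only the learned pieces differ.
print(sp.get_piece_size())
print(sp.encode("Hyvää päivää!", out_type=str))
```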
 
train_sentencepiece.py CHANGED
@@ -2,7 +2,9 @@ import sentencepiece as spm
 
 spm.SentencePieceTrainer.train(input="/researchdisk/training_dataset_sentences/train.txt", model_prefix="tokenizer",
                                model_type="bpe", split_digits=True, vocab_size=64256, byte_fallback=True,
+                               normalization_rule_name="nfkc",
                                user_defined_symbols=["[INST]", "[/INST]", "<<SYS>>", "<</SYS>>"],
+                               required_chars="abcdefghijklmnopqrstuvwxyzåäöABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ",
                                train_extremely_large_corpus=True,
                                input_sentence_size=500000000, shuffle_input_sentence=True,
                                num_threads=96)
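The two added trainer options change normalization and character coverage rather than the corpus: normalization_rule_name="nfkc" applies NFKC normalization to input text before segmentation, and required_chars guarantees that every letter of the Finnish alphabet (notably å, ä, ö) gets its own vocabulary piece instead of depending on byte_fallback. A quick local check against the retrained model; a sketch, assuming tokenizer.model from this commit is present:

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("tokenizer.model")

# NFKC folds compatibility characters before segmentation, e.g. the "ﬁ"
# ligature normalizes to plain "fi", so it no longer needs byte fallback.
print(sp.encode("ﬁle", out_type=str))

# required_chars: each listed letter should exist as its own piece, so
# piece_to_id should not fall back to the unknown id for any of them.
for ch in "åäöÅÄÖ":
    assert sp.piece_to_id(ch) != sp.unk_id()
```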