Update replit_lm_tokenizer.py
When initializing the tokenizer, an error occurs because `sp_model` is used before the `sp_model` instance has been initialized.
```
Traceback (most recent call last):
  File "/home/dobby/trainer/examples/function_calling/preprocessing.py", line 93, in <module>
    preprocess(bpe_process, args)
  File "/home/dobby/trainer/examples/function_calling/preprocessing.py", line 53, in preprocess
    tokenizer = AutoTokenizer.from_pretrained(
  File "/home/dobby/.cache/pypoetry/virtualenvs/trainer-lQlppuIx-py3.10/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py", line 819, in from_pretrained
    return tokenizer_class.from_pretrained(
  File "/home/dobby/.cache/pypoetry/virtualenvs/trainer-lQlppuIx-py3.10/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2059, in from_pretrained
    return cls._from_pretrained(
  File "/home/dobby/.cache/pypoetry/virtualenvs/trainer-lQlppuIx-py3.10/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 2298, in _from_pretrained
    tokenizer = cls(*init_inputs, **init_kwargs)
  File "/home/dobby/.cache/huggingface/modules/transformers_modules/replit/replit-code-v1-3b/cc0a4f17a8d72b71d62ea53cb0e23e4dac352067/replit_lm_tokenizer.py", line 66, in __init__
    super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
  File "/home/dobby/.cache/pypoetry/virtualenvs/trainer-lQlppuIx-py3.10/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 367, in __init__
    self._add_tokens(
  File "/home/dobby/.cache/pypoetry/virtualenvs/trainer-lQlppuIx-py3.10/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 467, in _add_tokens
    current_vocab = self.get_vocab().copy()
  File "/home/dobby/.cache/huggingface/modules/transformers_modules/replit/replit-code-v1-3b/cc0a4f17a8d72b71d62ea53cb0e23e4dac352067/replit_lm_tokenizer.py", line 76, in get_vocab
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
  File "/home/dobby/.cache/huggingface/modules/transformers_modules/replit/replit-code-v1-3b/cc0a4f17a8d72b71d62ea53cb0e23e4dac352067/replit_lm_tokenizer.py", line 73, in vocab_size
    return self.sp_model.get_piece_size()
AttributeError: 'ReplitLMTokenizer' object has no attribute 'sp_model'
```
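As the traceback shows, `PreTrainedTokenizer.__init__` calls `self._add_tokens(...)`, which calls `self.get_vocab()` and `self.vocab_size`, both of which need `self.sp_model`; but the subclass only creates `sp_model` after `super().__init__()` returns. A minimal sketch of the pattern (illustrative class names, not the real `transformers` code):

```python
# Sketch of the failure: the base __init__ invokes a hook that the subclass
# implements on top of state created only after super().__init__() returns.

class BaseTokenizer:
    def __init__(self):
        # Stands in for PreTrainedTokenizer.__init__ calling
        # self._add_tokens() -> self.get_vocab() during construction.
        self.get_vocab()

    def get_vocab(self):
        raise NotImplementedError


class BrokenTokenizer(BaseTokenizer):
    def __init__(self):
        super().__init__()   # runs get_vocab() before sp_model exists
        self.sp_model = {}   # stands in for the SentencePiece processor

    def get_vocab(self):
        return dict(self.sp_model)  # needs self.sp_model


class FixedTokenizer(BaseTokenizer):
    def __init__(self):
        self.sp_model = {}   # create dependent state first ...
        super().__init__()   # ... then let the base class use it

    def get_vocab(self):
        return dict(self.sp_model)


FixedTokenizer()   # fine
BrokenTokenizer()  # AttributeError: 'BrokenTokenizer' object has no attribute 'sp_model'
```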
So I moved the superclass initialization to the end of `__init__`, after `sp_model` has been created.
replit_lm_tokenizer.py  +2 -1
```diff
@@ -63,10 +63,11 @@ class ReplitLMTokenizer(PreTrainedTokenizer):
 
     def __init__(self, vocab_file, bos_token=None, eos_token='<|endoftext|>', unk_token='<|unk|>', pad_token='<|pad|>', sep_token=None, sp_model_kwargs: Optional[Dict[str, Any]]=None, **kwargs) -> None:
         self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
-        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
+        # super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
         self.vocab_file = vocab_file
         self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
         self.sp_model.Load(vocab_file)
+        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
 
     @property
     def vocab_size(self):
```
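A quick sanity check after the change (assuming the patched `replit_lm_tokenizer.py` is the copy that `trust_remote_code` picks up from the Hub cache):

```python
from transformers import AutoTokenizer

# Loads the custom ReplitLMTokenizer defined in replit_lm_tokenizer.py.
tokenizer = AutoTokenizer.from_pretrained(
    "replit/replit-code-v1-3b",
    trust_remote_code=True,
)

# Previously raised AttributeError during __init__; now sp_model exists
# by the time the base class asks for the vocabulary.
print(tokenizer.vocab_size)
```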