RecursionError: maximum recursion depth exceeded while calling a Python object

#6
by havocy28 - opened

Using the example:

```python
qa_pipeline = pipeline("question-answering", model="medalpaca/medalpaca-13b", tokenizer="medalpaca/medalpaca-13b")
```

I get the error:

RecursionError Traceback (most recent call last)
Cell In[4], line 1
----> 1 qa_pipeline = pipeline("question-answering", model="medalpaca/medalpaca-13b", tokenizer="medalpaca/medalpaca-13b")

File ~/hf/lib/python3.10/site-packages/transformers/pipelines/__init__.py:885, in pipeline(task, model, config, tokenizer, feature_extractor, image_processor, framework, revision, use_fast, use_auth_token, device, device_map, torch_dtype, trust_remote_code, model_kwargs, pipeline_class, **kwargs)
882 tokenizer_kwargs = model_kwargs.copy()
883 tokenizer_kwargs.pop("torch_dtype", None)
--> 885 tokenizer = AutoTokenizer.from_pretrained(
886 tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
887 )
889 if load_image_processor:
890 # Try to infer image processor from model or config name (if provided as str)
891 if image_processor is None:

File ~/hf/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py:694, in AutoTokenizer.from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs)
690 if tokenizer_class is None:
691 raise ValueError(
692 f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
693 )
--> 694 return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
696 # Otherwise we have to be creative.
697 # if model is an encoder decoder, the encoder tokenizer class is used by default
698 if isinstance(config, EncoderDecoderConfig):

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1820, in PreTrainedTokenizerBase.from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs)
1817 else:
1818 logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}")
-> 1820 return cls._from_pretrained(
1821 resolved_vocab_files,
1822 pretrained_model_name_or_path,
1823 init_configuration,
1824 *init_inputs,
1825 use_auth_token=use_auth_token,
1826 cache_dir=cache_dir,
1827 local_files_only=local_files_only,
1828 _commit_hash=commit_hash,
1829 _is_local=is_local,
1830 **kwargs,
1831 )

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1983, in PreTrainedTokenizerBase._from_pretrained(cls, resolved_vocab_files, pretrained_model_name_or_path, init_configuration, use_auth_token, cache_dir, local_files_only, _commit_hash, _is_local, *init_inputs, **kwargs)
1981 # Instantiate tokenizer.
1982 try:
-> 1983 tokenizer = cls(*init_inputs, **init_kwargs)
1984 except OSError:
1985 raise OSError(
1986 "Unable to load vocabulary from file. "
1987 "Please check that the provided vocabulary is accessible and not corrupted."
1988 )

File ~/hf/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py:104, in LlamaTokenizerFast.__init__(self, vocab_file, tokenizer_file, clean_up_tokenization_spaces, unk_token, bos_token, eos_token, add_bos_token, add_eos_token, **kwargs)
102 self._add_bos_token = add_bos_token
103 self._add_eos_token = add_eos_token
--> 104 self.update_post_processor()
106 self.vocab_file = vocab_file
107 self.can_save_slow_tokenizer = False if not self.vocab_file else True

File ~/hf/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama_fast.py:111, in LlamaTokenizerFast.update_post_processor(self)
109 def update_post_processor(self):
110 bos = self.bos_token
--> 111 bos_token_id = self.bos_token_id
113 eos = self.eos_token
114 eos_token_id = self.eos_token_id

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1131, in SpecialTokensMixin.bos_token_id(self)
1129 if self._bos_token is None:
1130 return None
-> 1131 return self.convert_tokens_to_ids(self.bos_token)

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:250, in PreTrainedTokenizerFast.convert_tokens_to_ids(self, tokens)
247 return None
249 if isinstance(tokens, str):
--> 250 return self._convert_token_to_id_with_added_voc(tokens)
252 return [self._convert_token_to_id_with_added_voc(token) for token in tokens]

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:257, in PreTrainedTokenizerFast._convert_token_to_id_with_added_voc(self, token)
255 index = self._tokenizer.token_to_id(token)
256 if index is None:
--> 257 return self.unk_token_id
258 return index

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1150, in SpecialTokensMixin.unk_token_id(self)
1148 if self._unk_token is None:
1149 return None
-> 1150 return self.convert_tokens_to_ids(self.unk_token)

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:250, in PreTrainedTokenizerFast.convert_tokens_to_ids(self, tokens)
247 return None
249 if isinstance(tokens, str):
--> 250 return self._convert_token_to_id_with_added_voc(tokens)
252 return [self._convert_token_to_id_with_added_voc(token) for token in tokens]

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:257, in PreTrainedTokenizerFast._convert_token_to_id_with_added_voc(self, token)
255 index = self._tokenizer.token_to_id(token)
256 if index is None:
--> 257 return self.unk_token_id
258 return index

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1150, in SpecialTokensMixin.unk_token_id(self)
1148 if self._unk_token is None:
1149 return None
-> 1150 return self.convert_tokens_to_ids(self.unk_token)

[... skipping similar frames: PreTrainedTokenizerFast._convert_token_to_id_with_added_voc at line 257 (985 times), PreTrainedTokenizerFast.convert_tokens_to_ids at line 250 (985 times), SpecialTokensMixin.unk_token_id at line 1150 (985 times)]

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:250, in PreTrainedTokenizerFast.convert_tokens_to_ids(self, tokens)
247 return None
249 if isinstance(tokens, str):
--> 250 return self._convert_token_to_id_with_added_voc(tokens)
252 return [self._convert_token_to_id_with_added_voc(token) for token in tokens]

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_fast.py:257, in PreTrainedTokenizerFast._convert_token_to_id_with_added_voc(self, token)
255 index = self._tokenizer.token_to_id(token)
256 if index is None:
--> 257 return self.unk_token_id
258 return index

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1150, in SpecialTokensMixin.unk_token_id(self)
1148 if self._unk_token is None:
1149 return None
-> 1150 return self.convert_tokens_to_ids(self.unk_token)

File ~/hf/lib/python3.10/site-packages/transformers/tokenization_utils_base.py:1030, in SpecialTokensMixin.unk_token(self)
1028 logger.error("Using unk_token, but it is not set yet.")
1029 return None
-> 1030 return str(self._unk_token)

RecursionError: maximum recursion depth exceeded while calling a Python object
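The truncated frames make the cycle visible: bos_token_id calls convert_tokens_to_ids, the token is missing from the fast tokenizer's vocabulary, so _convert_token_to_id_with_added_voc falls back to unk_token_id, which calls convert_tokens_to_ids again, and so on until the recursion limit. A minimal self-contained illustration of that shape (not the actual transformers source, just the pattern of the bug):

```python
# Sketch of the mutual recursion shown in the traceback above. When the
# unk token itself is missing from the vocabulary, looking up unk_token_id
# re-enters convert_tokens_to_ids with the same token and never terminates.
class BrokenTokenizer:
    unk_token = "<unk>"

    def token_to_id(self, token):
        return None  # pretend every token, including "<unk>", is out of vocab

    @property
    def unk_token_id(self):
        return self.convert_tokens_to_ids(self.unk_token)

    def convert_tokens_to_ids(self, token):
        index = self.token_to_id(token)
        if index is None:
            return self.unk_token_id  # falls straight back into the same lookup
        return index

BrokenTokenizer().unk_token_id  # raises RecursionError
```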

I am also getting this error.

I believe it's caused by the issue described here, where the tokenizer configuration on the model card was saved with an older version of the tokenizer library: https://github.com/huggingface/transformers/issues/22762
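Until the files are updated, one workaround reported on that issue is to build the tokenizer yourself with the special tokens passed explicitly, so the unk_token lookup never has to recurse. A sketch, untested against this exact checkpoint; the token strings below are the usual LLaMA defaults and should be checked against the model's tokenizer_config.json:

```python
from transformers import LlamaTokenizer, pipeline

# Possible workaround for transformers issue #22762: passing the special
# tokens explicitly avoids the recursive unk_token_id lookup triggered by
# the stale tokenizer config. Loading the slow tokenizer (use_fast=False)
# has also been reported to help.
tokenizer = LlamaTokenizer.from_pretrained(
    "medalpaca/medalpaca-13b",
    unk_token="<unk>",   # assumed LLaMA default, verify before use
    bos_token="<s>",     # assumed LLaMA default, verify before use
    eos_token="</s>",    # assumed LLaMA default, verify before use
)
qa_pipeline = pipeline(
    "question-answering",
    model="medalpaca/medalpaca-13b",
    tokenizer=tokenizer,
)
```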

Any chance you can confirm and get this updated, @kbressem, @dtruhn, or @peterhan91?

medalpaca org

We will probably update the model in the future. There are plenty of things we'd like to improve, but it will take some time. In the meantime, try out the Inferer class on our GitHub repo. This should work.
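For reference, usage roughly follows the repo's README. The import path, argument order, and prompt template filename below are assumptions; check https://github.com/kbressem/medAlpaca for the exact invocation:

```python
# Rough sketch of running medAlpaca through the repo's Inferer class.
# Module path and prompt-template path are assumptions based on the repo
# layout; consult the README before running.
from medalpaca.inferer import Inferer  # assumed module path

medalpaca = Inferer(
    "medalpaca/medalpaca-13b",          # model checkpoint on the Hub
    "prompt_templates/medalpaca.json",  # assumed prompt template path
)
response = medalpaca(input="What are the symptoms of diabetes?")
print(response)
```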

kbressem changed discussion status to closed
