Getting this error:


Code:

```
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("mrm8488/falcon-7b-ft-codeAlpaca_20k-v2", trust_remote_code=True)
model = AutoModel.from_pretrained("mrm8488/falcon-7b-ft-codeAlpaca_20k-v2", trust_remote_code=True, device='cpu')
model = model.eval()
```

Error:
```

HTTPError Traceback (most recent call last)
File ~/.local/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py:261, in hf_raise_for_status(response, endpoint_name)
260 try:
--> 261 response.raise_for_status()
262 except HTTPError as e:

File ~/.local/lib/python3.11/site-packages/requests/models.py:941, in Response.raise_for_status(self)
940 if http_error_msg:
--> 941 raise HTTPError(http_error_msg, response=self)

HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/mrm8488/falcon-7b-ft-codeAlpaca_20k-v2/resolve/main/config.json

The above exception was the direct cause of the following exception:

EntryNotFoundError Traceback (most recent call last)
File ~/.local/lib/python3.11/site-packages/transformers/utils/hub.py:417, in cached_file(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, use_auth_token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash)
415 try:
416 # Load from URL or cache if already cached
--> 417 resolved_file = hf_hub_download(
418 path_or_repo_id,
419 filename,
420 subfolder=None if len(subfolder) == 0 else subfolder,
421 repo_type=repo_type,
422 revision=revision,
423 cache_dir=cache_dir,
424 user_agent=user_agent,
425 force_download=force_download,
426 proxies=proxies,
427 resume_download=resume_download,
428 use_auth_token=use_auth_token,
429 local_files_only=local_files_only,
430 )
432 except RepositoryNotFoundError:

File ~/.local/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:118, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
116 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 118 return fn(*args, **kwargs)

File ~/.local/lib/python3.11/site-packages/huggingface_hub/file_download.py:1195, in hf_hub_download(repo_id, filename, subfolder, repo_type, revision, library_name, library_version, cache_dir, local_dir, local_dir_use_symlinks, user_agent, force_download, force_filename, proxies, etag_timeout, resume_download, token, local_files_only, legacy_cache_layout)
1194 try:
-> 1195 metadata = get_hf_file_metadata(
1196 url=url,
1197 token=token,
1198 proxies=proxies,
1199 timeout=etag_timeout,
1200 )
1201 except EntryNotFoundError as http_error:
1202 # Cache the non-existence of the file and raise

File ~/.local/lib/python3.11/site-packages/huggingface_hub/utils/_validators.py:118, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
116 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 118 return fn(*args, **kwargs)

File ~/.local/lib/python3.11/site-packages/huggingface_hub/file_download.py:1541, in get_hf_file_metadata(url, token, proxies, timeout)
1532 r = _request_wrapper(
1533 method="HEAD",
1534 url=url,
(...)
1539 timeout=timeout,
1540 )
-> 1541 hf_raise_for_status(r)
1543 # Return

File ~/.local/lib/python3.11/site-packages/huggingface_hub/utils/_errors.py:271, in hf_raise_for_status(response, endpoint_name)
270 message = f"{response.status_code} Client Error." + "\n\n" + f"Entry Not Found for url: {response.url}."
--> 271 raise EntryNotFoundError(message, response) from e
273 elif error_code == "GatedRepo":

EntryNotFoundError: 404 Client Error. (Request ID: Root=1-64ca5159-4ce311de28fdd5074ac97cae;fab9434f-6048-4c81-8e98-2abc750ea954)

Entry Not Found for url: https://huggingface.co/mrm8488/falcon-7b-ft-codeAlpaca_20k-v2/resolve/main/config.json.

During handling of the above exception, another exception occurred:

OSError Traceback (most recent call last)
Cell In[3], line 3
1 from transformers import AutoTokenizer, AutoModel
2 tokenizer = AutoTokenizer.from_pretrained("mrm8488/falcon-7b-ft-codeAlpaca_20k-v2", trust_remote_code=True)
----> 3 model = AutoModel.from_pretrained("mrm8488/falcon-7b-ft-codeAlpaca_20k-v2", trust_remote_code=True, device='cpu')
4 model = model.eval()

File ~/.local/lib/python3.11/site-packages/transformers/models/auto/auto_factory.py:456, in _BaseAutoModelClass.from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
453 if kwargs.get("torch_dtype", None) == "auto":
454 _ = kwargs.pop("torch_dtype")
--> 456 config, kwargs = AutoConfig.from_pretrained(
457 pretrained_model_name_or_path,
458 return_unused_kwargs=True,
459 trust_remote_code=trust_remote_code,
460 **hub_kwargs,
461 **kwargs,
462 )
464 # if torch_dtype=auto was passed here, ensure to pass it on
465 if kwargs_orig.get("torch_dtype", None) == "auto":

File ~/.local/lib/python3.11/site-packages/transformers/models/auto/configuration_auto.py:944, in AutoConfig.from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
942 kwargs["name_or_path"] = pretrained_model_name_or_path
943 trust_remote_code = kwargs.pop("trust_remote_code", None)
--> 944 config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
945 has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]
946 has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING

File ~/.local/lib/python3.11/site-packages/transformers/configuration_utils.py:574, in PretrainedConfig.get_config_dict(cls, pretrained_model_name_or_path, **kwargs)
572 original_kwargs = copy.deepcopy(kwargs)
573 # Get config dict associated with the base config file
--> 574 config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
575 if "_commit_hash" in config_dict:
576 original_kwargs["_commit_hash"] = config_dict["_commit_hash"]

File ~/.local/lib/python3.11/site-packages/transformers/configuration_utils.py:629, in PretrainedConfig._get_config_dict(cls, pretrained_model_name_or_path, **kwargs)
625 configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME)
627 try:
628 # Load from local folder or from cache or download from model Hub and cache
--> 629 resolved_config_file = cached_file(
630 pretrained_model_name_or_path,
631 configuration_file,
632 cache_dir=cache_dir,
633 force_download=force_download,
634 proxies=proxies,
635 resume_download=resume_download,
636 local_files_only=local_files_only,
637 use_auth_token=use_auth_token,
638 user_agent=user_agent,
639 revision=revision,
640 subfolder=subfolder,
641 _commit_hash=commit_hash,
642 )
643 commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
644 except EnvironmentError:
645 # Raise any environment error raise by cached_file. It will have a helpful error message adapted to
646 # the original exception.

File ~/.local/lib/python3.11/site-packages/transformers/utils/hub.py:463, in cached_file(path_or_repo_id, filename, cache_dir, force_download, resume_download, proxies, use_auth_token, revision, local_files_only, subfolder, repo_type, user_agent, _raise_exceptions_for_missing_entries, _raise_exceptions_for_connection_errors, _commit_hash)
461 if revision is None:
462 revision = "main"
--> 463 raise EnvironmentError(
464 f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout "
465 f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files."
466 )
467 except HTTPError as err:
468 # First we try to see if we have a cached version (not up to date):
469 resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision)

OSError: mrm8488/falcon-7b-ft-codeAlpaca_20k-v2 does not appear to have a file named config.json. Checkout 'https://huggingface.co/mrm8488/falcon-7b-ft-codeAlpaca_20k-v2/main' for available files.
```
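You can confirm what the repo actually contains before loading it. A minimal sketch using `list_repo_files` from `huggingface_hub` (the repo id is the one from the traceback):

```
from huggingface_hub import list_repo_files

# List the files hosted in the Hub repo. A PEFT adapter repo typically
# holds adapter_config.json and adapter weights rather than the full
# config.json that AutoModel.from_pretrained expects.
files = list_repo_files("mrm8488/falcon-7b-ft-codeAlpaca_20k-v2")
print(files)
```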


The repo only contains the adapter weights. You need to load the base model "ybelkada/falcon-7b-sharded-bf16" and then use PEFT to attach the adapter:

```
import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the full base model the adapter was fine-tuned on
model = AutoModelForCausalLM.from_pretrained(
    "ybelkada/falcon-7b-sharded-bf16",
    torch_dtype=torch.bfloat16,
    device_map={"": 0},
)

# Attach the fine-tuned adapter weights from the adapter repo
model = PeftModel.from_pretrained(model, "mrm8488/falcon-7b-ft-codeAlpaca_20k-v2")

# Optionally fold the adapter into the base weights
model = model.merge_and_unload()
```
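After merging, the model behaves like a regular causal LM. A quick usage sketch (the tokenizer load mirrors the one from the original post, which succeeded; the prompt is illustrative):

```
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mrm8488/falcon-7b-ft-codeAlpaca_20k-v2")

# Tokenize a prompt, move it to the model's device, and generate
inputs = tokenizer("Write a Python function that reverses a string.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```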
