Can't load using HF diffusers pipeline
#6 by cfchase - opened
Trying to run:

from diffusers import DiffusionPipeline
pipeline = DiffusionPipeline.from_pretrained("RunDiffusion/Juggernaut-XL-v9")

results in an error:

OSError: Error no file named pytorch_model.bin, tf_model.h5, model.ckpt.index or flax_model.msgpack found in directory /opt/app-root/src/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v9/snapshots/845f18ca754ac68e51db5144e6fa07835599b634/text_encoder_2.

Is there a dependency issue or something else? stabilityai/stable-diffusion-xl-base-1.0 seems to work fine for me.
Using the following dependencies:
accelerate 0.28.0
diffusers 0.27.2
flash-attn 2.5.6
torch 2.2.2
torchvision 0.17.2
transformers 4.39.3
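
For reference, the installed versions can be double-checked programmatically; here is a minimal sketch using importlib.metadata (package names copied from the list above):

from importlib.metadata import version, PackageNotFoundError

# Print the installed version of each package listed above to confirm the
# environment matches.
for pkg in ["accelerate", "diffusers", "flash-attn", "torch", "torchvision", "transformers"]:
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, "not found under this name")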
Full error:
Loading pipeline components...: 0%| | 0/7 [00:00<?, ?it/s]
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
Cell In[20], line 3
1 from diffusers import DiffusionPipeline
----> 3 pipeline = DiffusionPipeline.from_pretrained("RunDiffusion/Juggernaut-XL-v9")
File /opt/app-root/lib64/python3.9/site-packages/huggingface_hub/utils/_validators.py:119, in validate_hf_hub_args.<locals>._inner_fn(*args, **kwargs)
116 if check_use_auth_token:
117 kwargs = smoothly_deprecate_use_auth_token(fn_name=fn.__name__, has_token=has_token, kwargs=kwargs)
--> 119 return fn(*args, **kwargs)
File /opt/app-root/lib64/python3.9/site-packages/diffusers/pipelines/pipeline_utils.py:819, in DiffusionPipeline.from_pretrained(cls, pretrained_model_name_or_path, **kwargs)
816 loaded_sub_model = passed_class_obj[name]
817 else:
818 # load sub model
--> 819 loaded_sub_model = load_sub_model(
820 library_name=library_name,
821 class_name=class_name,
822 importable_classes=importable_classes,
823 pipelines=pipelines,
824 is_pipeline_module=is_pipeline_module,
825 pipeline_class=pipeline_class,
826 torch_dtype=torch_dtype,
827 provider=provider,
828 sess_options=sess_options,
829 device_map=device_map,
830 max_memory=max_memory,
831 offload_folder=offload_folder,
832 offload_state_dict=offload_state_dict,
833 model_variants=model_variants,
834 name=name,
835 from_flax=from_flax,
836 variant=variant,
837 low_cpu_mem_usage=low_cpu_mem_usage,
838 cached_folder=cached_folder,
839 )
840 logger.info(
841 f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}."
842 )
844 init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)
File /opt/app-root/lib64/python3.9/site-packages/diffusers/pipelines/pipeline_loading_utils.py:491, in load_sub_model(library_name, class_name, importable_classes, pipelines, is_pipeline_module, pipeline_class, torch_dtype, provider, sess_options, device_map, max_memory, offload_folder, offload_state_dict, model_variants, name, from_flax, variant, low_cpu_mem_usage, cached_folder)
489 # check if the module is in a subdirectory
490 if os.path.isdir(os.path.join(cached_folder, name)):
--> 491 loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
492 else:
493 # else load from the root directory
494 loaded_sub_model = load_method(cached_folder, **loading_kwargs)
File /opt/app-root/lib64/python3.9/site-packages/transformers/modeling_utils.py:3144, in PreTrainedModel.from_pretrained(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, *model_args, **kwargs)
3139 raise EnvironmentError(
3140 f"Error no file named {_add_variant(SAFE_WEIGHTS_NAME, variant)} found in directory"
3141 f" {pretrained_model_name_or_path}."
3142 )
3143 else:
-> 3144 raise EnvironmentError(
3145 f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME},"
3146 f" {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory"
3147 f" {pretrained_model_name_or_path}."
3148 )
3149 elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
3150 archive_file = pretrained_model_name_or_path
OSError: Error no file named pytorch_model.bin, tf_model.h5, model.ckpt.index or flax_model.msgpack found in directory /opt/app-root/src/.cache/huggingface/hub/models--RunDiffusion--Juggernaut-XL-v9/snapshots/845f18ca754ac68e51db5144e6fa07835599b634/text_encoder_2.
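
As a quick diagnostic, the weight files the repo actually publishes for that subfolder can be listed with huggingface_hub (a minimal sketch; list_repo_files is a standard huggingface_hub helper, not something from the original post):

from huggingface_hub import list_repo_files

# List everything the repo ships under text_encoder_2/ to see which weight
# formats (e.g. safetensors variants) are available there.
files = list_repo_files("RunDiffusion/Juggernaut-XL-v9")
print([f for f in files if f.startswith("text_encoder_2/")])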
It works when loading in single-file mode:
from diffusers import StableDiffusionXLPipeline
import torch

pipeline = StableDiffusionXLPipeline.from_single_file(
    "https://huggingface.co/RunDiffusion/Juggernaut-XL-v9/blob/main/Juggernaut-XL_v9_RunDiffusionPhoto_v2.safetensors",
    torch_dtype=torch.float16
).to("cuda")
It might be beneficial to add that to the model card.
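
As a side note, if the repository only publishes fp16-variant safetensors for some components (an assumption about this repo, not verified here), passing variant="fp16" and torch_dtype to from_pretrained may also work; a hedged sketch:

from diffusers import DiffusionPipeline
import torch

# Assumption: the repo ships *.fp16.safetensors weights; requesting the fp16
# variant lets from_pretrained resolve them instead of pytorch_model.bin.
pipeline = DiffusionPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")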
cfchase changed discussion status to closed