Upload folder using huggingface_hub
- main/README.md +1 -1
- main/adaptive_mask_inpainting.py +6 -6
- main/composable_stable_diffusion.py +3 -3
- main/fresco_v2v.py +1 -1
- main/hd_painter.py +1 -1
- main/img2img_inpainting.py +1 -1
- main/instaflow_one_step.py +3 -3
- main/ip_adapter_face_id.py +3 -3
- main/kohya_hires_fix.py +1 -1
- main/latent_consistency_interpolate.py +1 -1
- main/llm_grounded_diffusion.py +3 -3
- main/lpw_stable_diffusion.py +2 -2
- main/lpw_stable_diffusion_xl.py +1 -1
- main/matryoshka.py +2 -2
- main/multilingual_stable_diffusion.py +1 -1
- main/pipeline_controlnet_xl_kolors_inpaint.py +1 -1
- main/pipeline_fabric.py +3 -3
- main/pipeline_kolors_inpainting.py +1 -1
- main/pipeline_prompt2prompt.py +3 -3
- main/pipeline_sdxl_style_aligned.py +1 -1
- main/pipeline_stable_diffusion_boxdiff.py +4 -4
- main/pipeline_stable_diffusion_pag.py +4 -4
- main/pipeline_stable_diffusion_upscale_ldm3d.py +1 -1
- main/pipeline_stable_diffusion_xl_attentive_eraser.py +1 -1
- main/pipeline_stable_diffusion_xl_controlnet_adapter.py +1 -1
- main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py +2 -2
- main/pipeline_zero1to3.py +4 -4
- main/rerender_a_video.py +1 -1
- main/run_onnx_controlnet.py +1 -1
- main/run_tensorrt_controlnet.py +1 -1
- main/sd_text2img_k_diffusion.py +1 -1
- main/sde_drag.py +1 -1
- main/stable_diffusion_comparison.py +1 -1
- main/stable_diffusion_controlnet_img2img.py +1 -1
- main/stable_diffusion_controlnet_inpaint.py +1 -1
- main/stable_diffusion_controlnet_inpaint_img2img.py +1 -1
- main/stable_diffusion_controlnet_reference.py +1 -1
- main/stable_diffusion_ipex.py +4 -4
- main/stable_diffusion_mega.py +1 -1
- main/stable_diffusion_reference.py +4 -4
- main/stable_diffusion_repaint.py +3 -3
- main/stable_diffusion_tensorrt_img2img.py +3 -3
- main/stable_diffusion_tensorrt_inpaint.py +3 -3
- main/stable_diffusion_tensorrt_txt2img.py +3 -3
- main/text_inpainting.py +1 -1
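
All of the hunks below make the same substitution: docstring links, example snippets, and warning messages now point at the `stable-diffusion-v1-5/stable-diffusion-v1-5` and `stable-diffusion-v1-5/stable-diffusion-inpainting` repo ids. A minimal sketch of loading a pipeline with the updated text-to-image repo id, mirroring the docstring examples touched by this commit (assumes `diffusers` and `torch` are installed; prompt and dtype are illustrative):

```python
import torch
from diffusers import StableDiffusionPipeline

# Load SD v1.5 from the repo id used throughout this commit
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

image = pipe("a photo of an astronaut riding a horse on mars").images[0]
```
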
main/README.md
CHANGED

@@ -1328,7 +1328,7 @@ model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"
 
 # Load Stable Diffusion Inpainting Pipeline with custom pipeline
 pipe = DiffusionPipeline.from_pretrained(
-"
+"stable-diffusion-v1-5/stable-diffusion-inpainting",
 custom_pipeline="text_inpainting",
 segmentation_model=model,
 segmentation_processor=processor

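The README hunk above belongs to the `text_inpainting` custom-pipeline example, which pairs the inpainting checkpoint with a CLIPSeg segmentation model (visible in the hunk header). A sketch of the full setup under that assumption; the commented call at the end uses argument names from the community pipeline's example and should be treated as illustrative:

```python
import torch
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
from diffusers import DiffusionPipeline

# Segmentation model used to turn a text query into an inpainting mask
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=model,
    segmentation_processor=processor,
    torch_dtype=torch.float16,
).to("cuda")

# `text` selects the region to replace, `prompt` describes what to paint there
# (argument names assumed from the community pipeline's docstring)
# image = pipe(image=init_image, text="a glass", prompt="a cup").images[0]
```
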
main/adaptive_mask_inpainting.py
CHANGED

@@ -126,7 +126,7 @@ EXAMPLE_DOC_STRING = """
 ... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
 ... )
 >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-... "
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
 ... )
 
 >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

@@ -347,7 +347,7 @@ class AdaptiveMaskInpaintPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -429,8 +429,8 @@ class AdaptiveMaskInpaintPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

@@ -970,7 +970,7 @@ class AdaptiveMaskInpaintPipeline(
 >>> default_mask_image = download_image(mask_url).resize((512, 512))
 
 >>> pipe = AdaptiveMaskInpaintPipeline.from_pretrained(
-... "
+... "stable-diffusion-v1-5/stable-diffusion-inpainting", torch_dtype=torch.float16
 ... )
 >>> pipe = pipe.to("cuda")
 

@@ -1095,7 +1095,7 @@ class AdaptiveMaskInpaintPipeline(
 
 # 8. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:

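The last hunk above touches the comment on the `num_channels_unet == 9` branch, which is the standard Stable Diffusion inpainting layout: the UNet receives the noisy latents, the downsampled mask, and the masked-image latents concatenated along the channel dimension. A small sketch of the arithmetic that check enforces, with channel counts stated as an assumption (the usual SD v1.5 inpainting values):

```python
# Channel bookkeeping behind the `in_channels == 9` check (assumed SD v1.5
# inpainting layout: 4 latent + 1 mask + 4 masked-image channels).
num_channels_latents = 4       # noisy VAE latents being denoised
num_channels_mask = 1          # inpainting mask, resized to latent resolution
num_channels_masked_image = 4  # VAE latents of the masked input image

unet_in_channels = 9           # what the inpainting UNet config declares
assert num_channels_latents + num_channels_mask + num_channels_masked_image == unet_in_channels
```
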
main/composable_stable_diffusion.py
CHANGED

@@ -62,7 +62,7 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -145,8 +145,8 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

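The deprecation message reconstructed above tells users with an old checkpoint config to change `sample_size` to 64. A hedged sketch of one way to apply that while loading, relying on `from_pretrained` accepting config overrides as keyword arguments (repo id and override shown for illustration only):

```python
from diffusers import StableDiffusionPipeline, UNet2DConditionModel

# Load the UNet with `sample_size` overridden to 64, as the warning suggests
# (kwarg-based config override; treat this as a sketch, not the canonical fix).
unet = UNet2DConditionModel.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", sample_size=64
)
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", unet=unet
)
```
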
main/fresco_v2v.py
CHANGED

@@ -1276,7 +1276,7 @@ class FrescoV2VPipeline(StableDiffusionControlNetImg2ImgPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

main/hd_painter.py
CHANGED

@@ -678,7 +678,7 @@ class StableDiffusionHDPainterPipeline(StableDiffusionInpaintPipeline):
 
 # 8. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:

main/img2img_inpainting.py
CHANGED

@@ -78,7 +78,7 @@ class ImageToImageInpaintingPipeline(DiffusionPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/instaflow_one_step.py
CHANGED

@@ -86,7 +86,7 @@ class InstaFlowPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -165,8 +165,8 @@ class InstaFlowPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/ip_adapter_face_id.py
CHANGED

@@ -166,7 +166,7 @@ class IPAdapterFaceIDStableDiffusionPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -247,8 +247,8 @@ class IPAdapterFaceIDStableDiffusionPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/kohya_hires_fix.py
CHANGED

@@ -414,7 +414,7 @@ class StableDiffusionHighResFixPipeline(StableDiffusionPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

main/latent_consistency_interpolate.py
CHANGED

@@ -222,7 +222,7 @@ class LatentConsistencyModelWalkPipeline(
 supports [`LCMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

main/llm_grounded_diffusion.py
CHANGED

@@ -302,7 +302,7 @@ class LLMGroundedDiffusionPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -392,8 +392,8 @@ class LLMGroundedDiffusionPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/lpw_stable_diffusion.py
CHANGED

@@ -552,8 +552,8 @@ class StableDiffusionLongPromptWeightingPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/lpw_stable_diffusion_xl.py
CHANGED

@@ -1765,7 +1765,7 @@ class SDXLLongPromptWeightingPipeline(
 
 # Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet:

main/matryoshka.py
CHANGED

@@ -3729,8 +3729,8 @@ class MatryoshkaPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/multilingual_stable_diffusion.py
CHANGED

@@ -78,7 +78,7 @@ class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/pipeline_controlnet_xl_kolors_inpaint.py
CHANGED

@@ -1607,7 +1607,7 @@ class KolorsControlNetInpaintPipeline(
 
 # 9. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:

main/pipeline_fabric.py
CHANGED

@@ -135,7 +135,7 @@ class FabricPipeline(DiffusionPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 """
 

@@ -163,8 +163,8 @@ class FabricPipeline(DiffusionPipeline):
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/pipeline_kolors_inpainting.py
CHANGED

@@ -1487,7 +1487,7 @@ class KolorsInpaintPipeline(
 
 # 8. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:

main/pipeline_prompt2prompt.py
CHANGED

@@ -106,7 +106,7 @@ class Prompt2PromptPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -187,8 +187,8 @@ class Prompt2PromptPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/pipeline_sdxl_style_aligned.py
CHANGED

@@ -1730,7 +1730,7 @@ class StyleAlignedSDXLPipeline(
 
 # Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != num_channels_unet:

main/pipeline_stable_diffusion_boxdiff.py
CHANGED

@@ -59,7 +59,7 @@ EXAMPLE_DOC_STRING = """
 >>> import torch
 >>> from diffusers import StableDiffusionPipeline
 
->>> pipe = StableDiffusionPipeline.from_pretrained("
+>>> pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16)
 >>> pipe = pipe.to("cuda")
 
 >>> prompt = "a photo of an astronaut riding a horse on mars"

@@ -392,7 +392,7 @@ class StableDiffusionBoxDiffPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -473,8 +473,8 @@ class StableDiffusionBoxDiffPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/pipeline_stable_diffusion_pag.py
CHANGED

@@ -42,7 +42,7 @@ EXAMPLE_DOC_STRING = """
 ```py
 >>> import torch
 >>> from diffusers import StableDiffusionPipeline
->>> pipe = StableDiffusionPipeline.from_pretrained("
+>>> pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16)
 >>> pipe = pipe.to("cuda")
 >>> prompt = "a photo of an astronaut riding a horse on mars"
 >>> image = pipe(prompt).images[0]

@@ -359,7 +359,7 @@ class StableDiffusionPAGPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

@@ -440,8 +440,8 @@ class StableDiffusionPAGPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/pipeline_stable_diffusion_upscale_ldm3d.py
CHANGED

@@ -100,7 +100,7 @@ class StableDiffusionUpscaleLDM3DPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please refer to the [model card](https://huggingface.co/
+Please refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for more details
 about a model's potential harms.
 feature_extractor ([`~transformers.CLIPImageProcessor`]):
 A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.

main/pipeline_stable_diffusion_xl_attentive_eraser.py
CHANGED

@@ -2042,7 +2042,7 @@ class StableDiffusionXL_AE_Pipeline(
 
 # 8. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:

main/pipeline_stable_diffusion_xl_controlnet_adapter.py
CHANGED

@@ -188,7 +188,7 @@ class StableDiffusionXLControlNetAdapterPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/pipeline_stable_diffusion_xl_controlnet_adapter_inpaint.py
CHANGED

@@ -330,7 +330,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 requires_aesthetics_score (`bool`, *optional*, defaults to `"False"`):

@@ -1569,7 +1569,7 @@ class StableDiffusionXLControlNetAdapterInpaintPipeline(
 
 # 8. Check that sizes of mask, masked image and latents match
 if num_channels_unet == 9:
-# default case for
+# default case for stable-diffusion-v1-5/stable-diffusion-inpainting
 num_channels_mask = mask.shape[1]
 num_channels_masked_image = masked_image_latents.shape[1]
 if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:

main/pipeline_zero1to3.py
CHANGED

@@ -46,7 +46,7 @@ EXAMPLE_DOC_STRING = """
 >>> import torch
 >>> from diffusers import StableDiffusionPipeline
 
->>> pipe = StableDiffusionPipeline.from_pretrained("
+>>> pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16)
 >>> pipe = pipe.to("cuda")
 
 >>> prompt = "a photo of an astronaut riding a horse on mars"

@@ -86,7 +86,7 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 cc_projection ([`CCProjection`]):

@@ -164,8 +164,8 @@ class Zero1to3StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/rerender_a_video.py
CHANGED

@@ -288,7 +288,7 @@ class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/run_onnx_controlnet.py
CHANGED
@@ -54,7 +54,7 @@ EXAMPLE_DOC_STRING = """
 >>> # load control net and stable diffusion v1-5
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-... "
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
 ... )

 >>> # speed up diffusion process with faster scheduler and memory optimization

main/run_tensorrt_controlnet.py
CHANGED
@@ -158,7 +158,7 @@ EXAMPLE_DOC_STRING = """
 >>> # load control net and stable diffusion v1-5
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-... "
+... "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
 ... )

 >>> # speed up diffusion process with faster scheduler and memory optimization

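Both ControlNet img2img diffs above (`run_onnx_controlnet.py` and `run_tensorrt_controlnet.py`) only swap the base checkpoint id in the docstring example. Below is a minimal, self-contained sketch of that updated loading pattern using the stock `StableDiffusionControlNetImg2ImgPipeline`; the canny preprocessing, prompt, step count, and input-image URL are illustrative assumptions, not part of the diff.

```python
import cv2
import numpy as np
import torch
from PIL import Image

from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetImg2ImgPipeline,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image

# Load the ControlNet and the relocated Stable Diffusion v1-5 checkpoint.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)

# Speed up the diffusion process with a faster scheduler and memory optimization.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

# Build a canny-edge control image from an input photo (illustrative URL).
init_image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
).resize((512, 512))
edges = cv2.Canny(np.array(init_image.convert("L")), 100, 200)
control_image = Image.fromarray(np.stack([edges] * 3, axis=-1))

image = pipe(
    "a detailed oil painting, best quality",
    image=init_image,
    control_image=control_image,
    num_inference_steps=20,
).images[0]
image.save("controlnet_img2img.png")
```
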
main/sd_text2img_k_diffusion.py
CHANGED
@@ -64,7 +64,7 @@ class StableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/sde_drag.py
CHANGED
@@ -114,7 +114,7 @@ class SdeDragPipeline(DiffusionPipeline):
 >>> from diffusers import DDIMScheduler, DiffusionPipeline

 >>> # Load the pipeline
->>> model_path = "
+>>> model_path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 >>> scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
 >>> pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
 >>> pipe.to('cuda')

main/stable_diffusion_comparison.py
CHANGED
@@ -46,7 +46,7 @@ class StableDiffusionComparisonPipeline(DiffusionPipeline, StableDiffusionMixin)
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionMegaSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/stable_diffusion_controlnet_img2img.py
CHANGED
@@ -36,7 +36,7 @@ EXAMPLE_DOC_STRING = """
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)

 >>> pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-"
+"stable-diffusion-v1-5/stable-diffusion-v1-5",
 controlnet=controlnet,
 safety_checker=None,
 torch_dtype=torch.float16

main/stable_diffusion_controlnet_inpaint.py
CHANGED
@@ -81,7 +81,7 @@ EXAMPLE_DOC_STRING = """
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)

 >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
-"
+"stable-diffusion-v1-5/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
 )

 >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

main/stable_diffusion_controlnet_inpaint_img2img.py
CHANGED
@@ -80,7 +80,7 @@ EXAMPLE_DOC_STRING = """
 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg", torch_dtype=torch.float16)

 >>> pipe = StableDiffusionControlNetInpaintImg2ImgPipeline.from_pretrained(
-"
+"stable-diffusion-v1-5/stable-diffusion-inpainting", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
 )

 >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

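The two inpainting diffs above (`stable_diffusion_controlnet_inpaint.py` and `stable_diffusion_controlnet_inpaint_img2img.py`) likewise change only the checkpoint id. As a quick sanity check that the relocated id resolves, the same checkpoint can be loaded with the stock `StableDiffusionInpaintPipeline`; the image/mask URLs, prompt, and step count below are illustrative assumptions.

```python
import torch

from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

# Load the relocated inpainting checkpoint referenced by the diffs above.
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-inpainting", torch_dtype=torch.float16
).to("cuda")

# Illustrative image/mask pair; any 512x512 image with a matching mask works.
init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
).resize((512, 512))
mask_image = load_image(
    "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
).resize((512, 512))

image = pipe(
    prompt="a yellow cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=25,
).images[0]
image.save("inpaint_check.png")
```
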
main/stable_diffusion_controlnet_reference.py
CHANGED
@@ -37,7 +37,7 @@ EXAMPLE_DOC_STRING = """

 >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
 >>> pipe = StableDiffusionControlNetReferencePipeline.from_pretrained(
-"
+"stable-diffusion-v1-5/stable-diffusion-v1-5",
 controlnet=controlnet,
 safety_checker=None,
 torch_dtype=torch.float16

main/stable_diffusion_ipex.py
CHANGED
@@ -43,7 +43,7 @@ EXAMPLE_DOC_STRING = """
 >>> import torch
 >>> from diffusers import StableDiffusionPipeline

->>> pipe = DiffusionPipeline.from_pretrained("
+>>> pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")

 >>> # For Float32
 >>> pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512) #value of image height/width should be consistent with the pipeline inference

@@ -85,7 +85,7 @@ class StableDiffusionIPEXPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -161,8 +161,8 @@ class StableDiffusionIPEXPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

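For `stable_diffusion_ipex.py`, the updated docstring boils down to the short flow sketched below: load the relocated v1-5 checkpoint with `custom_pipeline="stable_diffusion_ipex"`, call `prepare_for_ipex()` once, then run inference on CPU. This assumes `intel_extension_for_pytorch` is installed; the step count is an illustrative choice.

```python
import torch

from diffusers import DiffusionPipeline

# Load the relocated v1-5 checkpoint with the IPEX community pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex"
)

prompt = "a photo of an astronaut riding a horse on mars"

# For float32: the height/width passed here must match the ones used at inference time.
pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512)

image = pipe(prompt, num_inference_steps=20, height=512, width=512).images[0]
image.save("astronaut_ipex.png")
```
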
main/stable_diffusion_mega.py
CHANGED
@@ -47,7 +47,7 @@ class StableDiffusionMegaPipeline(DiffusionPipeline, StableDiffusionMixin):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionMegaSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

main/stable_diffusion_reference.py
CHANGED
@@ -46,7 +46,7 @@ EXAMPLE_DOC_STRING = """
 >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")

 >>> pipe = StableDiffusionReferencePipeline.from_pretrained(
-"
+"stable-diffusion-v1-5/stable-diffusion-v1-5",
 safety_checker=None,
 torch_dtype=torch.float16
 ).to('cuda:0')

@@ -112,7 +112,7 @@ class StableDiffusionReferencePipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -194,8 +194,8 @@ class StableDiffusionReferencePipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

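For `stable_diffusion_reference.py`, the first hunk above shows the updated loading call. A usage sketch is below; the loading path mirrors the updated docstring, while the call keyword arguments (`ref_image`, `reference_attn`, `reference_adain`), prompt, and step count are assumptions based on the community pipeline's documented interface rather than anything in this diff.

```python
import torch

from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Reference image from the docstring example above.
input_image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)

# Load the relocated v1-5 checkpoint with the reference community pipeline.
pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    custom_pipeline="stable_diffusion_reference",
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda:0")

# Assumed call signature of the community pipeline: condition generation on ref_image.
result = pipe(
    ref_image=input_image,
    prompt="a portrait of a girl wearing a hat",
    num_inference_steps=20,
    reference_attn=True,
    reference_adain=True,
).images[0]
result.save("reference_example.png")
```
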
main/stable_diffusion_repaint.py
CHANGED
@@ -167,7 +167,7 @@ class StableDiffusionRepaintPipeline(
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -249,8 +249,8 @@ class StableDiffusionRepaintPipeline(
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/stable_diffusion_tensorrt_img2img.py
CHANGED
@@ -678,7 +678,7 @@ class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -766,8 +766,8 @@ class TensorRTStableDiffusionImg2ImgPipeline(DiffusionPipeline):
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/stable_diffusion_tensorrt_inpaint.py
CHANGED
@@ -682,7 +682,7 @@ class TensorRTStableDiffusionInpaintPipeline(DiffusionPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -770,8 +770,8 @@ class TensorRTStableDiffusionInpaintPipeline(DiffusionPipeline):
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/stable_diffusion_tensorrt_txt2img.py
CHANGED
@@ -594,7 +594,7 @@ class TensorRTStableDiffusionPipeline(DiffusionPipeline):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """

@@ -682,8 +682,8 @@ class TensorRTStableDiffusionPipeline(DiffusionPipeline):
 "The configuration file of the unet has set the default `sample_size` to smaller than"
 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n-
-" \n-
+" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- stable-diffusion-v1-5/stable-diffusion-v1-5"
+" \n- stable-diffusion-v1-5/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
 " in the config might lead to incorrect results in future versions. If you have downloaded this"
 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"

main/text_inpainting.py
CHANGED
@@ -52,7 +52,7 @@ class TextInpainting(DiffusionPipeline, StableDiffusionMixin):
 [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
 safety_checker ([`StableDiffusionSafetyChecker`]):
 Classification module that estimates whether generated images could be considered offensive or harmful.
-Please, refer to the [model card](https://huggingface.co/
+Please, refer to the [model card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) for details.
 feature_extractor ([`CLIPImageProcessor`]):
 Model that extracts features from generated images to be used as inputs for the `safety_checker`.
 """