import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    logging,
)
from diffusers.utils.testing_utils import (
    CaptureLogger,
    backend_empty_cache,
    enable_full_determinism,
    load_numpy,
    nightly,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_device,
)

from ..pipeline_params import (
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    SDFunctionTesterMixin,
)

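# Force deterministic CUDA/CuDNN kernels so the pixel-slice assertions below are
# reproducible across runs.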
enable_full_determinism()


class StableDiffusion2PipelineFastTests(
    SDFunctionTesterMixin,
    PipelineLatentTesterMixin,
    PipelineKarrasSchedulerTesterMixin,
    PipelineTesterMixin,
    unittest.TestCase,
):
    pipeline_class = StableDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS

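    # A tiny, randomly initialized UNet/VAE/CLIP stack, small enough that the
    # fast tests run on CPU in seconds.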
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            # SD2-specific config below
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            # SD2-specific config below
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

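    # MPS does not support device-pinned torch.Generator objects, so fall back
    # to the global RNG there.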
    def get_dummy_inputs(self, device, seed=0):
        generator_device = "cpu" if not str(device).startswith("cuda") else "cuda"
        if not str(device).startswith("mps"):
            generator = torch.Generator(device=generator_device).manual_seed(seed)
        else:
            generator = torch.manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

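    # The tests below swap in a different scheduler and re-check a 3x3 pixel
    # slice of the output against reference values.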
    def test_stable_diffusion_pndm(self):
        device = "cpu"
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_lms(self):
        device = "cpu"
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_euler_ancestral(self):
        device = "cpu"
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_euler(self):
        device = "cpu"
        components = self.get_dummy_components()
        components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

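    # timestep_spacing="trailing" plus guidance_rescale apply the corrections
    # from "Common Diffusion Noise Schedules and Sample Steps are Flawed"
    # (Lin et al.), hence the "unflawed" name.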
    def test_stable_diffusion_unflawed(self):
        device = "cpu"
        components = self.get_dummy_components()
        components["scheduler"] = DDIMScheduler.from_config(
            components["scheduler"].config, timestep_spacing="trailing"
        )
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["guidance_rescale"] = 0.7
        inputs["num_inference_steps"] = 10
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

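    # CLIP encodes at most 77 tokens (including BOS/EOS); longer prompts should
    # be truncated with a warning that lists the dropped tokens, while prompts
    # within the limit should log nothing.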
    def test_stable_diffusion_long_prompt(self):
        components = self.get_dummy_components()
        components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config)
        sd_pipe = StableDiffusionPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        do_classifier_free_guidance = True
        negative_prompt = None
        num_images_per_prompt = 1
        logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion")
        logger.setLevel(logging.WARNING)

        prompt = 25 * "@"
        with CaptureLogger(logger) as cap_logger_3:
            text_embeddings_3, negative_text_embeddings_3 = sd_pipe.encode_prompt(
                prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
            )
            if negative_text_embeddings_3 is not None:
                text_embeddings_3 = torch.cat([negative_text_embeddings_3, text_embeddings_3])

        prompt = 100 * "@"
        with CaptureLogger(logger) as cap_logger:
            text_embeddings, negative_embeddings = sd_pipe.encode_prompt(
                prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
            )
            if negative_embeddings is not None:
                text_embeddings = torch.cat([negative_embeddings, text_embeddings])

        negative_prompt = "Hello"
        with CaptureLogger(logger) as cap_logger_2:
            text_embeddings_2, negative_text_embeddings_2 = sd_pipe.encode_prompt(
                prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
            )
            if negative_text_embeddings_2 is not None:
                text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2])

        assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape
        assert text_embeddings.shape[1] == 77

        assert cap_logger.out == cap_logger_2.out
        # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25
        assert cap_logger.out.count("@") == 25
        assert cap_logger_3.out == ""

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

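# The slow tests below download and run the real stabilityai/stable-diffusion-2-base
# checkpoint, so they require an accelerator and are skipped on MPS.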
@slow
@require_torch_accelerator
@skip_mps
class StableDiffusion2PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"
        if not str(device).startswith("mps"):
            generator = torch.Generator(device=_generator_device).manual_seed(seed)
        else:
            generator = torch.manual_seed(seed)

        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_default_ddim(self):
        pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3

    def test_stable_diffusion_pndm(self):
        pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3

    def test_stable_diffusion_k_lms(self):
        pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.10440, 0.13115, 0.11100, 0.10141, 0.11440, 0.07215, 0.11332, 0.09693, 0.10006])
        assert np.abs(image_slice - expected_slice).max() < 3e-3

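    # Attention slicing computes the attention maps in sequential chunks instead
    # of all at once, trading speed for lower peak memory; the sliced and
    # unsliced outputs should still match.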
    @require_torch_gpu
    def test_stable_diffusion_attention_slicing(self):
        torch.cuda.reset_peak_memory_stats()
        pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
        )
        pipe.unet.set_default_attn_processor()
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # enable attention slicing
        pipe.enable_attention_slicing()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        image_sliced = pipe(**inputs).images

        mem_bytes = torch.cuda.max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        # make sure that less than 3.3 GB is allocated
        assert mem_bytes < 3.3 * 10**9

        # disable attention slicing
        pipe.disable_attention_slicing()
        pipe.unet.set_default_attn_processor()
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        image = pipe(**inputs).images

        # make sure that more than 3.3 GB is allocated
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes > 3.3 * 10**9
        max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten())
        assert max_diff < 5e-3

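    # The legacy callback/callback_steps API (superseded by callback_on_step_end)
    # exposes the latents at each step; spot-check them against reference slices.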
    def test_stable_diffusion_text2img_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3862, -0.4507, -1.1729, 0.0686, -1.1045, 0.7124, -1.8301, 0.1903, 1.2773]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [0.2720, -0.1863, -0.7383, -0.5029, -0.7534, 0.3970, -0.7646, 0.4468, 1.2686]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == inputs["num_inference_steps"]

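    # Sequential CPU offload moves one submodule at a time onto the accelerator,
    # minimizing peak VRAM at the cost of inference speed.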
    @require_torch_gpu
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.8 GB is allocated
        assert mem_bytes < 2.8 * 10**9

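    # Model-level CPU offload moves whole sub-models (text encoder, UNet, VAE)
    # on and off the accelerator; it should cut peak memory versus keeping
    # everything resident while producing numerically identical images.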
    @require_torch_gpu
    def test_stable_diffusion_pipeline_with_model_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)

        # Normal inference
        pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base",
            torch_dtype=torch.float16,
        )
        pipe.unet.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        outputs = pipe(**inputs)
        mem_bytes = torch.cuda.max_memory_allocated()

        # With model offloading

        # Reload but don't move to cuda
        pipe = StableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base",
            torch_dtype=torch.float16,
        )
        pipe.unet.set_default_attn_processor()

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        outputs_offloaded = pipe(**inputs)
        mem_bytes_offloaded = torch.cuda.max_memory_allocated()

        images = outputs.images
        images_offloaded = outputs_offloaded.images
        max_diff = numpy_cosine_similarity_distance(images.flatten(), images_offloaded.flatten())
        assert max_diff < 1e-3
        assert mem_bytes_offloaded < mem_bytes
        assert mem_bytes_offloaded < 3 * 10**9
        for module in pipe.text_encoder, pipe.unet, pipe.vae:
            assert module.device == torch.device("cpu")

        # With attention slicing
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe.enable_attention_slicing()
        _ = pipe(**inputs)
        mem_bytes_slicing = torch.cuda.max_memory_allocated()
        assert mem_bytes_slicing < mem_bytes_offloaded

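# The nightly tests run full 50-step generations and compare whole images
# against reference arrays hosted in the diffusers/test-arrays dataset.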
@nightly
@require_torch_accelerator
@skip_mps
class StableDiffusion2PipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda"
        if not str(device).startswith("mps"):
            generator = torch.Generator(device=_generator_device).manual_seed(seed)
        else:
            generator = torch.manual_seed(seed)

        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "np",
        }
        return inputs

    def test_stable_diffusion_2_0_default_ddim(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base").to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_2_text2img/stable_diffusion_2_0_base_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_stable_diffusion_2_1_default_pndm(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_stable_diffusion_ddim(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
        sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_stable_diffusion_lms(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_lms.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_stable_diffusion_euler(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
        sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_euler.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_stable_diffusion_dpm(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device)
        sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(
            sd_pipe.scheduler.config, final_sigmas_type="sigma_min"
        )
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_dpm_multi.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3