import inspect
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetPAGPipeline,
    StableDiffusionControlNetPipeline,
    UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
    enable_full_determinism,
)
from diffusers.utils.torch_utils import randn_tensor

from ..pipeline_params import (
    TEXT_TO_IMAGE_BATCH_PARAMS,
    TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
    TEXT_TO_IMAGE_PARAMS,
)
from ..test_pipelines_common import (
    IPAdapterTesterMixin,
    PipelineFromPipeTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class StableDiffusionControlNetPAGPipelineFastTests(
    PipelineTesterMixin,
    IPAdapterTesterMixin,
    PipelineLatentTesterMixin,
    PipelineFromPipeTesterMixin,
    unittest.TestCase,
):
    pipeline_class = StableDiffusionControlNetPAGPipeline
    params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"})
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"})

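    # All components below are tiny, randomly initialized models so the fast tests run on CPU in seconds.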
    def get_dummy_components(self, time_cond_proj_dim=None):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(4, 8),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=8,
            time_cond_proj_dim=time_cond_proj_dim,
            norm_num_groups=2,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(4, 8),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            conditioning_embedding_out_channels=(2, 4),
            cross_attention_dim=8,
            norm_num_groups=2,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[4, 8],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            norm_num_groups=2,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=8,
            intermediate_size=16,
            layer_norm_eps=1e-05,
            num_attention_heads=2,
            num_hidden_layers=2,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

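    # A fixed-seed generator and a random 64x64 control image keep the inputs reproducible across runs.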
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "pag_scale": 3.0,
            "output_type": "np",
            "image": image,
        }

        return inputs

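    # PAG should be a no-op at pag_scale=0.0 and should change the output once enabled;
    # the base ControlNet pipeline must not accept `pag_scale` at all.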
    def test_pag_disable_enable(self):
        device = "cpu"
        components = self.get_dummy_components()

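        # base pipeline (expect same results when pag is disabled)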
        pipe_sd = StableDiffusionControlNetPipeline(**components)
        pipe_sd = pipe_sd.to(device)
        pipe_sd.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        del inputs["pag_scale"]
        assert (
            "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters
        ), f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
        out = pipe_sd(**inputs).images[0, -3:, -3:, -1]

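        # pag disabled with pag_scale=0.0 (should match the base pipeline output)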
        pipe_pag = self.pipeline_class(**components)
        pipe_pag = pipe_pag.to(device)
        pipe_pag.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["pag_scale"] = 0.0
        out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]

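        # pag enabled on the mid, up, and down blocks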
        pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"])
        pipe_pag = pipe_pag.to(device)
        pipe_pag.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]

        assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3
        assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3

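    # PAG combined with classifier-free guidance: check the output shape and a reference slice.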
    def test_pag_cfg(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"])
        pipe_pag = pipe_pag.to(device)
        pipe_pag.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe_pag(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (
            1,
            64,
            64,
            3,
        ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}"
        expected_slice = np.array(
            [0.45505235, 0.2785938, 0.16334778, 0.79689944, 0.53095645, 0.40135607, 0.7052706, 0.69065094, 0.41548574]
        )

        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}"

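    # Same check with guidance_scale=0.0, i.e. PAG applied to unconditional generation.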
    def test_pag_uncond(self):
        device = "cpu"
        components = self.get_dummy_components()

        pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"])
        pipe_pag = pipe_pag.to(device)
        pipe_pag.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["guidance_scale"] = 0.0
        image = pipe_pag(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (
            1,
            64,
            64,
            3,
        ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}"
        expected_slice = np.array(
            [0.45127502, 0.2797252, 0.15970308, 0.7993157, 0.5414344, 0.40160775, 0.7114598, 0.69803864, 0.4217583]
        )

        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}"