# Copyright 2024 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import inspect
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKLCogVideoX,
    CogVideoXImageToVideoPipeline,
    CogVideoXTransformer3DModel,
    DDIMScheduler,
)
from diffusers.utils import load_image

from ...testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    numpy_cosine_similarity_distance,
    require_torch_accelerator,
    slow,
    torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineTesterMixin,
    check_qkv_fusion_matches_attn_procs_length,
    check_qkv_fusion_processors_exist,
    to_np,
)


enable_full_determinism()


class CogVideoXImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
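    """Fast tests for `CogVideoXImageToVideoPipeline`, built from tiny, randomly initialized dummy components."""
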
    pipeline_class = CogVideoXImageToVideoPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback_on_step_end",
            "callback_on_step_end_tensor_inputs",
        ]
    )
    test_xformers_attention = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = CogVideoXTransformer3DModel(
            # 2 heads of dim 16 keep the inner dim at 32 (matching the hidden size of the tiny text
            # encoder below) while keeping attention_head_dim divisible by 16, as the 3D rotary
            # positional embeddings enabled here require.
            num_attention_heads=2,
            attention_head_dim=16,
            in_channels=8,
            out_channels=4,
            time_embed_dim=2,
            text_embed_dim=32,
            num_layers=1,
            sample_width=2,
            sample_height=2,
            sample_frames=9,
            patch_size=2,
            temporal_compression_ratio=4,
            max_text_seq_length=16,
            use_rotary_positional_embeddings=True,
            use_learned_positional_embeddings=True,
        )

        torch.manual_seed(0)
        vae = AutoencoderKLCogVideoX(
            in_channels=3,
            out_channels=3,
            down_block_types=(
                "CogVideoXDownBlock3D",
                "CogVideoXDownBlock3D",
                "CogVideoXDownBlock3D",
                "CogVideoXDownBlock3D",
            ),
            up_block_types=(
                "CogVideoXUpBlock3D",
                "CogVideoXUpBlock3D",
                "CogVideoXUpBlock3D",
                "CogVideoXUpBlock3D",
            ),
            block_out_channels=(8, 8, 8, 8),
            latent_channels=4,
            layers_per_block=1,
            norm_num_groups=2,
            temporal_compression_ratio=4,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler()
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        components = {
            "transformer": transformer,
            "vae": vae,
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # 16x16 is deliberately small for speed: the dummy VAE downsamples 8x spatially, so this maps
        # to a 2x2 latent, matching the transformer's sample_height/sample_width.
        image_height = 16
        image_width = 16
        image = Image.new("RGB", (image_width, image_height))
        inputs = {
            "image": image,
            "prompt": "dance monkey",
            "negative_prompt": "",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "height": image_height,
            "width": image_width,
            # With temporal_compression_ratio=4, 8 frames correspond to (8 - 1) // 4 + 1 = 2 latent frames
            "num_frames": 8,
            "max_sequence_length": 16,
            "output_type": "pt",
        }
        return inputs
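
    # End-to-end smoke test: two denoising steps on the tiny components. Only the output shape is
    # checked strictly; the random `expected_video` and the 1e10 bound merely guard against NaNs.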
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        video = pipe(**inputs).frames
        generated_video = video[0]

        self.assertEqual(generated_video.shape, (8, 3, 16, 16))
        expected_video = torch.randn(8, 3, 16, 16)
        max_diff = np.abs(generated_video - expected_video).max()
        self.assertLessEqual(max_diff, 1e10)
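
    # Verifies the `callback_on_step_end` plumbing: only tensors whitelisted in `_callback_tensor_inputs`
    # may be passed to the callback, and a tensor modified by the callback feeds back into the denoising loop.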
    def test_callback_inputs(self):
        sig = inspect.signature(self.pipeline_class.__call__)
        has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
        has_callback_step_end = "callback_on_step_end" in sig.parameters

        if not (has_callback_tensor_inputs and has_callback_step_end):
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        self.assertTrue(
            hasattr(pipe, "_callback_tensor_inputs"),
            f"{self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
        )

        def callback_inputs_subset(pipe, i, t, callback_kwargs):
            # Iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # Check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        def callback_inputs_all(pipe, i, t, callback_kwargs):
            for tensor_name in pipe._callback_tensor_inputs:
                assert tensor_name in callback_kwargs

            # Iterate over callback args
            for tensor_name, tensor_value in callback_kwargs.items():
                # Check that we're only passing in allowed tensor inputs
                assert tensor_name in pipe._callback_tensor_inputs

            return callback_kwargs

        inputs = self.get_dummy_inputs(torch_device)

        # Test passing in a subset of the callback tensor inputs
        inputs["callback_on_step_end"] = callback_inputs_subset
        inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
        output = pipe(**inputs)[0]

        # Test passing in all of the callback tensor inputs
        inputs["callback_on_step_end"] = callback_inputs_all
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]

        def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
            is_last = i == (pipe.num_timesteps - 1)
            if is_last:
                callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
            return callback_kwargs

        inputs["callback_on_step_end"] = callback_inputs_change_tensor
        inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
        output = pipe(**inputs)[0]
        assert output.abs().sum() < 1e10

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3)
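
    # Attention slicing computes attention in smaller chunks to reduce peak memory; it must not change
    # the results beyond `expected_max_diff`.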
    def test_attention_slicing_forward_pass(
        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
    ):
        if not self.test_attention_slicing:
            return

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        for component in pipe.components.values():
            if hasattr(component, "set_default_attn_processor"):
                component.set_default_attn_processor()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator_device = "cpu"
        inputs = self.get_dummy_inputs(generator_device)
        output_without_slicing = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=1)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing1 = pipe(**inputs)[0]

        pipe.enable_attention_slicing(slice_size=2)
        inputs = self.get_dummy_inputs(generator_device)
        output_with_slicing2 = pipe(**inputs)[0]

        if test_max_difference:
            max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
            max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
            self.assertLess(
                max(max_diff1, max_diff2),
                expected_max_diff,
                "Attention slicing should not affect the inference results",
            )
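
    # VAE tiling decodes the latents in overlapping spatial tiles to bound peak memory; the stitched
    # result must stay within `expected_diff_max` of the untiled decode.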
    def test_vae_tiling(self, expected_diff_max: float = 0.3):
        generator_device = "cpu"
        components = self.get_dummy_components()

        # The I2V transformer uses learned positional embeddings, which tie generation to the resolutions
        # configured at initialization. Recreate it with a larger sample size so the 128x128 inputs below
        # (a 16x16 latent after 8x VAE downsampling) are supported.
        components["transformer"] = CogVideoXTransformer3DModel.from_config(
            components["transformer"].config,
            sample_height=16,
            sample_width=16,
        )

        pipe = self.pipeline_class(**components)
        pipe.to("cpu")
        pipe.set_progress_bar_config(disable=None)

        # Without tiling
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_without_tiling = pipe(**inputs)[0]

        # With tiling
        pipe.vae.enable_tiling(
            tile_sample_min_height=96,
            tile_sample_min_width=96,
            tile_overlap_factor_height=1 / 12,
            tile_overlap_factor_width=1 / 12,
        )
        inputs = self.get_dummy_inputs(generator_device)
        inputs["height"] = inputs["width"] = 128
        output_with_tiling = pipe(**inputs)[0]

        self.assertLess(
            (to_np(output_without_tiling) - to_np(output_with_tiling)).max(),
            expected_diff_max,
            "VAE tiling should not affect the inference results",
        )
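
    # QKV fusion concatenates the query/key/value projections into a single matmul as a pure performance
    # optimization, so outputs must match with fusion enabled, disabled, and after unfusing.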
    def test_fused_qkv_projections(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        frames = pipe(**inputs).frames
        original_image_slice = frames[0, -2:, -1, -3:, -3:]

        pipe.fuse_qkv_projections()
        assert check_qkv_fusion_processors_exist(pipe.transformer), (
            "Something wrong with the fused attention processors. Expected all the attention processors to be fused."
        )
        assert check_qkv_fusion_matches_attn_procs_length(
            pipe.transformer, pipe.transformer.original_attn_processors
        ), "Something wrong with the attention processors concerning the fused QKV projections."

        inputs = self.get_dummy_inputs(device)
        frames = pipe(**inputs).frames
        image_slice_fused = frames[0, -2:, -1, -3:, -3:]

        pipe.transformer.unfuse_qkv_projections()
        inputs = self.get_dummy_inputs(device)
        frames = pipe(**inputs).frames
        image_slice_disabled = frames[0, -2:, -1, -3:, -3:]

        assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), (
            "Fusion of QKV projections shouldn't affect the outputs."
        )
        assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), (
            "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."
        )
        assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), (
            "Original outputs should match when fused QKV projections are disabled."
        )


@slow
@require_torch_accelerator
class CogVideoXImageToVideoPipelineIntegrationTests(unittest.TestCase):
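    """Integration tests that run the full `THUDM/CogVideoX-5b-I2V` checkpoint; requires a torch accelerator."""
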
    prompt = "A painting of a squirrel eating a burger."

    def setUp(self):
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_cogvideox(self):
        generator = torch.Generator("cpu").manual_seed(0)

        pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
        pipe.enable_model_cpu_offload(device=torch_device)

        prompt = self.prompt
        image = load_image(
            "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg"
        )

        videos = pipe(
            image=image,
            prompt=prompt,
            height=480,
            width=720,
            num_frames=16,
            generator=generator,
            num_inference_steps=2,
            output_type="pt",
        ).frames

        video = videos[0]
        # NOTE: the reference below is a random placeholder, so the cosine-distance check is not a real
        # regression test; stored reference frames are needed to make it meaningful.
        expected_video = torch.randn(1, 16, 480, 720, 3).numpy()

        max_diff = numpy_cosine_similarity_distance(video, expected_video)
        assert max_diff < 1e-3, f"Max diff is too high. got {max_diff}"