============================= test session starts ==============================
platform linux -- Python 3.8.10, pytest-8.2.0, pluggy-1.5.0
rootdir: /diffusers
configfile: pyproject.toml
plugins: xdist-3.6.1, timeout-2.3.1, requests-mock-1.10.0
collected 7 items

tests/single_file/test_stable_diffusion_upscale_single_file.py ....FF.   [100%]

=================================== FAILURES ===================================
_ StableDiffusionUpscalePipelineSingleFileSlowTests.test_single_file_components_with_original_config _

self =
pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }
single_file_pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }

    def test_single_file_components_with_original_config(
        self,
        pipe=None,
        single_file_pipe=None,
    ):
        pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None)

        # Not possible to infer this value when original config is provided
        # we just pass it in here otherwise this test will fail
        upcast_attention = pipe.unet.config.upcast_attention
        single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file(
            self.ckpt_path,
            original_config=self.original_config,
            safety_checker=None,
            upcast_attention=upcast_attention,
        )

>       self._compare_component_configs(pipe, single_file_pipe)

tests/single_file/single_file_testing_utils.py:127:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }
single_file_pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }

    def _compare_component_configs(self, pipe, single_file_pipe):
        for param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items():
            if param_name in ["torch_dtype", "architectures", "_name_or_path"]:
                continue
            assert pipe.text_encoder.config.to_dict()[param_name] == param_value

        PARAMS_TO_IGNORE = [
            "torch_dtype",
            "_name_or_path",
            "architectures",
            "_use_default_values",
            "_diffusers_version",
        ]
        for component_name, component in single_file_pipe.components.items():
            if component_name in single_file_pipe._optional_components:
                continue

            # skip testing transformer based components here
            # skip text encoders / safety checkers since they have already been tested
            if component_name in ["text_encoder", "tokenizer", "safety_checker", "feature_extractor"]:
                continue

            assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline"
            assert isinstance(
                component, pipe.components[component_name].__class__
            ), f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"

            for param_name, param_value in component.config.items():
                if param_name in PARAMS_TO_IGNORE:
                    continue

                # Some pretrained configs will set upcast attention to None
                # In single file loading it defaults to the value in the class __init__ which is False
                if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None:
                    pipe.components[component_name].config[param_name] = param_value

>               assert (
                    pipe.components[component_name].config[param_name] == param_value
                ), f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
E               AssertionError: single file sample_size: 512 differs from pretrained 256

tests/single_file/single_file_testing_utils.py:85: AssertionError
----------------------------- Captured stderr call -----------------------------
Loading pipeline components...:   0%|          | 0/6 [00:00
_ StableDiffusionUpscalePipelineSingleFileSlowTests.test_single_file_components_with_original_config_local_files_only _

self =
pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }
single_file_pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }

    def test_single_file_components_with_original_config_local_files_only(
        self,
        pipe=None,
        single_file_pipe=None,
    ):
        pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None)

        # Not possible to infer this value when original config is provided
        # we just pass it in here otherwise this test will fail
        upcast_attention = pipe.unet.config.upcast_attention

        with tempfile.TemporaryDirectory() as tmpdir:
            ckpt_filename = self.ckpt_path.split("/")[-1]
            local_ckpt_path = download_single_file_checkpoint(self.repo_id, ckpt_filename, tmpdir)
            local_original_config = download_original_config(self.original_config, tmpdir)

            single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file(
                local_ckpt_path,
                original_config=local_original_config,
                safety_checker=None,
                upcast_attention=upcast_attention,
                local_files_only=True,
            )

>           self._compare_component_configs(pipe, single_file_pipe)

tests/single_file/single_file_testing_utils.py:153:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self =
pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }
single_file_pipe = StableDiffusionUpscalePipeline { "_class_name": "StableDiffusionUpscalePipeline", "_diffusers_version": "0.28.0.de...DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ], "watermarker": [ null, null ] }

    def _compare_component_configs(self, pipe, single_file_pipe):
        for param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items():
            if param_name in ["torch_dtype", "architectures", "_name_or_path"]:
                continue
            assert pipe.text_encoder.config.to_dict()[param_name] == param_value

        PARAMS_TO_IGNORE = [
            "torch_dtype",
            "_name_or_path",
            "architectures",
            "_use_default_values",
            "_diffusers_version",
        ]
        for component_name, component in single_file_pipe.components.items():
            if component_name in single_file_pipe._optional_components:
                continue

            # skip testing transformer based components here
            # skip text encoders / safety checkers since they have already been tested
            if component_name in ["text_encoder", "tokenizer", "safety_checker", "feature_extractor"]:
                continue

            assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline"
            assert isinstance(
                component, pipe.components[component_name].__class__
            ), f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same"

            for param_name, param_value in component.config.items():
                if param_name in PARAMS_TO_IGNORE:
                    continue

                # Some pretrained configs will set upcast attention to None
                # In single file loading it defaults to the value in the class __init__ which is False
                if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None:
                    pipe.components[component_name].config[param_name] = param_value

>               assert (
                    pipe.components[component_name].config[param_name] == param_value
                ), f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}"
E               AssertionError: single file sample_size: 512 differs from pretrained 256

tests/single_file/single_file_testing_utils.py:85: AssertionError
----------------------------- Captured stderr call -----------------------------
Loading pipeline components...:   0%|          | 0/6 [00:00
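Both failures trip the same assertion in _compare_component_configs: the pipeline rebuilt with from_single_file plus original_config ends up with sample_size=512 on one of its model components, while the from_pretrained pipeline has 256. A minimal reproduction sketch follows; the repo id, checkpoint filename, and YAML path are illustrative assumptions only (the test reads them from self.repo_id, self.ckpt_path, and self.original_config), and a possible workaround, mirroring how the test already forwards upcast_attention, would be to pass sample_size explicitly to from_single_file.

# Hedged reproduction sketch, not part of the test suite. The repo id,
# checkpoint filename and original-config path below are assumptions for
# illustration; the real values live on the test class.
from diffusers import StableDiffusionUpscalePipeline

repo_id = "stabilityai/stable-diffusion-x4-upscaler"   # assumed pretrained repo
ckpt_path = "x4-upscaler-ema.safetensors"              # assumed local single-file checkpoint
original_config = "x4-upscaling.yaml"                  # assumed original config YAML

pipe = StableDiffusionUpscalePipeline.from_pretrained(repo_id, safety_checker=None)
single_file_pipe = StableDiffusionUpscalePipeline.from_single_file(
    ckpt_path,
    original_config=original_config,
    safety_checker=None,
    # like the test, forward a value that cannot be inferred from the original config
    upcast_attention=pipe.unet.config.upcast_attention,
)

# Compare the parameter the assertion reports as mismatched (512 vs 256).
for name in ("unet", "vae"):
    pretrained_value = pipe.components[name].config.get("sample_size")
    single_file_value = single_file_pipe.components[name].config.get("sample_size")
    if pretrained_value != single_file_value:
        print(f"{name}: single file sample_size {single_file_value} "
              f"differs from pretrained {pretrained_value}")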