diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py new file mode 100644 index 0000000000000000000000000000000000000000..fbde04e33f0abd86d12f3dee048a4f0585c9f19d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py @@ -0,0 +1,17 @@ +# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +class SpectrogramDiffusionPipeline(metaclass=DummyObject): + _backends = ["transformers", "torch", "note_seq"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["transformers", "torch", "note_seq"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["transformers", "torch", "note_seq"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["transformers", "torch", "note_seq"]) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dynamic_modules_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dynamic_modules_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..674eb65773f0c626b9d114e26b0ede2ad1bf5878 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dynamic_modules_utils.py @@ -0,0 +1,507 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities to dynamically load objects from the Hub.""" + +import importlib +import inspect +import json +import os +import re +import shutil +import sys +import threading +from pathlib import Path +from types import ModuleType +from typing import Dict, Optional, Union +from urllib import request + +from huggingface_hub import hf_hub_download, model_info +from huggingface_hub.utils import RevisionNotFoundError, validate_hf_hub_args +from packaging import version + +from .. import __version__ +from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging +from .constants import DIFFUSERS_DISABLE_REMOTE_CODE + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +# See https://huggingface.co/datasets/diffusers/community-pipelines-mirror +COMMUNITY_PIPELINES_MIRROR_ID = "diffusers/community-pipelines-mirror" +TIME_OUT_REMOTE_CODE = int(os.getenv("DIFFUSERS_TIMEOUT_REMOTE_CODE", 15)) +_HF_REMOTE_CODE_LOCK = threading.Lock() + + +def get_diffusers_versions(): + url = "https://pypi.org/pypi/diffusers/json" + releases = json.loads(request.urlopen(url).read())["releases"].keys() + return sorted(releases, key=lambda x: version.Version(x)) + + +def init_hf_modules(): + """ + Creates the cache directory for modules with an init, and adds it to the Python path. + """ + # This function has already been executed if HF_MODULES_CACHE already is in the Python path. 
+ if HF_MODULES_CACHE in sys.path: + return + + sys.path.append(HF_MODULES_CACHE) + os.makedirs(HF_MODULES_CACHE, exist_ok=True) + init_path = Path(HF_MODULES_CACHE) / "__init__.py" + if not init_path.exists(): + init_path.touch() + + +def create_dynamic_module(name: Union[str, os.PathLike]): + """ + Creates a dynamic module in the cache directory for modules. + """ + init_hf_modules() + dynamic_module_path = Path(HF_MODULES_CACHE) / name + # If the parent module does not exist yet, recursively create it. + if not dynamic_module_path.parent.exists(): + create_dynamic_module(dynamic_module_path.parent) + os.makedirs(dynamic_module_path, exist_ok=True) + init_path = dynamic_module_path / "__init__.py" + if not init_path.exists(): + init_path.touch() + + +def get_relative_imports(module_file): + """ + Get the list of modules that are relatively imported in a module file. + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + """ + with open(module_file, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import .xxx` + relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from .xxx import yyy` + relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE) + # Unique-ify + return list(set(relative_imports)) + + +def get_relative_import_files(module_file): + """ + Get the list of all files that are needed for a given module. Note that this function recurses through the relative + imports (if a imports b and b imports c, it will return module files for b and c). + + Args: + module_file (`str` or `os.PathLike`): The module file to inspect. + """ + no_change = False + files_to_check = [module_file] + all_relative_imports = [] + + # Let's recurse through all relative imports + while not no_change: + new_imports = [] + for f in files_to_check: + new_imports.extend(get_relative_imports(f)) + + module_path = Path(module_file).parent + new_import_files = [str(module_path / m) for m in new_imports] + new_import_files = [f for f in new_import_files if f not in all_relative_imports] + files_to_check = [f"{f}.py" for f in new_import_files] + + no_change = len(new_import_files) == 0 + all_relative_imports.extend(files_to_check) + + return all_relative_imports + + +def check_imports(filename): + """ + Check if the current Python environment contains all the libraries that are imported in a file. + """ + with open(filename, "r", encoding="utf-8") as f: + content = f.read() + + # Imports of the form `import xxx` + imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE) + # Imports of the form `from xxx import yyy` + imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE) + # Only keep the top-level module + imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")] + + # Unique-ify and test we got them all + imports = list(set(imports)) + missing_packages = [] + for imp in imports: + try: + importlib.import_module(imp) + except ImportError: + missing_packages.append(imp) + + if len(missing_packages) > 0: + raise ImportError( + "This modeling file requires the following packages that were not found in your environment: " + f"{', '.join(missing_packages)}. 
Run `pip install {' '.join(missing_packages)}`" + ) + + return get_relative_imports(filename) + + +def resolve_trust_remote_code(trust_remote_code, model_name, has_remote_code): + trust_remote_code = trust_remote_code and not DIFFUSERS_DISABLE_REMOTE_CODE + if DIFFUSERS_DISABLE_REMOTE_CODE: + logger.warning( + "Downloading remote code is disabled globally via the DIFFUSERS_DISABLE_REMOTE_CODE environment variable. Ignoring `trust_remote_code`." + ) + + if has_remote_code and not trust_remote_code: + error_msg = f"The repository for {model_name} contains custom code. " + error_msg += ( + "Downloading remote code is disabled globally via the DIFFUSERS_DISABLE_REMOTE_CODE environment variable." + if DIFFUSERS_DISABLE_REMOTE_CODE + else "Pass `trust_remote_code=True` to allow loading remote code modules." + ) + raise ValueError(error_msg) + + elif has_remote_code and trust_remote_code: + logger.warning( + f"`trust_remote_code` is enabled. Downloading code from {model_name}. Please ensure you trust the contents of this repository" + ) + + return trust_remote_code + + +def get_class_in_module(class_name, module_path, force_reload=False): + """ + Import a module on the cache directory for modules and extract a class from it. + """ + name = os.path.normpath(module_path) + if name.endswith(".py"): + name = name[:-3] + name = name.replace(os.path.sep, ".") + module_file: Path = Path(HF_MODULES_CACHE) / module_path + + with _HF_REMOTE_CODE_LOCK: + if force_reload: + sys.modules.pop(name, None) + importlib.invalidate_caches() + cached_module: Optional[ModuleType] = sys.modules.get(name) + module_spec = importlib.util.spec_from_file_location(name, location=module_file) + + module: ModuleType + if cached_module is None: + module = importlib.util.module_from_spec(module_spec) + # insert it into sys.modules before any loading begins + sys.modules[name] = module + else: + module = cached_module + + module_spec.loader.exec_module(module) + + if class_name is None: + return find_pipeline_class(module) + + return getattr(module, class_name) + + +def find_pipeline_class(loaded_module): + """ + Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class + inheriting from `DiffusionPipeline`. + """ + from ..pipelines import DiffusionPipeline + + cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass)) + + pipeline_class = None + for cls_name, cls in cls_members.items(): + if ( + cls_name != DiffusionPipeline.__name__ + and issubclass(cls, DiffusionPipeline) + and cls.__module__.split(".")[0] != "diffusers" + ): + if pipeline_class is not None: + raise ValueError( + f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:" + f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in" + f" {loaded_module}." + ) + pipeline_class = cls + + return pipeline_class + + +@validate_hf_hub_args +def get_cached_module_file( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, +): + """ + Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached + Transformers module. 
+ + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + + + + You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated + models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + Returns: + `str`: The path to the module inside the cache. + """ + # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + + module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file) + + if os.path.isfile(module_file_or_url): + resolved_module_file = module_file_or_url + submodule = "local" + elif pretrained_model_name_or_path.count("/") == 0: + available_versions = get_diffusers_versions() + # cut ".dev0" + latest_version = "v" + ".".join(__version__.split(".")[:3]) + + # retrieve github version that matches + if revision is None: + revision = latest_version if latest_version[1:] in available_versions else "main" + logger.info(f"Defaulting to latest_version: {revision}.") + elif revision in available_versions: + revision = f"v{revision}" + elif revision == "main": + revision = revision + else: + raise ValueError( + f"`custom_revision`: {revision} does not exist. Please make sure to choose one of" + f" {', '.join(available_versions + ['main'])}." 
+ ) + + try: + resolved_module_file = hf_hub_download( + repo_id=COMMUNITY_PIPELINES_MIRROR_ID, + repo_type="dataset", + filename=f"{revision}/{pretrained_model_name_or_path}.py", + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + ) + submodule = "git" + module_file = pretrained_model_name_or_path + ".py" + except RevisionNotFoundError as e: + raise EnvironmentError( + f"Revision '{revision}' not found in the community pipelines mirror. Check available revisions on" + " https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main." + " If you don't find the revision you are looking for, please open an issue on https://github.com/huggingface/diffusers/issues." + ) from e + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + else: + try: + # Load from URL or cache if already cached + resolved_module_file = hf_hub_download( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + ) + submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/"))) + except EnvironmentError: + logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.") + raise + + # Check we have all the requirements in our environment + modules_needed = check_imports(resolved_module_file) + + # Now we move the module inside our cached dynamic modules. + full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule + create_dynamic_module(full_submodule) + submodule_path = Path(HF_MODULES_CACHE) / full_submodule + if submodule == "local" or submodule == "git": + # We always copy local files (we could hash the file to see if there was a change, and give them the name of + # that hash, to only copy when there is a modification but it seems overkill for now). + # The only reason we do the copy is to avoid putting too many folders in sys.path. + shutil.copyfile(resolved_module_file, submodule_path / module_file) + for module_needed in modules_needed: + if len(module_needed.split(".")) == 2: + module_needed = "/".join(module_needed.split(".")) + module_folder = module_needed.split("/")[0] + if not os.path.exists(submodule_path / module_folder): + os.makedirs(submodule_path / module_folder) + module_needed = f"{module_needed}.py" + shutil.copyfile(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed) + else: + # Get the commit hash + # TODO: we will get this info in the etag soon, so retrieve it from there and not here. + commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha + + # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the + # benefit of versioning. 
+ submodule_path = submodule_path / commit_hash + full_submodule = full_submodule + os.path.sep + commit_hash + create_dynamic_module(full_submodule) + + if not (submodule_path / module_file).exists(): + if len(module_file.split("/")) == 2: + module_folder = module_file.split("/")[0] + if not os.path.exists(submodule_path / module_folder): + os.makedirs(submodule_path / module_folder) + shutil.copyfile(resolved_module_file, submodule_path / module_file) + + # Make sure we also have every file with relative + for module_needed in modules_needed: + if len(module_needed.split(".")) == 2: + module_needed = "/".join(module_needed.split(".")) + if not (submodule_path / module_needed).exists(): + get_cached_module_file( + pretrained_model_name_or_path, + f"{module_needed}.py", + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + return os.path.join(full_submodule, module_file) + + +@validate_hf_hub_args +def get_class_from_dynamic_module( + pretrained_model_name_or_path: Union[str, os.PathLike], + module_file: str, + class_name: Optional[str] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + **kwargs, +): + """ + Extracts a class from a module file, present in the local folder or repository of a model. + + + + Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should + therefore only be called on trusted repos. + + + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced + under a user or organization name, like `dbmdz/bert-base-german-cased`. + - a path to a *directory* containing a configuration file saved using the + [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`. + + module_file (`str`): + The name of the module file containing the class to look for. + class_name (`str`): + The name of the class to import in the module. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. 
+ local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + + + + You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated + models](https://huggingface.co/docs/hub/models-gated#gated-models). + + + + Returns: + `type`: The class, dynamically imported from the module. + + Examples: + + ```python + # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this + # module. + cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel") + ```""" + # And lastly we get the class inside our newly created module + final_module = get_cached_module_file( + pretrained_model_name_or_path, + module_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + token=token, + revision=revision, + local_files_only=local_files_only, + ) + return get_class_in_module(class_name, final_module) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/export_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/export_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..07cf46928a44df0b27337e5d007c6a9bdb0e7c28 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/export_utils.py @@ -0,0 +1,209 @@ +import io +import random +import struct +import tempfile +from contextlib import contextmanager +from typing import List, Optional, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps + +from .import_utils import BACKENDS_MAPPING, is_imageio_available, is_opencv_available +from .logging import get_logger + + +global_rng = random.Random() + +logger = get_logger(__name__) + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None, fps: int = 10) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=1000 // fps, + loop=0, + ) + return output_gif_path + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
+ """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + format = struct.Struct(" str: + """ + quality: + Video output quality. Default is 5. Uses variable bit rate. Highest quality is 10, lowest is 0. Set to None to + prevent variable bitrate flags to FFMPEG so you can manually specify them using output_params instead. + Specifying a fixed bitrate using `bitrate` disables this parameter. + + bitrate: + Set a constant bitrate for the video encoding. Default is None causing `quality` parameter to be used instead. + Better quality videos with smaller file sizes will result from using the `quality` variable bitrate parameter + rather than specifying a fixed bitrate with this parameter. + + macro_block_size: + Size constraint for video. Width and height, must be divisible by this number. If not divisible by this number + imageio will tell ffmpeg to scale the image up to the next closest size divisible by this number. Most codecs + are compatible with a macroblock size of 16 (default), some can go smaller (4, 8). To disable this automatic + feature set it to None or 1, however be warned many players can't decode videos that are odd in size and some + codecs will produce poor results or fail. See https://en.wikipedia.org/wiki/Macroblock. + """ + # TODO: Dhruv. Remove by Diffusers release 0.33.0 + # Added to prevent breaking existing code + if not is_imageio_available(): + logger.warning( + ( + "It is recommended to use `export_to_video` with `imageio` and `imageio-ffmpeg` as a backend. \n" + "These libraries are not present in your environment. Attempting to use legacy OpenCV backend to export video. \n" + "Support for the OpenCV backend will be deprecated in a future Diffusers version" + ) + ) + return _legacy_export_to_video(video_frames, output_video_path, fps) + + if is_imageio_available(): + import imageio + else: + raise ImportError(BACKENDS_MAPPING["imageio"][1].format("export_to_video")) + + try: + imageio.plugins.ffmpeg.get_exe() + except AttributeError: + raise AttributeError( + ( + "Found an existing imageio backend in your environment. Attempting to export video with imageio. \n" + "Unable to find a compatible ffmpeg installation in your environment to use with imageio. 
Please install via `pip install imageio-ffmpeg" + ) + ) + + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + if isinstance(video_frames[0], np.ndarray): + video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames] + + elif isinstance(video_frames[0], PIL.Image.Image): + video_frames = [np.array(frame) for frame in video_frames] + + with imageio.get_writer( + output_video_path, fps=fps, quality=quality, bitrate=bitrate, macro_block_size=macro_block_size + ) as writer: + for frame in video_frames: + writer.append_data(frame) + + return output_video_path diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/hub_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/hub_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fcdf49156a8f21f17f5019fda41a6155edc5ca76 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/hub_utils.py @@ -0,0 +1,573 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import os +import re +import sys +import tempfile +import warnings +from pathlib import Path +from typing import Dict, List, Optional, Union +from uuid import uuid4 + +from huggingface_hub import ( + DDUFEntry, + ModelCard, + ModelCardData, + create_repo, + hf_hub_download, + model_info, + snapshot_download, + upload_folder, +) +from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE +from huggingface_hub.file_download import REGEX_COMMIT_HASH +from huggingface_hub.utils import ( + EntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, + is_jinja_available, + validate_hf_hub_args, +) +from packaging import version +from requests import HTTPError + +from .. import __version__ +from .constants import ( + DEPRECATED_REVISION_ARGS, + HUGGINGFACE_CO_RESOLVE_ENDPOINT, + SAFETENSORS_WEIGHTS_NAME, + WEIGHTS_NAME, +) +from .import_utils import ( + ENV_VARS_TRUE_VALUES, + _flax_version, + _jax_version, + _onnxruntime_version, + _torch_version, + is_flax_available, + is_onnx_available, + is_torch_available, +) +from .logging import get_logger + + +logger = get_logger(__name__) + +MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md" +SESSION_ID = uuid4().hex + + +def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: + """ + Formats a user-agent string with basic info about a request. 
+ """ + ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" + if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE: + return ua + "; telemetry/off" + if is_torch_available(): + ua += f"; torch/{_torch_version}" + if is_flax_available(): + ua += f"; jax/{_jax_version}" + ua += f"; flax/{_flax_version}" + if is_onnx_available(): + ua += f"; onnxruntime/{_onnxruntime_version}" + # CI will set this value to True + if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: + ua += "; is_ci/true" + if isinstance(user_agent, dict): + ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) + elif isinstance(user_agent, str): + ua += "; " + user_agent + return ua + + +def load_or_create_model_card( + repo_id_or_path: str = None, + token: Optional[str] = None, + is_pipeline: bool = False, + from_training: bool = False, + model_description: Optional[str] = None, + base_model: str = None, + prompt: Optional[str] = None, + license: Optional[str] = None, + widget: Optional[List[dict]] = None, + inference: Optional[bool] = None, +) -> ModelCard: + """ + Loads or creates a model card. + + Args: + repo_id_or_path (`str`): + The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card. + token (`str`, *optional*): + Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more + details. + is_pipeline (`bool`): + Boolean to indicate if we're adding tag to a [`DiffusionPipeline`]. + from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script. + model_description (`str`, *optional*): Model description to add to the model card. Helpful when using + `load_or_create_model_card` from a training script. + base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful + for DreamBooth-like training. + prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training. + license: (`str`, *optional*): License of the output artifact. Helpful when using + `load_or_create_model_card` from a training script. + widget (`List[dict]`, *optional*): Widget to accompany a gallery template. + inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using + `load_or_create_model_card` from a training script. + """ + if not is_jinja_available(): + raise ValueError( + "Modelcard rendering is based on Jinja templates." + " Please make sure to have `jinja` installed before using `load_or_create_model_card`." + " To install it, please run `pip install Jinja2`." + ) + + try: + # Check if the model card is present on the remote repo + model_card = ModelCard.load(repo_id_or_path, token=token) + except (EntryNotFoundError, RepositoryNotFoundError): + # Otherwise create a model card from template + if from_training: + model_card = ModelCard.from_template( + card_data=ModelCardData( # Card metadata object that will be converted to YAML block + license=license, + library_name="diffusers", + inference=inference, + base_model=base_model, + instance_prompt=prompt, + widget=widget, + ), + template_path=MODEL_CARD_TEMPLATE_PATH, + model_description=model_description, + ) + else: + card_data = ModelCardData() + component = "pipeline" if is_pipeline else "model" + if model_description is None: + model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated." 
+ model_card = ModelCard.from_template(card_data, model_description=model_description) + + return model_card + + +def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard: + """Populates the `model_card` with library name and optional tags.""" + if model_card.data.library_name is None: + model_card.data.library_name = "diffusers" + + if tags is not None: + if isinstance(tags, str): + tags = [tags] + if model_card.data.tags is None: + model_card.data.tags = [] + for tag in tags: + model_card.data.tags.append(tag) + + return model_card + + +def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None): + """ + Extracts the commit hash from a resolved filename toward a cache file. + """ + if resolved_file is None or commit_hash is not None: + return commit_hash + resolved_file = str(Path(resolved_file).as_posix()) + search = re.search(r"snapshots/([^/]+)/", resolved_file) + if search is None: + return None + commit_hash = search.groups()[0] + return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None + + +def _add_variant(weights_name: str, variant: Optional[str] = None) -> str: + if variant is not None: + splits = weights_name.split(".") + splits = splits[:-1] + [variant] + splits[-1:] + weights_name = ".".join(splits) + + return weights_name + + +@validate_hf_hub_args +def _get_model_file( + pretrained_model_name_or_path: Union[str, Path], + *, + weights_name: str, + subfolder: Optional[str] = None, + cache_dir: Optional[str] = None, + force_download: bool = False, + proxies: Optional[Dict] = None, + local_files_only: bool = False, + token: Optional[str] = None, + user_agent: Optional[Union[Dict, str]] = None, + revision: Optional[str] = None, + commit_hash: Optional[str] = None, + dduf_entries: Optional[Dict[str, DDUFEntry]] = None, +): + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + + if dduf_entries: + if subfolder is not None: + raise ValueError( + "DDUF file only allow for 1 level of directory (e.g transformer/model1/model.safetentors is not allowed). " + "Please check the DDUF structure" + ) + model_file = ( + weights_name + if pretrained_model_name_or_path == "" + else "/".join([pretrained_model_name_or_path, weights_name]) + ) + if model_file in dduf_entries: + return model_file + else: + raise EnvironmentError(f"Error no file named {weights_name} found in archive {dduf_entries.keys()}.") + elif os.path.isfile(pretrained_model_name_or_path): + return pretrained_model_name_or_path + elif os.path.isdir(pretrained_model_name_or_path): + if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)): + # Load from a PyTorch checkpoint + model_file = os.path.join(pretrained_model_name_or_path, weights_name) + return model_file + elif subfolder is not None and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, weights_name) + ): + model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name) + return model_file + else: + raise EnvironmentError( + f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." + ) + else: + # 1. 
First check if deprecated way of loading from branches is used + if ( + revision in DEPRECATED_REVISION_ARGS + and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) + and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0") + ): + try: + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=_add_variant(weights_name, revision), + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision or commit_hash, + ) + warnings.warn( + f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", + FutureWarning, + ) + return model_file + except: # noqa: E722 + warnings.warn( + f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", + FutureWarning, + ) + try: + # 2. Load model file as usual + model_file = hf_hub_download( + pretrained_model_name_or_path, + filename=weights_name, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + subfolder=subfolder, + revision=revision or commit_hash, + ) + return model_file + + except RepositoryNotFoundError as e: + raise EnvironmentError( + f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " + "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " + "token having permission to this repo with `token` or log in with `hf auth login`." + ) from e + except RevisionNotFoundError as e: + raise EnvironmentError( + f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " + "this model name. Check the model page at " + f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." + ) from e + except EntryNotFoundError as e: + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." 
+ ) from e + except HTTPError as e: + raise EnvironmentError( + f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{e}" + ) from e + except ValueError as e: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" + f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" + f" directory containing a file named {weights_name} or" + " \nCheckout your internet connection or see how to run the library in" + " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." + ) from e + except EnvironmentError as e: + raise EnvironmentError( + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " + "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " + f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " + f"containing a file named {weights_name}" + ) from e + + +def _get_checkpoint_shard_files( + pretrained_model_name_or_path, + index_filename, + cache_dir=None, + proxies=None, + local_files_only=False, + token=None, + user_agent=None, + revision=None, + subfolder="", + dduf_entries: Optional[Dict[str, DDUFEntry]] = None, +): + """ + For a given model: + + - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the + Hub + - returns the list of paths to all the shards, as well as some metadata. + + For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the + index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). + """ + if dduf_entries: + if index_filename not in dduf_entries: + raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") + else: + if not os.path.isfile(index_filename): + raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") + + if dduf_entries: + index = json.loads(dduf_entries[index_filename].read_text()) + else: + with open(index_filename, "r") as f: + index = json.loads(f.read()) + + original_shard_filenames = sorted(set(index["weight_map"].values())) + sharded_metadata = index["metadata"] + sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) + sharded_metadata["weight_map"] = index["weight_map"].copy() + shards_path = os.path.join(pretrained_model_name_or_path, subfolder) + + # First, let's deal with local folder. + if os.path.isdir(pretrained_model_name_or_path) or dduf_entries: + shard_filenames = [os.path.join(shards_path, f) for f in original_shard_filenames] + for shard_file in shard_filenames: + if dduf_entries: + if shard_file not in dduf_entries: + raise FileNotFoundError( + f"{shards_path} does not appear to have a file named {shard_file} which is " + "required according to the checkpoint index." + ) + else: + if not os.path.exists(shard_file): + raise FileNotFoundError( + f"{shards_path} does not appear to have a file named {shard_file} which is " + "required according to the checkpoint index." 
+ ) + return shard_filenames, sharded_metadata + + # At this stage pretrained_model_name_or_path is a model identifier on the Hub + allow_patterns = original_shard_filenames + if subfolder is not None: + allow_patterns = [os.path.join(subfolder, p) for p in allow_patterns] + + ignore_patterns = ["*.json", "*.md"] + + # If the repo doesn't have the required shards, error out early even before downloading anything. + if not local_files_only: + model_files_info = model_info(pretrained_model_name_or_path, revision=revision, token=token) + for shard_file in original_shard_filenames: + shard_file_present = any(shard_file in k.rfilename for k in model_files_info.siblings) + if not shard_file_present: + raise EnvironmentError( + f"{shards_path} does not appear to have a file named {shard_file} which is " + "required according to the checkpoint index." + ) + + try: + # Load from URL + cached_folder = snapshot_download( + pretrained_model_name_or_path, + cache_dir=cache_dir, + proxies=proxies, + local_files_only=local_files_only, + token=token, + revision=revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + user_agent=user_agent, + ) + if subfolder is not None: + cached_folder = os.path.join(cached_folder, subfolder) + + # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so + # we don't have to catch them here. We have also dealt with EntryNotFoundError. + except HTTPError as e: + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {pretrained_model_name_or_path}. You should try" + " again after checking your internet connection." + ) from e + + cached_filenames = [os.path.join(cached_folder, f) for f in original_shard_filenames] + for cached_file in cached_filenames: + if not os.path.isfile(cached_file): + raise EnvironmentError( + f"{cached_folder} does not have a file named {cached_file} which is required according to the checkpoint index." + ) + + return cached_filenames, sharded_metadata + + +def _check_legacy_sharding_variant_format(folder: str = None, filenames: List[str] = None, variant: str = None): + if filenames and folder: + raise ValueError("Both `filenames` and `folder` cannot be provided.") + if not filenames: + filenames = [] + for _, _, files in os.walk(folder): + for file in files: + filenames.append(os.path.basename(file)) + transformers_index_format = r"\d{5}-of-\d{5}" + variant_file_re = re.compile(rf".*-{transformers_index_format}\.{variant}\.[a-z]+$") + return any(variant_file_re.match(f) is not None for f in filenames) + + +class PushToHubMixin: + """ + A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub. + """ + + def _upload_folder( + self, + working_dir: Union[str, os.PathLike], + repo_id: str, + token: Optional[str] = None, + commit_message: Optional[str] = None, + create_pr: bool = False, + subfolder: Optional[str] = None, + ): + """ + Uploads all files in `working_dir` to `repo_id`. 
+ """ + if commit_message is None: + if "Model" in self.__class__.__name__: + commit_message = "Upload model" + elif "Scheduler" in self.__class__.__name__: + commit_message = "Upload scheduler" + else: + commit_message = f"Upload {self.__class__.__name__}" + + logger.info(f"Uploading the files of {working_dir} to {repo_id}.") + return upload_folder( + repo_id=repo_id, + folder_path=working_dir, + token=token, + commit_message=commit_message, + create_pr=create_pr, + path_in_repo=subfolder, + ) + + def push_to_hub( + self, + repo_id: str, + commit_message: Optional[str] = None, + private: Optional[bool] = None, + token: Optional[str] = None, + create_pr: bool = False, + safe_serialization: bool = True, + variant: Optional[str] = None, + subfolder: Optional[str] = None, + ) -> str: + """ + Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub. + + Parameters: + repo_id (`str`): + The name of the repository you want to push your model, scheduler, or pipeline files to. It should + contain your organization name when pushing to an organization. `repo_id` can also be a path to a local + directory. + commit_message (`str`, *optional*): + Message to commit while pushing. Default to `"Upload {object}"`. + private (`bool`, *optional*): + Whether to make the repo private. If `None` (default), the repo will be public unless the + organization's default is private. This value is ignored if the repo already exists. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. The token generated when running `hf + auth login` (stored in `~/.huggingface`). + create_pr (`bool`, *optional*, defaults to `False`): + Whether or not to create a PR with the uploaded files or directly commit. + safe_serialization (`bool`, *optional*, defaults to `True`): + Whether or not to convert the model weights to the `safetensors` format. + variant (`str`, *optional*): + If specified, weights are saved in the format `pytorch_model..bin`. + + Examples: + + ```python + from diffusers import UNet2DConditionModel + + unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet") + + # Push the `unet` to your namespace with the name "my-finetuned-unet". + unet.push_to_hub("my-finetuned-unet") + + # Push the `unet` to an organization with the name "my-finetuned-unet". + unet.push_to_hub("your-org/my-finetuned-unet") + ``` + """ + repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id + + # Create a new empty model card and eventually tag it + if not subfolder: + model_card = load_or_create_model_card(repo_id, token=token) + model_card = populate_model_card(model_card) + + # Save all files. 
+ save_kwargs = {"safe_serialization": safe_serialization} + if "Scheduler" not in self.__class__.__name__: + save_kwargs.update({"variant": variant}) + + with tempfile.TemporaryDirectory() as tmpdir: + self.save_pretrained(tmpdir, **save_kwargs) + + # Update model card if needed: + if not subfolder: + model_card.save(os.path.join(tmpdir, "README.md")) + + return self._upload_folder( + tmpdir, + repo_id, + token=token, + commit_message=commit_message, + create_pr=create_pr, + subfolder=subfolder, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/import_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/import_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..153be057381d8a3781fd4796ae305c14fd04698b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/import_utils.py @@ -0,0 +1,962 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Import utilities: Utilities related to imports and our lazy inits. +""" + +import importlib.util +import inspect +import operator as op +import os +import sys +from collections import OrderedDict, defaultdict +from itertools import chain +from types import ModuleType +from typing import Any, Tuple, Union + +from huggingface_hub.utils import is_jinja_available # noqa: F401 +from packaging.version import Version, parse + +from . import logging + + +# The package importlib_metadata is in a different place, depending on the python version. 
+if sys.version_info < (3, 8): + import importlib_metadata +else: + import importlib.metadata as importlib_metadata +try: + _package_map = importlib_metadata.packages_distributions() # load-once to avoid expensive calls +except Exception: + _package_map = None + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) + +USE_TF = os.environ.get("USE_TF", "AUTO").upper() +USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() +USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() +USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper() +DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper() +DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES + +STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} + +_is_google_colab = "google.colab" in sys.modules or any(k.startswith("COLAB_") for k in os.environ) + + +def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[bool, str]: + global _package_map + pkg_exists = importlib.util.find_spec(pkg_name) is not None + pkg_version = "N/A" + + if pkg_exists: + if _package_map is None: + _package_map = defaultdict(list) + try: + # Fallback for Python < 3.10 + for dist in importlib_metadata.distributions(): + _top_level_declared = (dist.read_text("top_level.txt") or "").split() + # Infer top-level package names from file structure + _inferred_opt_names = { + f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in (dist.files or []) + } - {None} + _top_level_inferred = filter(lambda name: "." not in name, _inferred_opt_names) + for pkg in _top_level_declared or _top_level_inferred: + _package_map[pkg].append(dist.metadata["Name"]) + except Exception as _: + pass + try: + if get_dist_name and pkg_name in _package_map and _package_map[pkg_name]: + if len(_package_map[pkg_name]) > 1: + logger.warning( + f"Multiple distributions found for package {pkg_name}. 
Picked distribution: {_package_map[pkg_name][0]}" + ) + pkg_name = _package_map[pkg_name][0] + pkg_version = importlib_metadata.version(pkg_name) + logger.debug(f"Successfully imported {pkg_name} version {pkg_version}") + except (ImportError, importlib_metadata.PackageNotFoundError): + pkg_exists = False + + return pkg_exists, pkg_version + + +if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + _torch_available, _torch_version = _is_package_available("torch") + +else: + logger.info("Disabling PyTorch because USE_TORCH is set") + _torch_available = False + _torch_version = "N/A" + +_jax_version = "N/A" +_flax_version = "N/A" +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None + if _flax_available: + try: + _jax_version = importlib_metadata.version("jax") + _flax_version = importlib_metadata.version("flax") + logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") + except importlib_metadata.PackageNotFoundError: + _flax_available = False +else: + _flax_available = False + +if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES: + _safetensors_available, _safetensors_version = _is_package_available("safetensors") + +else: + logger.info("Disabling Safetensors because USE_SAFETENSORS is set") + _safetensors_available = False + +_onnxruntime_version = "N/A" +_onnx_available = importlib.util.find_spec("onnxruntime") is not None +if _onnx_available: + candidates = ( + "onnxruntime", + "onnxruntime-cann", + "onnxruntime-directml", + "ort_nightly_directml", + "onnxruntime-gpu", + "ort_nightly_gpu", + "onnxruntime-migraphx", + "onnxruntime-openvino", + "onnxruntime-qnn", + "onnxruntime-rocm", + "onnxruntime-training", + "onnxruntime-vitisai", + ) + _onnxruntime_version = None + # For the metadata, we have to look for both onnxruntime and onnxruntime-x + for pkg in candidates: + try: + _onnxruntime_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _onnx_available = _onnxruntime_version is not None + if _onnx_available: + logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}") + +# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed. 
+# _opencv_available = importlib.util.find_spec("opencv-python") is not None +try: + candidates = ( + "opencv-python", + "opencv-contrib-python", + "opencv-python-headless", + "opencv-contrib-python-headless", + ) + _opencv_version = None + for pkg in candidates: + try: + _opencv_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _opencv_available = _opencv_version is not None + if _opencv_available: + logger.debug(f"Successfully imported cv2 version {_opencv_version}") +except importlib_metadata.PackageNotFoundError: + _opencv_available = False + +_bs4_available = importlib.util.find_spec("bs4") is not None +try: + # importlib metadata under different name + _bs4_version = importlib_metadata.version("beautifulsoup4") + logger.debug(f"Successfully imported ftfy version {_bs4_version}") +except importlib_metadata.PackageNotFoundError: + _bs4_available = False + +_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None +try: + _invisible_watermark_version = importlib_metadata.version("invisible-watermark") + logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}") +except importlib_metadata.PackageNotFoundError: + _invisible_watermark_available = False + +_torch_xla_available, _torch_xla_version = _is_package_available("torch_xla") +_torch_npu_available, _torch_npu_version = _is_package_available("torch_npu") +_transformers_available, _transformers_version = _is_package_available("transformers") +_hf_hub_available, _hf_hub_version = _is_package_available("huggingface_hub") +_kernels_available, _kernels_version = _is_package_available("kernels") +_inflect_available, _inflect_version = _is_package_available("inflect") +_unidecode_available, _unidecode_version = _is_package_available("unidecode") +_k_diffusion_available, _k_diffusion_version = _is_package_available("k_diffusion") +_note_seq_available, _note_seq_version = _is_package_available("note_seq") +_wandb_available, _wandb_version = _is_package_available("wandb") +_tensorboard_available, _tensorboard_version = _is_package_available("tensorboard") +_compel_available, _compel_version = _is_package_available("compel") +_sentencepiece_available, _sentencepiece_version = _is_package_available("sentencepiece") +_torchsde_available, _torchsde_version = _is_package_available("torchsde") +_peft_available, _peft_version = _is_package_available("peft") +_torchvision_available, _torchvision_version = _is_package_available("torchvision") +_matplotlib_available, _matplotlib_version = _is_package_available("matplotlib") +_timm_available, _timm_version = _is_package_available("timm") +_bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes") +_imageio_available, _imageio_version = _is_package_available("imageio") +_ftfy_available, _ftfy_version = _is_package_available("ftfy") +_scipy_available, _scipy_version = _is_package_available("scipy") +_librosa_available, _librosa_version = _is_package_available("librosa") +_accelerate_available, _accelerate_version = _is_package_available("accelerate") +_xformers_available, _xformers_version = _is_package_available("xformers") +_gguf_available, _gguf_version = _is_package_available("gguf") +_torchao_available, _torchao_version = _is_package_available("torchao") +_bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes") +_optimum_quanto_available, _optimum_quanto_version = _is_package_available("optimum", get_dist_name=True) 
+_pytorch_retinaface_available, _pytorch_retinaface_version = _is_package_available("pytorch_retinaface") +_better_profanity_available, _better_profanity_version = _is_package_available("better_profanity") +_nltk_available, _nltk_version = _is_package_available("nltk") +_cosmos_guardrail_available, _cosmos_guardrail_version = _is_package_available("cosmos_guardrail") +_sageattention_available, _sageattention_version = _is_package_available("sageattention") +_flash_attn_available, _flash_attn_version = _is_package_available("flash_attn") +_flash_attn_3_available, _flash_attn_3_version = _is_package_available("flash_attn_3") +_kornia_available, _kornia_version = _is_package_available("kornia") + + +def is_torch_available(): + return _torch_available + + +def is_torch_xla_available(): + return _torch_xla_available + + +def is_torch_npu_available(): + return _torch_npu_available + + +def is_flax_available(): + return _flax_available + + +def is_transformers_available(): + return _transformers_available + + +def is_inflect_available(): + return _inflect_available + + +def is_unidecode_available(): + return _unidecode_available + + +def is_onnx_available(): + return _onnx_available + + +def is_opencv_available(): + return _opencv_available + + +def is_scipy_available(): + return _scipy_available + + +def is_librosa_available(): + return _librosa_available + + +def is_xformers_available(): + return _xformers_available + + +def is_accelerate_available(): + return _accelerate_available + + +def is_kernels_available(): + return _kernels_available + + +def is_k_diffusion_available(): + return _k_diffusion_available + + +def is_note_seq_available(): + return _note_seq_available + + +def is_wandb_available(): + return _wandb_available + + +def is_tensorboard_available(): + return _tensorboard_available + + +def is_compel_available(): + return _compel_available + + +def is_ftfy_available(): + return _ftfy_available + + +def is_bs4_available(): + return _bs4_available + + +def is_torchsde_available(): + return _torchsde_available + + +def is_invisible_watermark_available(): + return _invisible_watermark_available + + +def is_peft_available(): + return _peft_available + + +def is_torchvision_available(): + return _torchvision_available + + +def is_matplotlib_available(): + return _matplotlib_available + + +def is_safetensors_available(): + return _safetensors_available + + +def is_bitsandbytes_available(): + return _bitsandbytes_available + + +def is_google_colab(): + return _is_google_colab + + +def is_sentencepiece_available(): + return _sentencepiece_available + + +def is_imageio_available(): + return _imageio_available + + +def is_gguf_available(): + return _gguf_available + + +def is_torchao_available(): + return _torchao_available + + +def is_optimum_quanto_available(): + return _optimum_quanto_available + + +def is_timm_available(): + return _timm_available + + +def is_pytorch_retinaface_available(): + return _pytorch_retinaface_available + + +def is_better_profanity_available(): + return _better_profanity_available + + +def is_nltk_available(): + return _nltk_available + + +def is_cosmos_guardrail_available(): + return _cosmos_guardrail_available + + +def is_hpu_available(): + return all(importlib.util.find_spec(lib) for lib in ("habana_frameworks", "habana_frameworks.torch")) + + +def is_sageattention_available(): + return _sageattention_available + + +def is_flash_attn_available(): + return _flash_attn_available + + +def is_flash_attn_3_available(): + return _flash_attn_3_available + + +def 
is_kornia_available(): + return _kornia_available + + +# docstyle-ignore +FLAX_IMPORT_ERROR = """ +{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the +installation page: https://github.com/google/flax and follow the ones that match your environment. +""" + +# docstyle-ignore +INFLECT_IMPORT_ERROR = """ +{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install +inflect` +""" + +# docstyle-ignore +PYTORCH_IMPORT_ERROR = """ +{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the +installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. +""" + +# docstyle-ignore +ONNX_IMPORT_ERROR = """ +{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip +install onnxruntime` +""" + +# docstyle-ignore +OPENCV_IMPORT_ERROR = """ +{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip +install opencv-python` +""" + +# docstyle-ignore +SCIPY_IMPORT_ERROR = """ +{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install +scipy` +""" + +# docstyle-ignore +LIBROSA_IMPORT_ERROR = """ +{0} requires the librosa library but it was not found in your environment. Checkout the instructions on the +installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment. +""" + +# docstyle-ignore +TRANSFORMERS_IMPORT_ERROR = """ +{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip +install transformers` +""" + +# docstyle-ignore +UNIDECODE_IMPORT_ERROR = """ +{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install +Unidecode` +""" + +# docstyle-ignore +K_DIFFUSION_IMPORT_ERROR = """ +{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip +install k-diffusion` +""" + +# docstyle-ignore +NOTE_SEQ_IMPORT_ERROR = """ +{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip +install note-seq` +""" + +# docstyle-ignore +WANDB_IMPORT_ERROR = """ +{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip +install wandb` +""" + +# docstyle-ignore +TENSORBOARD_IMPORT_ERROR = """ +{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip +install tensorboard` +""" + + +# docstyle-ignore +COMPEL_IMPORT_ERROR = """ +{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel` +""" + +# docstyle-ignore +BS4_IMPORT_ERROR = """ +{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: +`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. +""" + +# docstyle-ignore +FTFY_IMPORT_ERROR = """ +{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the +installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones +that match your environment. Please note that you may need to restart your runtime after installation. 
+""" + +# docstyle-ignore +TORCHSDE_IMPORT_ERROR = """ +{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde` +""" + +# docstyle-ignore +INVISIBLE_WATERMARK_IMPORT_ERROR = """ +{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0` +""" + +# docstyle-ignore +PEFT_IMPORT_ERROR = """ +{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft` +""" + +# docstyle-ignore +SAFETENSORS_IMPORT_ERROR = """ +{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors` +""" + +# docstyle-ignore +SENTENCEPIECE_IMPORT_ERROR = """ +{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece` +""" + + +# docstyle-ignore +BITSANDBYTES_IMPORT_ERROR = """ +{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes` +""" + +# docstyle-ignore +IMAGEIO_IMPORT_ERROR = """ +{0} requires the imageio library and ffmpeg but it was not found in your environment. You can install it with pip: `pip install imageio imageio-ffmpeg` +""" + +# docstyle-ignore +GGUF_IMPORT_ERROR = """ +{0} requires the gguf library but it was not found in your environment. You can install it with pip: `pip install gguf` +""" + +TORCHAO_IMPORT_ERROR = """ +{0} requires the torchao library but it was not found in your environment. You can install it with pip: `pip install +torchao` +""" + +QUANTO_IMPORT_ERROR = """ +{0} requires the optimum-quanto library but it was not found in your environment. You can install it with pip: `pip +install optimum-quanto` +""" + +# docstyle-ignore +PYTORCH_RETINAFACE_IMPORT_ERROR = """ +{0} requires the pytorch_retinaface library but it was not found in your environment. You can install it with pip: `pip install pytorch_retinaface` +""" + +# docstyle-ignore +BETTER_PROFANITY_IMPORT_ERROR = """ +{0} requires the better_profanity library but it was not found in your environment. You can install it with pip: `pip install better_profanity` +""" + +# docstyle-ignore +NLTK_IMPORT_ERROR = """ +{0} requires the nltk library but it was not found in your environment. 
You can install it with pip: `pip install nltk` +""" + + +BACKENDS_MAPPING = OrderedDict( + [ + ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), + ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), + ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), + ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), + ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)), + ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), + ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), + ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), + ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), + ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), + ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)), + ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)), + ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)), + ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)), + ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)), + ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), + ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)), + ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)), + ("peft", (is_peft_available, PEFT_IMPORT_ERROR)), + ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)), + ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)), + ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)), + ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)), + ("gguf", (is_gguf_available, GGUF_IMPORT_ERROR)), + ("torchao", (is_torchao_available, TORCHAO_IMPORT_ERROR)), + ("quanto", (is_optimum_quanto_available, QUANTO_IMPORT_ERROR)), + ("pytorch_retinaface", (is_pytorch_retinaface_available, PYTORCH_RETINAFACE_IMPORT_ERROR)), + ("better_profanity", (is_better_profanity_available, BETTER_PROFANITY_IMPORT_ERROR)), + ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)), + ] +) + + +def requires_backends(obj, backends): + if not isinstance(backends, (list, tuple)): + backends = [backends] + + name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ + checks = (BACKENDS_MAPPING[backend] for backend in backends) + failed = [msg.format(name) for available, msg in checks if not available()] + if failed: + raise ImportError("".join(failed)) + + if name in [ + "VersatileDiffusionTextToImagePipeline", + "VersatileDiffusionPipeline", + "VersatileDiffusionDualGuidedPipeline", + "StableDiffusionImageVariationPipeline", + "UnCLIPPipeline", + ] and is_transformers_version("<", "4.25.0"): + raise ImportError( + f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install" + " --upgrade transformers \n```" + ) + + if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version( + "<", "4.26.0" + ): + raise ImportError( + f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install" + " --upgrade transformers \n```" + ) + + +class DummyObject(type): + """ + Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by + `requires_backend` each time a user tries to access any method of that class. 
+ """ + + def __getattr__(cls, key): + if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]: + return super().__getattr__(cls, key) + requires_backends(cls, cls._backends) + + +# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319 +def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): + """ + Compares a library version to some requirement using a given operation. + + Args: + library_or_version (`str` or `packaging.version.Version`): + A library name or a version to check. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="`. + requirement_version (`str`): + The version to compare the library version against + """ + if operation not in STR_OPERATION_TO_FUNC.keys(): + raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") + operation = STR_OPERATION_TO_FUNC[operation] + if isinstance(library_or_version, str): + library_or_version = parse(importlib_metadata.version(library_or_version)) + return operation(library_or_version, parse(requirement_version)) + + +# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338 +def is_torch_version(operation: str, version: str): + """ + Compares the current PyTorch version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of PyTorch + """ + return compare_versions(parse(_torch_version), operation, version) + + +def is_torch_xla_version(operation: str, version: str): + """ + Compares the current torch_xla version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A string version of torch_xla + """ + if not is_torch_xla_available: + return False + return compare_versions(parse(_torch_xla_version), operation, version) + + +def is_transformers_version(operation: str, version: str): + """ + Compares the current Transformers version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _transformers_available: + return False + return compare_versions(parse(_transformers_version), operation, version) + + +def is_hf_hub_version(operation: str, version: str): + """ + Compares the current Hugging Face Hub version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _hf_hub_available: + return False + return compare_versions(parse(_hf_hub_version), operation, version) + + +def is_accelerate_version(operation: str, version: str): + """ + Compares the current Accelerate version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _accelerate_available: + return False + return compare_versions(parse(_accelerate_version), operation, version) + + +def is_peft_version(operation: str, version: str): + """ + Compares the current PEFT version to a given reference with an operation. 
+ + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _peft_available: + return False + return compare_versions(parse(_peft_version), operation, version) + + +def is_bitsandbytes_version(operation: str, version: str): + """ + Args: + Compares the current bitsandbytes version to a given reference with an operation. + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _bitsandbytes_available: + return False + return compare_versions(parse(_bitsandbytes_version), operation, version) + + +def is_gguf_version(operation: str, version: str): + """ + Compares the current Accelerate version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _gguf_available: + return False + return compare_versions(parse(_gguf_version), operation, version) + + +def is_torchao_version(operation: str, version: str): + """ + Compares the current torchao version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _torchao_available: + return False + return compare_versions(parse(_torchao_version), operation, version) + + +def is_k_diffusion_version(operation: str, version: str): + """ + Compares the current k-diffusion version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _k_diffusion_available: + return False + return compare_versions(parse(_k_diffusion_version), operation, version) + + +def is_optimum_quanto_version(operation: str, version: str): + """ + Compares the current Accelerate version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _optimum_quanto_available: + return False + return compare_versions(parse(_optimum_quanto_version), operation, version) + + +def is_xformers_version(operation: str, version: str): + """ + Compares the current xformers version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _xformers_available: + return False + return compare_versions(parse(_xformers_version), operation, version) + + +def is_sageattention_version(operation: str, version: str): + """ + Compares the current sageattention version to a given reference with an operation. + + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _sageattention_available: + return False + return compare_versions(parse(_sageattention_version), operation, version) + + +def is_flash_attn_version(operation: str, version: str): + """ + Compares the current flash-attention version to a given reference with an operation. 
+ + Args: + operation (`str`): + A string representation of an operator, such as `">"` or `"<="` + version (`str`): + A version string + """ + if not _flash_attn_available: + return False + return compare_versions(parse(_flash_attn_version), operation, version) + + +def get_objects_from_module(module): + """ + Returns a dict of object names and values in a module, while skipping private/internal objects + + Args: + module (ModuleType): + Module to extract the objects from. + + Returns: + dict: Dictionary of object names and corresponding values + """ + + objects = {} + for name in dir(module): + if name.startswith("_"): + continue + objects[name] = getattr(module, name) + + return objects + + +class OptionalDependencyNotAvailable(BaseException): + """ + An error indicating that an optional dependency of Diffusers was not found in the environment. + """ + + +class _LazyModule(ModuleType): + """ + Module class that surfaces all objects but only performs associated imports when the objects are requested. + """ + + # Very heavily inspired by optuna.integration._IntegrationModule + # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py + def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None): + super().__init__(name) + self._modules = set(import_structure.keys()) + self._class_to_module = {} + for key, values in import_structure.items(): + for value in values: + self._class_to_module[value] = key + # Needed for autocompletion in an IDE + self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values())) + self.__file__ = module_file + self.__spec__ = module_spec + self.__path__ = [os.path.dirname(module_file)] + self._objects = {} if extra_objects is None else extra_objects + self._name = name + self._import_structure = import_structure + + # Needed for autocompletion in an IDE + def __dir__(self): + result = super().__dir__() + # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether + # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir. + for attr in self.__all__: + if attr not in result: + result.append(attr) + return result + + def __getattr__(self, name: str) -> Any: + if name in self._objects: + return self._objects[name] + if name in self._modules: + value = self._get_module(name) + elif name in self._class_to_module.keys(): + module = self._get_module(self._class_to_module[name]) + value = getattr(module, name) + else: + raise AttributeError(f"module {self.__name__} has no attribute {name}") + + setattr(self, name, value) + return value + + def _get_module(self, module_name: str): + try: + return importlib.import_module("." 
+ module_name, self.__name__) + except Exception as e: + raise RuntimeError( + f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its" + f" traceback):\n{e}" + ) from e + + def __reduce__(self): + return (self.__class__, (self._name, self.__file__, self._import_structure)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/loading_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/loading_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..31aec907d71202fc7dfc15eb235eae4cac96934c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/loading_utils.py @@ -0,0 +1,210 @@ +import os +import tempfile +from typing import Any, Callable, List, Optional, Tuple, Union +from urllib.parse import unquote, urlparse + +import librosa +import numpy +import PIL.Image +import PIL.ImageOps +import requests + +from .constants import DIFFUSERS_REQUEST_TIMEOUT +from .import_utils import BACKENDS_MAPPING, is_imageio_available + + +def load_image( + image: Union[str, PIL.Image.Image], convert_method: Optional[Callable[[PIL.Image.Image], PIL.Image.Image]] = None +) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + convert_method (Callable[[PIL.Image.Image], PIL.Image.Image], *optional*): + A conversion method to apply to the image after loading it. When set to `None` the image will be converted + "RGB". + + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {image} is not a valid path." + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for the image. Should be a URL linking to an image, a local path, or a PIL image." + ) + + image = PIL.ImageOps.exif_transpose(image) + + if convert_method is not None: + image = convert_method(image) + else: + image = image.convert("RGB") + + return image + + +def load_video( + video: str, + convert_method: Optional[Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]]] = None, +) -> List[PIL.Image.Image]: + """ + Loads `video` to a list of PIL Image. + + Args: + video (`str`): + A URL or Path to a video to convert to a list of PIL Image format. + convert_method (Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]], *optional*): + A conversion method to apply to the video after loading it. When set to `None` the images will be converted + to "RGB". + + Returns: + `List[PIL.Image.Image]`: + The video as a list of PIL images. + """ + is_url = video.startswith("http://") or video.startswith("https://") + is_file = os.path.isfile(video) + was_tempfile_created = False + + if not (is_url or is_file): + raise ValueError( + f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {video} is not a valid path." + ) + + if is_url: + response = requests.get(video, stream=True) + if response.status_code != 200: + raise ValueError(f"Failed to download video. 
Status code: {response.status_code}") + + parsed_url = urlparse(video) + file_name = os.path.basename(unquote(parsed_url.path)) + + suffix = os.path.splitext(file_name)[1] or ".mp4" + video_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name + + was_tempfile_created = True + + video_data = response.iter_content(chunk_size=8192) + with open(video_path, "wb") as f: + for chunk in video_data: + f.write(chunk) + + video = video_path + + pil_images = [] + if video.endswith(".gif"): + gif = PIL.Image.open(video) + try: + while True: + pil_images.append(gif.copy()) + gif.seek(gif.tell() + 1) + except EOFError: + pass + + else: + if is_imageio_available(): + import imageio + else: + raise ImportError(BACKENDS_MAPPING["imageio"][1].format("load_video")) + + try: + imageio.plugins.ffmpeg.get_exe() + except AttributeError: + raise AttributeError( + "`Unable to find an ffmpeg installation on your machine. Please install via `pip install imageio-ffmpeg" + ) + + with imageio.get_reader(video) as reader: + # Read all frames + for frame in reader: + pil_images.append(PIL.Image.fromarray(frame)) + + if was_tempfile_created: + os.remove(video_path) + + if convert_method is not None: + pil_images = convert_method(pil_images) + + return pil_images + + +def load_audio( + audio: Union[str, numpy.ndarray], convert_method: Optional[Callable[[numpy.ndarray], numpy.ndarray]] = None +) -> numpy.ndarray: + """ + Loads `audio` to a numpy array. + + Args: + audio (`str` or `numpy.ndarray`): + The audio to convert to the numpy array format. + convert_method (Callable[[numpy.ndarray], numpy.ndarray], *optional*): + A conversion method to apply to the audio after loading it. When set to `None` the audio will be converted + to a specific format. + + Returns: + `numpy.ndarray`: + A Librosa audio object. + `int`: + The sample rate of the audio. + """ + if isinstance(audio, str): + if audio.startswith("http://") or audio.startswith("https://"): + audio = PIL.Image.open(requests.get(audio, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw) + elif os.path.isfile(audio): + audio, sample_rate = librosa.load(audio, sr=16000) + else: + raise ValueError( + f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {audio} is not a valid path." + ) + elif isinstance(audio, numpy.ndarray): + audio = audio + else: + raise ValueError( + "Incorrect format used for the audio. Should be a URL linking to an audio, a local path, or a PIL audio." + ) + + # audio = PIL.ImageOps.exif_transpose(audio) + + if convert_method is not None: + audio = convert_method(audio) + else: + audio = audio.convert("RGB") + + return audio, sample_rate + + +# Taken from `transformers`. +def get_module_from_name(module, tensor_name: str) -> Tuple[Any, str]: + if "." 
in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + return module, tensor_name + + +def get_submodule_by_name(root_module, module_path: str): + current = root_module + parts = module_path.split(".") + for part in parts: + if part.isdigit(): + idx = int(part) + current = current[idx] # e.g., for nn.ModuleList or nn.Sequential + else: + current = getattr(current, part) + return current diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/logging.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..2ad6d3a476076dbc7f4d3e9ab1ab9f48de24d76f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/logging.py @@ -0,0 +1,340 @@ +# coding=utf-8 +# Copyright 2025 Optuna, Hugging Face +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Logging utilities.""" + +import logging +import os +import sys +import threading +from logging import ( + CRITICAL, # NOQA + DEBUG, # NOQA + ERROR, # NOQA + FATAL, # NOQA + INFO, # NOQA + NOTSET, # NOQA + WARN, # NOQA + WARNING, # NOQA +) +from typing import Dict, Optional + +from tqdm import auto as tqdm_lib + + +_lock = threading.Lock() +_default_handler: Optional[logging.Handler] = None + +log_levels = { + "debug": logging.DEBUG, + "info": logging.INFO, + "warning": logging.WARNING, + "error": logging.ERROR, + "critical": logging.CRITICAL, +} + +_default_log_level = logging.WARNING + +_tqdm_active = True + + +def _get_default_logging_level() -> int: + """ + If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is + not - fall back to `_default_log_level` + """ + env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None) + if env_level_str: + if env_level_str in log_levels: + return log_levels[env_level_str] + else: + logging.getLogger().warning( + f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}" + ) + return _default_log_level + + +def _get_library_name() -> str: + return __name__.split(".")[0] + + +def _get_library_root_logger() -> logging.Logger: + return logging.getLogger(_get_library_name()) + + +def _configure_library_root_logger() -> None: + global _default_handler + + with _lock: + if _default_handler: + # This library has already configured the library root logger. + return + _default_handler = logging.StreamHandler() # Set sys.stderr as stream. + + if sys.stderr: # only if sys.stderr exists, e.g. when not using pythonw in windows + _default_handler.flush = sys.stderr.flush + + # Apply our default configuration to the library root logger. 
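 # (The stream handler created above writes to stderr; the level falls back to WARNING
 # unless the DIFFUSERS_VERBOSITY environment variable selects another level.)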
+ library_root_logger = _get_library_root_logger() + library_root_logger.addHandler(_default_handler) + library_root_logger.setLevel(_get_default_logging_level()) + library_root_logger.propagate = False + + +def _reset_library_root_logger() -> None: + global _default_handler + + with _lock: + if not _default_handler: + return + + library_root_logger = _get_library_root_logger() + library_root_logger.removeHandler(_default_handler) + library_root_logger.setLevel(logging.NOTSET) + _default_handler = None + + +def get_log_levels_dict() -> Dict[str, int]: + return log_levels + + +def get_logger(name: Optional[str] = None) -> logging.Logger: + """ + Return a logger with the specified name. + + This function is not supposed to be directly accessed unless you are writing a custom diffusers module. + """ + + if name is None: + name = _get_library_name() + + _configure_library_root_logger() + return logging.getLogger(name) + + +def get_verbosity() -> int: + """ + Return the current level for the 🤗 Diffusers' root logger as an `int`. + + Returns: + `int`: + Logging level integers which can be one of: + + - `50`: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` + - `40`: `diffusers.logging.ERROR` + - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN` + - `20`: `diffusers.logging.INFO` + - `10`: `diffusers.logging.DEBUG` + + """ + + _configure_library_root_logger() + return _get_library_root_logger().getEffectiveLevel() + + +def set_verbosity(verbosity: int) -> None: + """ + Set the verbosity level for the 🤗 Diffusers' root logger. + + Args: + verbosity (`int`): + Logging level which can be one of: + + - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL` + - `diffusers.logging.ERROR` + - `diffusers.logging.WARNING` or `diffusers.logging.WARN` + - `diffusers.logging.INFO` + - `diffusers.logging.DEBUG` + """ + + _configure_library_root_logger() + _get_library_root_logger().setLevel(verbosity) + + +def set_verbosity_info() -> None: + """Set the verbosity to the `INFO` level.""" + return set_verbosity(INFO) + + +def set_verbosity_warning() -> None: + """Set the verbosity to the `WARNING` level.""" + return set_verbosity(WARNING) + + +def set_verbosity_debug() -> None: + """Set the verbosity to the `DEBUG` level.""" + return set_verbosity(DEBUG) + + +def set_verbosity_error() -> None: + """Set the verbosity to the `ERROR` level.""" + return set_verbosity(ERROR) + + +def disable_default_handler() -> None: + """Disable the default handler of the 🤗 Diffusers' root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().removeHandler(_default_handler) + + +def enable_default_handler() -> None: + """Enable the default handler of the 🤗 Diffusers' root logger.""" + + _configure_library_root_logger() + + assert _default_handler is not None + _get_library_root_logger().addHandler(_default_handler) + + +def add_handler(handler: logging.Handler) -> None: + """adds a handler to the HuggingFace Diffusers' root logger.""" + + _configure_library_root_logger() + + assert handler is not None + _get_library_root_logger().addHandler(handler) + + +def remove_handler(handler: logging.Handler) -> None: + """removes given handler from the HuggingFace Diffusers' root logger.""" + + _configure_library_root_logger() + + assert handler is not None and handler in _get_library_root_logger().handlers + _get_library_root_logger().removeHandler(handler) + + +def disable_propagation() -> None: + """ + Disable propagation of the library log outputs. 
Note that log propagation is disabled by default. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = False + + +def enable_propagation() -> None: + """ + Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to prevent + double logging if the root logger has been configured. + """ + + _configure_library_root_logger() + _get_library_root_logger().propagate = True + + +def enable_explicit_format() -> None: + """ + Enable explicit formatting for every 🤗 Diffusers' logger. The explicit formatter is as follows: + ``` + [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE + ``` + All handlers currently bound to the root logger are affected by this method. + """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") + handler.setFormatter(formatter) + + +def reset_format() -> None: + """ + Resets the formatting for 🤗 Diffusers' loggers. + + All handlers currently bound to the root logger are affected by this method. + """ + handlers = _get_library_root_logger().handlers + + for handler in handlers: + handler.setFormatter(None) + + +def warning_advice(self, *args, **kwargs) -> None: + """ + This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this + warning will not be printed + """ + no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False) + if no_advisory_warnings: + return + self.warning(*args, **kwargs) + + +logging.Logger.warning_advice = warning_advice + + +class EmptyTqdm: + """Dummy tqdm which doesn't do anything.""" + + def __init__(self, *args, **kwargs): # pylint: disable=unused-argument + self._iterator = args[0] if args else None + + def __iter__(self): + return iter(self._iterator) + + def __getattr__(self, _): + """Return empty function.""" + + def empty_fn(*args, **kwargs): # pylint: disable=unused-argument + return + + return empty_fn + + def __enter__(self): + return self + + def __exit__(self, type_, value, traceback): + return + + +class _tqdm_cls: + def __call__(self, *args, **kwargs): + if _tqdm_active: + return tqdm_lib.tqdm(*args, **kwargs) + else: + return EmptyTqdm(*args, **kwargs) + + def set_lock(self, *args, **kwargs): + self._lock = None + if _tqdm_active: + return tqdm_lib.tqdm.set_lock(*args, **kwargs) + + def get_lock(self): + if _tqdm_active: + return tqdm_lib.tqdm.get_lock() + + +tqdm = _tqdm_cls() + + +def is_progress_bar_enabled() -> bool: + """Return a boolean indicating whether tqdm progress bars are enabled.""" + global _tqdm_active + return bool(_tqdm_active) + + +def enable_progress_bar() -> None: + """Enable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = True + + +def disable_progress_bar() -> None: + """Disable tqdm progress bar.""" + global _tqdm_active + _tqdm_active = False diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/model_card_template.md b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/model_card_template.md new file mode 100644 index 0000000000000000000000000000000000000000..f41b71e24e2081425d3049ae51b50036d5d28b6a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/model_card_template.md @@ -0,0 +1,24 @@ +--- +{{ card_data }} +--- + + + +{{ model_description }} + +## Intended uses & limitations + +#### How to use + +```python +# TODO: add an example code snippet for running this diffusion pipeline +``` 
+ +#### Limitations and bias + +[TODO: provide examples of latent issues and potential remediations] + +## Training details + +[TODO: describe the data used to train the model] diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/outputs.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..35691496a182cacc209f300deb4fbf1b1a889cb6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/outputs.py @@ -0,0 +1,138 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Generic utilities +""" + +from collections import OrderedDict +from dataclasses import fields, is_dataclass +from typing import Any, Tuple + +import numpy as np + +from .import_utils import is_torch_available, is_torch_version + + +def is_tensor(x) -> bool: + """ + Tests if `x` is a `torch.Tensor` or `np.ndarray`. + """ + if is_torch_available(): + import torch + + if isinstance(x, torch.Tensor): + return True + + return isinstance(x, np.ndarray) + + +class BaseOutput(OrderedDict): + """ + Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a + tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular + Python dictionary. + + + + You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple + first. + + + """ + + def __init_subclass__(cls) -> None: + """Register subclasses as pytree nodes. + + This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with + `static_graph=True` with modules that output `ModelOutput` subclasses. 
+ """ + if is_torch_available(): + import torch.utils._pytree + + if is_torch_version("<", "2.2"): + torch.utils._pytree._register_pytree_node( + cls, + torch.utils._pytree._dict_flatten, + lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), + ) + else: + torch.utils._pytree.register_pytree_node( + cls, + torch.utils._pytree._dict_flatten, + lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)), + serialized_type_name=f"{cls.__module__}.{cls.__name__}", + ) + + def __post_init__(self) -> None: + class_fields = fields(self) + + # Safety and consistency checks + if not len(class_fields): + raise ValueError(f"{self.__class__.__name__} has no fields.") + + first_field = getattr(self, class_fields[0].name) + other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) + + if other_fields_are_none and isinstance(first_field, dict): + for key, value in first_field.items(): + self[key] = value + else: + for field in class_fields: + v = getattr(self, field.name) + if v is not None: + self[field.name] = v + + def __delitem__(self, *args, **kwargs): + raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") + + def setdefault(self, *args, **kwargs): + raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") + + def pop(self, *args, **kwargs): + raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") + + def update(self, *args, **kwargs): + raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") + + def __getitem__(self, k: Any) -> Any: + if isinstance(k, str): + inner_dict = dict(self.items()) + return inner_dict[k] + else: + return self.to_tuple()[k] + + def __setattr__(self, name: Any, value: Any) -> None: + if name in self.keys() and value is not None: + # Don't call self.__setitem__ to avoid recursion errors + super().__setitem__(name, value) + super().__setattr__(name, value) + + def __setitem__(self, key, value): + # Will raise a KeyException if needed + super().__setitem__(key, value) + # Don't call self.__setattr__ to avoid recursion errors + super().__setattr__(key, value) + + def __reduce__(self): + if not is_dataclass(self): + return super().__reduce__() + callable, _args, *remaining = super().__reduce__() + args = tuple(getattr(self, field.name) for field in fields(self)) + return callable, args, *remaining + + def to_tuple(self) -> Tuple[Any, ...]: + """ + Convert self to a tuple containing all the attributes/keys that are not `None`. + """ + return tuple(self[k] for k in self.keys()) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/peft_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/peft_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..12066ee3f89b3ca862b1ecaaf6b93c8e2505224f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/peft_utils.py @@ -0,0 +1,376 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +PEFT utilities: Utilities related to peft library +""" + +import collections +import importlib +from typing import Optional + +from packaging import version + +from . import logging +from .import_utils import is_peft_available, is_peft_version, is_torch_available +from .torch_utils import empty_device_cache + + +logger = logging.get_logger(__name__) + +if is_torch_available(): + import torch + + +def recurse_remove_peft_layers(model): + r""" + Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + has_base_layer_pattern = False + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + has_base_layer_pattern = hasattr(module, "base_layer") + break + + if has_base_layer_pattern: + from peft.utils import _get_submodules + + key_list = [key for key, _ in model.named_modules() if "lora" not in key] + for key in key_list: + try: + parent, target, target_name = _get_submodules(model, key) + except AttributeError: + continue + if hasattr(target, "base_layer"): + setattr(parent, target_name, target.get_base_layer()) + else: + # This is for backwards compatibility with PEFT <= 0.6.2. + # TODO can be removed once that PEFT version is no longer supported. + from peft.tuners.lora import LoraLayer + + for name, module in model.named_children(): + if len(list(module.children())) > 0: + ## compound module, go inside it + recurse_remove_peft_layers(module) + + module_replaced = False + + if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear): + new_module = torch.nn.Linear( + module.in_features, + module.out_features, + bias=module.bias is not None, + ).to(module.weight.device) + new_module.weight = module.weight + if module.bias is not None: + new_module.bias = module.bias + + module_replaced = True + elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d): + new_module = torch.nn.Conv2d( + module.in_channels, + module.out_channels, + module.kernel_size, + module.stride, + module.padding, + module.dilation, + module.groups, + ).to(module.weight.device) + + new_module.weight = module.weight + if module.bias is not None: + new_module.bias = module.bias + + module_replaced = True + + if module_replaced: + setattr(model, name, new_module) + del module + + empty_device_cache() + return model + + +def scale_lora_layers(model, weight): + """ + Adjust the weightage given to the LoRA layers of the model. + + Args: + model (`torch.nn.Module`): + The model to scale. + weight (`float`): + The weight to be given to the LoRA layers. + """ + from peft.tuners.tuners_utils import BaseTunerLayer + + if weight == 1.0: + return + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + module.scale_layer(weight) + + +def unscale_lora_layers(model, weight: Optional[float] = None): + """ + Removes the previously passed weight given to the LoRA layers of the model. + + Args: + model (`torch.nn.Module`): + The model to scale. + weight (`float`, *optional*): + The weight to be given to the LoRA layers. If no scale is passed the scale of the lora layer will be + re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct + value. 
+ """ + from peft.tuners.tuners_utils import BaseTunerLayer + + if weight is None or weight == 1.0: + return + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + if weight != 0: + module.unscale_layer(weight) + else: + for adapter_name in module.active_adapters: + # if weight == 0 unscale should re-set the scale to the original value. + module.set_scale(adapter_name, 1.0) + + +def get_peft_kwargs( + rank_dict, network_alpha_dict, peft_state_dict, is_unet=True, model_state_dict=None, adapter_name=None +): + rank_pattern = {} + alpha_pattern = {} + r = lora_alpha = list(rank_dict.values())[0] + + if len(set(rank_dict.values())) > 1: + # get the rank occurring the most number of times + r = collections.Counter(rank_dict.values()).most_common()[0][0] + + # for modules with rank different from the most occurring rank, add it to the `rank_pattern` + rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items())) + rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()} + + if network_alpha_dict is not None and len(network_alpha_dict) > 0: + if len(set(network_alpha_dict.values())) > 1: + # get the alpha occurring the most number of times + lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0] + + # for modules with alpha different from the most occurring alpha, add it to the `alpha_pattern` + alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items())) + if is_unet: + alpha_pattern = { + ".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v + for k, v in alpha_pattern.items() + } + else: + alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()} + else: + lora_alpha = set(network_alpha_dict.values()).pop() + + target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()}) + use_dora = any("lora_magnitude_vector" in k for k in peft_state_dict) + # for now we know that the "bias" keys are only associated with `lora_B`. 
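 # For example, such a key looks like (illustrative, module path hypothetical):
 #   "unet.down_blocks.0.attentions.0.to_q.lora_B.bias"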
+ lora_bias = any("lora_B" in k and k.endswith(".bias") for k in peft_state_dict) + + lora_config_kwargs = { + "r": r, + "lora_alpha": lora_alpha, + "rank_pattern": rank_pattern, + "alpha_pattern": alpha_pattern, + "target_modules": target_modules, + "use_dora": use_dora, + "lora_bias": lora_bias, + } + + return lora_config_kwargs + + +def get_adapter_name(model): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return f"default_{len(module.r)}" + return "default_0" + + +def set_adapter_layers(model, enabled=True): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + # The recent version of PEFT needs to call `enable_adapters` instead + if hasattr(module, "enable_adapters"): + module.enable_adapters(enabled=enabled) + else: + module.disable_adapters = not enabled + + +def delete_adapter_layers(model, adapter_name): + from peft.tuners.tuners_utils import BaseTunerLayer + + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + if hasattr(module, "delete_adapter"): + module.delete_adapter(adapter_name) + else: + raise ValueError( + "The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1" + ) + + # For transformers integration - we need to pop the adapter from the config + if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"): + model.peft_config.pop(adapter_name, None) + # In case all adapters are deleted, we need to delete the config + # and make sure to set the flag to False + if len(model.peft_config) == 0: + del model.peft_config + model._hf_peft_config_loaded = None + + +def set_weights_and_activate_adapters(model, adapter_names, weights): + from peft.tuners.tuners_utils import BaseTunerLayer + + def get_module_weight(weight_for_adapter, module_name): + if not isinstance(weight_for_adapter, dict): + # If weight_for_adapter is a single number, always return it. + return weight_for_adapter + + for layer_name, weight_ in weight_for_adapter.items(): + if layer_name in module_name: + return weight_ + + parts = module_name.split(".") + # e.g. key = "down_blocks.1.attentions.0" + key = f"{parts[0]}.{parts[1]}.attentions.{parts[3]}" + block_weight = weight_for_adapter.get(key, 1.0) + + return block_weight + + for module_name, module in model.named_modules(): + if isinstance(module, BaseTunerLayer): + # For backward compatibility with previous PEFT versions, set multiple active adapters + if hasattr(module, "set_adapter"): + module.set_adapter(adapter_names) + else: + module.active_adapter = adapter_names + + # Set the scaling weight for each adapter for this module + for adapter_name, weight in zip(adapter_names, weights): + module.set_scale(adapter_name, get_module_weight(weight, module_name)) + + +def check_peft_version(min_version: str) -> None: + r""" + Checks if the version of PEFT is compatible. + + Args: + version (`str`): + The version of PEFT to check against. + """ + if not is_peft_available(): + raise ValueError("PEFT is not installed. 
Please install it with `pip install peft`") + + is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version) + + if not is_peft_version_compatible: + raise ValueError( + f"The version of PEFT you are using is not compatible, please use a version that is greater" + f" than {min_version}" + ) + + +def _create_lora_config( + state_dict, network_alphas, metadata, rank_pattern_dict, is_unet=True, model_state_dict=None, adapter_name=None +): + from peft import LoraConfig + + if metadata is not None: + lora_config_kwargs = metadata + else: + lora_config_kwargs = get_peft_kwargs( + rank_pattern_dict, + network_alpha_dict=network_alphas, + peft_state_dict=state_dict, + is_unet=is_unet, + model_state_dict=model_state_dict, + adapter_name=adapter_name, + ) + + _maybe_raise_error_for_ambiguous_keys(lora_config_kwargs) + + # Version checks for DoRA and lora_bias + if "use_dora" in lora_config_kwargs and lora_config_kwargs["use_dora"]: + if is_peft_version("<", "0.9.0"): + raise ValueError("DoRA requires PEFT >= 0.9.0. Please upgrade.") + + if "lora_bias" in lora_config_kwargs and lora_config_kwargs["lora_bias"]: + if is_peft_version("<=", "0.13.2"): + raise ValueError("lora_bias requires PEFT >= 0.14.0. Please upgrade.") + + try: + return LoraConfig(**lora_config_kwargs) + except TypeError as e: + raise TypeError("`LoraConfig` class could not be instantiated.") from e + + +def _maybe_raise_error_for_ambiguous_keys(config): + rank_pattern = config["rank_pattern"].copy() + target_modules = config["target_modules"] + + for key in list(rank_pattern.keys()): + # try to detect ambiguity + # `target_modules` can also be a str, in which case this loop would loop + # over the chars of the str. The technically correct way to match LoRA keys + # in PEFT is to use LoraModel._check_target_module_exists (lora_config, key). + # But this cuts it for now. + exact_matches = [mod for mod in target_modules if mod == key] + substring_matches = [mod for mod in target_modules if key in mod and mod != key] + + if exact_matches and substring_matches: + if is_peft_version("<", "0.14.1"): + raise ValueError( + "There are ambiguous keys present in this LoRA. To load it, please update your `peft` installation - `pip install -U peft`." + ) + + +def _maybe_warn_for_unhandled_keys(incompatible_keys, adapter_name): + warn_msg = "" + if incompatible_keys is not None: + # Check only for unexpected keys. + unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None) + if unexpected_keys: + lora_unexpected_keys = [k for k in unexpected_keys if "lora_" in k and adapter_name in k] + if lora_unexpected_keys: + warn_msg = ( + f"Loading adapter weights from state_dict led to unexpected keys found in the model:" + f" {', '.join(lora_unexpected_keys)}. " + ) + + # Filter missing keys specific to the current adapter. + missing_keys = getattr(incompatible_keys, "missing_keys", None) + if missing_keys: + lora_missing_keys = [k for k in missing_keys if "lora_" in k and adapter_name in k] + if lora_missing_keys: + warn_msg += ( + f"Loading adapter weights from state_dict led to missing keys in the model:" + f" {', '.join(lora_missing_keys)}." 
+ ) + + if warn_msg: + logger.warning(warn_msg) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/pil_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/pil_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..76678070b697c7d87fc3691d9bc5bb3bea83c5b1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/pil_utils.py @@ -0,0 +1,67 @@ +from typing import List + +import PIL.Image +import PIL.ImageOps +from packaging import version +from PIL import Image + + +if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): + PIL_INTERPOLATION = { + "linear": PIL.Image.Resampling.BILINEAR, + "bilinear": PIL.Image.Resampling.BILINEAR, + "bicubic": PIL.Image.Resampling.BICUBIC, + "lanczos": PIL.Image.Resampling.LANCZOS, + "nearest": PIL.Image.Resampling.NEAREST, + } +else: + PIL_INTERPOLATION = { + "linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + "nearest": PIL.Image.NEAREST, + } + + +def pt_to_pil(images): + """ + Convert a torch image to a PIL image. + """ + images = (images / 2 + 0.5).clamp(0, 1) + images = images.cpu().permute(0, 2, 3, 1).float().numpy() + images = numpy_to_pil(images) + return images + + +def numpy_to_pil(images): + """ + Convert a numpy image or a batch of images to a PIL image. + """ + if images.ndim == 3: + images = images[None, ...] + images = (images * 255).round().astype("uint8") + if images.shape[-1] == 1: + # special case for grayscale (single channel) images + pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images] + else: + pil_images = [Image.fromarray(image) for image in images] + + return pil_images + + +def make_image_grid(images: List[PIL.Image.Image], rows: int, cols: int, resize: int = None) -> PIL.Image.Image: + """ + Prepares a single grid of images. Useful for visualization purposes. + """ + assert len(images) == rows * cols + + if resize is not None: + images = [img.resize((resize, resize)) for img in images] + + w, h = images[0].size + grid = Image.new("RGB", size=(cols * w, rows * h)) + + for i, img in enumerate(images): + grid.paste(img, box=(i % cols * w, i // cols * h)) + return grid diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/remote_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/remote_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6494dc14171a935cc7efc247f8b4870e76b575d8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/remote_utils.py @@ -0,0 +1,425 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
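# Annotation (not part of the patch): the helpers defined below talk to a remote VAE decode
# endpoint; e.g. `detect_image_type` sniffs magic bytes (JPEG b"\xff\xd8",
# PNG b"\x89PNG\r\n\x1a\n", GIF, BMP) so the returned PIL image can be tagged with its format.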
+ +import io +import json +from typing import List, Literal, Optional, Union, cast + +import requests + +from .deprecation_utils import deprecate +from .import_utils import is_safetensors_available, is_torch_available + + +if is_torch_available(): + import torch + + from ..image_processor import VaeImageProcessor + from ..video_processor import VideoProcessor + + if is_safetensors_available(): + import safetensors.torch + + DTYPE_MAP = { + "float16": torch.float16, + "float32": torch.float32, + "bfloat16": torch.bfloat16, + "uint8": torch.uint8, + } + + +from PIL import Image + + +def detect_image_type(data: bytes) -> str: + if data.startswith(b"\xff\xd8"): + return "jpeg" + elif data.startswith(b"\x89PNG\r\n\x1a\n"): + return "png" + elif data.startswith(b"GIF87a") or data.startswith(b"GIF89a"): + return "gif" + elif data.startswith(b"BM"): + return "bmp" + return "unknown" + + +def check_inputs_decode( + endpoint: str, + tensor: "torch.Tensor", + processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None, + do_scaling: bool = True, + scaling_factor: Optional[float] = None, + shift_factor: Optional[float] = None, + output_type: Literal["mp4", "pil", "pt"] = "pil", + return_type: Literal["mp4", "pil", "pt"] = "pil", + image_format: Literal["png", "jpg"] = "jpg", + partial_postprocess: bool = False, + input_tensor_type: Literal["binary"] = "binary", + output_tensor_type: Literal["binary"] = "binary", + height: Optional[int] = None, + width: Optional[int] = None, +): + if tensor.ndim == 3 and height is None and width is None: + raise ValueError("`height` and `width` required for packed latents.") + if ( + output_type == "pt" + and return_type == "pil" + and not partial_postprocess + and not isinstance(processor, (VaeImageProcessor, VideoProcessor)) + ): + raise ValueError("`processor` is required.") + if do_scaling and scaling_factor is None: + deprecate( + "do_scaling", + "1.0.0", + "`do_scaling` is deprecated, pass `scaling_factor` and `shift_factor` if required.", + standard_warn=False, + ) + + +def postprocess_decode( + response: requests.Response, + processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None, + output_type: Literal["mp4", "pil", "pt"] = "pil", + return_type: Literal["mp4", "pil", "pt"] = "pil", + partial_postprocess: bool = False, +): + if output_type == "pt" or (output_type == "pil" and processor is not None): + output_tensor = response.content + parameters = response.headers + shape = json.loads(parameters["shape"]) + dtype = parameters["dtype"] + torch_dtype = DTYPE_MAP[dtype] + output_tensor = torch.frombuffer(bytearray(output_tensor), dtype=torch_dtype).reshape(shape) + if output_type == "pt": + if partial_postprocess: + if return_type == "pil": + output = [Image.fromarray(image.numpy()) for image in output_tensor] + if len(output) == 1: + output = output[0] + elif return_type == "pt": + output = output_tensor + else: + if processor is None or return_type == "pt": + output = output_tensor + else: + if isinstance(processor, VideoProcessor): + output = cast( + List[Image.Image], + processor.postprocess_video(output_tensor, output_type="pil")[0], + ) + else: + output = cast( + Image.Image, + processor.postprocess(output_tensor, output_type="pil")[0], + ) + elif output_type == "pil" and return_type == "pil" and processor is None: + output = Image.open(io.BytesIO(response.content)).convert("RGB") + detected_format = detect_image_type(response.content) + output.format = detected_format + elif output_type == "pil" and processor is not None: + if 
return_type == "pil": + output = [ + Image.fromarray(image) + for image in (output_tensor.permute(0, 2, 3, 1).float().numpy() * 255).round().astype("uint8") + ] + elif return_type == "pt": + output = output_tensor + elif output_type == "mp4" and return_type == "mp4": + output = response.content + return output + + +def prepare_decode( + tensor: "torch.Tensor", + processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None, + do_scaling: bool = True, + scaling_factor: Optional[float] = None, + shift_factor: Optional[float] = None, + output_type: Literal["mp4", "pil", "pt"] = "pil", + image_format: Literal["png", "jpg"] = "jpg", + partial_postprocess: bool = False, + height: Optional[int] = None, + width: Optional[int] = None, +): + headers = {} + parameters = { + "image_format": image_format, + "output_type": output_type, + "partial_postprocess": partial_postprocess, + "shape": list(tensor.shape), + "dtype": str(tensor.dtype).split(".")[-1], + } + if do_scaling and scaling_factor is not None: + parameters["scaling_factor"] = scaling_factor + if do_scaling and shift_factor is not None: + parameters["shift_factor"] = shift_factor + if do_scaling and scaling_factor is None: + parameters["do_scaling"] = do_scaling + elif do_scaling and scaling_factor is None and shift_factor is None: + parameters["do_scaling"] = do_scaling + if height is not None and width is not None: + parameters["height"] = height + parameters["width"] = width + headers["Content-Type"] = "tensor/binary" + headers["Accept"] = "tensor/binary" + if output_type == "pil" and image_format == "jpg" and processor is None: + headers["Accept"] = "image/jpeg" + elif output_type == "pil" and image_format == "png" and processor is None: + headers["Accept"] = "image/png" + elif output_type == "mp4": + headers["Accept"] = "text/plain" + tensor_data = safetensors.torch._tobytes(tensor, "tensor") + return {"data": tensor_data, "params": parameters, "headers": headers} + + +def remote_decode( + endpoint: str, + tensor: "torch.Tensor", + processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None, + do_scaling: bool = True, + scaling_factor: Optional[float] = None, + shift_factor: Optional[float] = None, + output_type: Literal["mp4", "pil", "pt"] = "pil", + return_type: Literal["mp4", "pil", "pt"] = "pil", + image_format: Literal["png", "jpg"] = "jpg", + partial_postprocess: bool = False, + input_tensor_type: Literal["binary"] = "binary", + output_tensor_type: Literal["binary"] = "binary", + height: Optional[int] = None, + width: Optional[int] = None, +) -> Union[Image.Image, List[Image.Image], bytes, "torch.Tensor"]: + """ + Hugging Face Hybrid Inference that allow running VAE decode remotely. + + Args: + endpoint (`str`): + Endpoint for Remote Decode. + tensor (`torch.Tensor`): + Tensor to be decoded. + processor (`VaeImageProcessor` or `VideoProcessor`, *optional*): + Used with `return_type="pt"`, and `return_type="pil"` for Video models. + do_scaling (`bool`, default `True`, *optional*): + **DEPRECATED**. **pass `scaling_factor`/`shift_factor` instead.** **still set + do_scaling=None/do_scaling=False for no scaling until option is removed** When `True` scaling e.g. `latents + / self.vae.config.scaling_factor` is applied remotely. If `False`, input must be passed with scaling + applied. + scaling_factor (`float`, *optional*): + Scaling is applied when passed e.g. 
[`latents / + self.vae.config.scaling_factor`](https://github.com/huggingface/diffusers/blob/7007febae5cff000d4df9059d9cf35133e8b2ca9/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L1083C37-L1083C77). + - SD v1: 0.18215 + - SD XL: 0.13025 + - Flux: 0.3611 + If `None`, input must be passed with scaling applied. + shift_factor (`float`, *optional*): + Shift is applied when passed e.g. `latents + self.vae.config.shift_factor`. + - Flux: 0.1159 + If `None`, input must be passed with scaling applied. + output_type (`"mp4"` or `"pil"` or `"pt", default `"pil"): + **Endpoint** output type. Subject to change. Report feedback on preferred type. + + `"mp4": Supported by video models. Endpoint returns `bytes` of video. `"pil"`: Supported by image and video + models. + Image models: Endpoint returns `bytes` of an image in `image_format`. Video models: Endpoint returns + `torch.Tensor` with partial `postprocessing` applied. + Requires `processor` as a flag (any `None` value will work). + `"pt"`: Support by image and video models. Endpoint returns `torch.Tensor`. + With `partial_postprocess=True` the tensor is postprocessed `uint8` image tensor. + + Recommendations: + `"pt"` with `partial_postprocess=True` is the smallest transfer for full quality. `"pt"` with + `partial_postprocess=False` is the most compatible with third party code. `"pil"` with + `image_format="jpg"` is the smallest transfer overall. + + return_type (`"mp4"` or `"pil"` or `"pt", default `"pil"): + **Function** return type. + + `"mp4": Function returns `bytes` of video. `"pil"`: Function returns `PIL.Image.Image`. + With `output_type="pil" no further processing is applied. With `output_type="pt" a `PIL.Image.Image` is + created. + `partial_postprocess=False` `processor` is required. `partial_postprocess=True` `processor` is + **not** required. + `"pt"`: Function returns `torch.Tensor`. + `processor` is **not** required. `partial_postprocess=False` tensor is `float16` or `bfloat16`, without + denormalization. `partial_postprocess=True` tensor is `uint8`, denormalized. + + image_format (`"png"` or `"jpg"`, default `jpg`): + Used with `output_type="pil"`. Endpoint returns `jpg` or `png`. + + partial_postprocess (`bool`, default `False`): + Used with `output_type="pt"`. `partial_postprocess=False` tensor is `float16` or `bfloat16`, without + denormalization. `partial_postprocess=True` tensor is `uint8`, denormalized. + + input_tensor_type (`"binary"`, default `"binary"`): + Tensor transfer type. + + output_tensor_type (`"binary"`, default `"binary"`): + Tensor transfer type. + + height (`int`, **optional**): + Required for `"packed"` latents. + + width (`int`, **optional**): + Required for `"packed"` latents. + + Returns: + output (`Image.Image` or `List[Image.Image]` or `bytes` or `torch.Tensor`). + """ + if input_tensor_type == "base64": + deprecate( + "input_tensor_type='base64'", + "1.0.0", + "input_tensor_type='base64' is deprecated. Using `binary`.", + standard_warn=False, + ) + input_tensor_type = "binary" + if output_tensor_type == "base64": + deprecate( + "output_tensor_type='base64'", + "1.0.0", + "output_tensor_type='base64' is deprecated. 
Using `binary`.", + standard_warn=False, + ) + output_tensor_type = "binary" + check_inputs_decode( + endpoint, + tensor, + processor, + do_scaling, + scaling_factor, + shift_factor, + output_type, + return_type, + image_format, + partial_postprocess, + input_tensor_type, + output_tensor_type, + height, + width, + ) + kwargs = prepare_decode( + tensor=tensor, + processor=processor, + do_scaling=do_scaling, + scaling_factor=scaling_factor, + shift_factor=shift_factor, + output_type=output_type, + image_format=image_format, + partial_postprocess=partial_postprocess, + height=height, + width=width, + ) + response = requests.post(endpoint, **kwargs) + if not response.ok: + raise RuntimeError(response.json()) + output = postprocess_decode( + response=response, + processor=processor, + output_type=output_type, + return_type=return_type, + partial_postprocess=partial_postprocess, + ) + return output + + +def check_inputs_encode( + endpoint: str, + image: Union["torch.Tensor", Image.Image], + scaling_factor: Optional[float] = None, + shift_factor: Optional[float] = None, +): + pass + + +def postprocess_encode( + response: requests.Response, +): + output_tensor = response.content + parameters = response.headers + shape = json.loads(parameters["shape"]) + dtype = parameters["dtype"] + torch_dtype = DTYPE_MAP[dtype] + output_tensor = torch.frombuffer(bytearray(output_tensor), dtype=torch_dtype).reshape(shape) + return output_tensor + + +def prepare_encode( + image: Union["torch.Tensor", Image.Image], + scaling_factor: Optional[float] = None, + shift_factor: Optional[float] = None, +): + headers = {} + parameters = {} + if scaling_factor is not None: + parameters["scaling_factor"] = scaling_factor + if shift_factor is not None: + parameters["shift_factor"] = shift_factor + if isinstance(image, torch.Tensor): + data = safetensors.torch._tobytes(image.contiguous(), "tensor") + parameters["shape"] = list(image.shape) + parameters["dtype"] = str(image.dtype).split(".")[-1] + else: + buffer = io.BytesIO() + image.save(buffer, format="PNG") + data = buffer.getvalue() + return {"data": data, "params": parameters, "headers": headers} + + +def remote_encode( + endpoint: str, + image: Union["torch.Tensor", Image.Image], + scaling_factor: Optional[float] = None, + shift_factor: Optional[float] = None, +) -> "torch.Tensor": + """ + Hugging Face Hybrid Inference that allow running VAE encode remotely. + + Args: + endpoint (`str`): + Endpoint for Remote Decode. + image (`torch.Tensor` or `PIL.Image.Image`): + Image to be encoded. + scaling_factor (`float`, *optional*): + Scaling is applied when passed e.g. [`latents * self.vae.config.scaling_factor`]. + - SD v1: 0.18215 + - SD XL: 0.13025 + - Flux: 0.3611 + If `None`, input must be passed with scaling applied. + shift_factor (`float`, *optional*): + Shift is applied when passed e.g. `latents - self.vae.config.shift_factor`. + - Flux: 0.1159 + If `None`, input must be passed with scaling applied. + + Returns: + output (`torch.Tensor`). 
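+
+    Example:
+        A minimal usage sketch (the endpoint URL is a placeholder and `init_image` is assumed to be
+        any RGB `PIL.Image.Image` loaded beforehand):
+
+        ```py
+        from diffusers.utils.remote_utils import remote_encode
+
+        latent = remote_encode(
+            endpoint="https://<your-encode-endpoint>/",  # placeholder endpoint
+            image=init_image,
+            scaling_factor=0.3611,  # Flux value listed above
+            shift_factor=0.1159,
+        )
+        ```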
+ """ + check_inputs_encode( + endpoint, + image, + scaling_factor, + shift_factor, + ) + kwargs = prepare_encode( + image=image, + scaling_factor=scaling_factor, + shift_factor=shift_factor, + ) + response = requests.post(endpoint, **kwargs) + if not response.ok: + raise RuntimeError(response.json()) + output = postprocess_encode( + response=response, + ) + return output diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/source_code_parsing_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/source_code_parsing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5f94711c21d825f4baa0b32bbc37fd94c89aba03 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/source_code_parsing_utils.py @@ -0,0 +1,52 @@ +import ast +import importlib +import inspect +import textwrap + + +class ReturnNameVisitor(ast.NodeVisitor): + """Thanks to ChatGPT for pairing.""" + + def __init__(self): + self.return_names = [] + + def visit_Return(self, node): + # Check if the return value is a tuple. + if isinstance(node.value, ast.Tuple): + for elt in node.value.elts: + if isinstance(elt, ast.Name): + self.return_names.append(elt.id) + else: + try: + self.return_names.append(ast.unparse(elt)) + except Exception: + self.return_names.append(str(elt)) + else: + if isinstance(node.value, ast.Name): + self.return_names.append(node.value.id) + else: + try: + self.return_names.append(ast.unparse(node.value)) + except Exception: + self.return_names.append(str(node.value)) + self.generic_visit(node) + + def _determine_parent_module(self, cls): + from diffusers import DiffusionPipeline + from diffusers.models.modeling_utils import ModelMixin + + if issubclass(cls, DiffusionPipeline): + return "pipelines" + elif issubclass(cls, ModelMixin): + return "models" + else: + raise NotImplementedError + + def get_ast_tree(self, cls, attribute_name="encode_prompt"): + parent_module_name = self._determine_parent_module(cls) + main_module = importlib.import_module(f"diffusers.{parent_module_name}") + current_cls_module = getattr(main_module, cls.__name__) + source_code = inspect.getsource(getattr(current_cls_module, attribute_name)) + source_code = textwrap.dedent(source_code) + tree = ast.parse(source_code) + return tree diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/state_dict_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/state_dict_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..50bfce8b15eb09fc2289f7eefa1d497d97bdbd5b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/state_dict_utils.py @@ -0,0 +1,366 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" +State dict utilities: utility methods for converting state dicts easily +""" + +import enum +import json + +from .import_utils import is_torch_available +from .logging import get_logger + + +if is_torch_available(): + import torch + + +logger = get_logger(__name__) + + +class StateDictType(enum.Enum): + """ + The mode to use when converting state dicts. + """ + + DIFFUSERS_OLD = "diffusers_old" + KOHYA_SS = "kohya_ss" + PEFT = "peft" + DIFFUSERS = "diffusers" + + +# We need to define a proper mapping for Unet since it uses different output keys than text encoder +# e.g. to_q_lora -> q_proj / to_q +UNET_TO_DIFFUSERS = { + ".to_out_lora.up": ".to_out.0.lora_B", + ".to_out_lora.down": ".to_out.0.lora_A", + ".to_q_lora.down": ".to_q.lora_A", + ".to_q_lora.up": ".to_q.lora_B", + ".to_k_lora.down": ".to_k.lora_A", + ".to_k_lora.up": ".to_k.lora_B", + ".to_v_lora.down": ".to_v.lora_A", + ".to_v_lora.up": ".to_v.lora_B", + ".lora.up": ".lora_B", + ".lora.down": ".lora_A", + ".to_out.lora_magnitude_vector": ".to_out.0.lora_magnitude_vector", +} + + +DIFFUSERS_TO_PEFT = { + ".q_proj.lora_linear_layer.up": ".q_proj.lora_B", + ".q_proj.lora_linear_layer.down": ".q_proj.lora_A", + ".k_proj.lora_linear_layer.up": ".k_proj.lora_B", + ".k_proj.lora_linear_layer.down": ".k_proj.lora_A", + ".v_proj.lora_linear_layer.up": ".v_proj.lora_B", + ".v_proj.lora_linear_layer.down": ".v_proj.lora_A", + ".out_proj.lora_linear_layer.up": ".out_proj.lora_B", + ".out_proj.lora_linear_layer.down": ".out_proj.lora_A", + ".lora_linear_layer.up": ".lora_B", + ".lora_linear_layer.down": ".lora_A", + "text_projection.lora.down.weight": "text_projection.lora_A.weight", + "text_projection.lora.up.weight": "text_projection.lora_B.weight", +} + +DIFFUSERS_OLD_TO_PEFT = { + ".to_q_lora.up": ".q_proj.lora_B", + ".to_q_lora.down": ".q_proj.lora_A", + ".to_k_lora.up": ".k_proj.lora_B", + ".to_k_lora.down": ".k_proj.lora_A", + ".to_v_lora.up": ".v_proj.lora_B", + ".to_v_lora.down": ".v_proj.lora_A", + ".to_out_lora.up": ".out_proj.lora_B", + ".to_out_lora.down": ".out_proj.lora_A", + ".lora_linear_layer.up": ".lora_B", + ".lora_linear_layer.down": ".lora_A", +} + +PEFT_TO_DIFFUSERS = { + ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", + ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", + ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", + ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", + ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", + ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", + ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", + ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", + "to_k.lora_A": "to_k.lora.down", + "to_k.lora_B": "to_k.lora.up", + "to_q.lora_A": "to_q.lora.down", + "to_q.lora_B": "to_q.lora.up", + "to_v.lora_A": "to_v.lora.down", + "to_v.lora_B": "to_v.lora.up", + "to_out.0.lora_A": "to_out.0.lora.down", + "to_out.0.lora_B": "to_out.0.lora.up", +} + +DIFFUSERS_OLD_TO_DIFFUSERS = { + ".to_q_lora.up": ".q_proj.lora_linear_layer.up", + ".to_q_lora.down": ".q_proj.lora_linear_layer.down", + ".to_k_lora.up": ".k_proj.lora_linear_layer.up", + ".to_k_lora.down": ".k_proj.lora_linear_layer.down", + ".to_v_lora.up": ".v_proj.lora_linear_layer.up", + ".to_v_lora.down": ".v_proj.lora_linear_layer.down", + ".to_out_lora.up": ".out_proj.lora_linear_layer.up", + ".to_out_lora.down": ".out_proj.lora_linear_layer.down", + ".to_k.lora_magnitude_vector": ".k_proj.lora_magnitude_vector", + ".to_v.lora_magnitude_vector": ".v_proj.lora_magnitude_vector", + ".to_q.lora_magnitude_vector": 
".q_proj.lora_magnitude_vector", + ".to_out.lora_magnitude_vector": ".out_proj.lora_magnitude_vector", +} + +PEFT_TO_KOHYA_SS = { + "lora_A": "lora_down", + "lora_B": "lora_up", + # This is not a comprehensive dict as kohya format requires replacing `.` with `_` in keys, + # adding prefixes and adding alpha values + # Check `convert_state_dict_to_kohya` for more +} + +PEFT_STATE_DICT_MAPPINGS = { + StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_PEFT, + StateDictType.DIFFUSERS: DIFFUSERS_TO_PEFT, +} + +DIFFUSERS_STATE_DICT_MAPPINGS = { + StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, + StateDictType.PEFT: PEFT_TO_DIFFUSERS, +} + +KOHYA_STATE_DICT_MAPPINGS = {StateDictType.PEFT: PEFT_TO_KOHYA_SS} + +KEYS_TO_ALWAYS_REPLACE = { + ".processor.": ".", +} + + +def convert_state_dict(state_dict, mapping): + r""" + Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + mapping (`dict[str, str]`): + The mapping to use for conversion, the mapping should be a dictionary with the following structure: + - key: the pattern to replace + - value: the pattern to replace with + + Returns: + converted_state_dict (`dict`) + The converted state dict. + """ + converted_state_dict = {} + for k, v in state_dict.items(): + # First, filter out the keys that we always want to replace + for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): + if pattern in k: + new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] + k = k.replace(pattern, new_pattern) + + for pattern in mapping.keys(): + if pattern in k: + new_pattern = mapping[pattern] + k = k.replace(pattern, new_pattern) + break + converted_state_dict[k] = v + return converted_state_dict + + +def convert_state_dict_to_peft(state_dict, original_type=None, **kwargs): + r""" + Converts a state dict to the PEFT format The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or + new diffusers format (`DIFFUSERS`). The method only supports the conversion from diffusers old/new to PEFT for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + """ + if original_type is None: + # Old diffusers to PEFT + if any("to_out_lora" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS_OLD + elif any("lora_linear_layer" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS + else: + raise ValueError("Could not automatically infer state dict type") + + if original_type not in PEFT_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + mapping = PEFT_STATE_DICT_MAPPINGS[original_type] + return convert_state_dict(state_dict, mapping) + + +def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): + r""" + Converts a state dict to new diffusers format. The state dict can be from previous diffusers format + (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will + return the state dict as is. + + The method only supports the conversion from diffusers old, PEFT to diffusers new for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. 
+ original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + kwargs (`dict`, *args*): + Additional arguments to pass to the method. + + - **adapter_name**: For example, in case of PEFT, some keys will be prepended + with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in + `get_peft_model_state_dict` method: + https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 + but we add it here in case we don't want to rely on that method. + """ + peft_adapter_name = kwargs.pop("adapter_name", None) + if peft_adapter_name is not None: + peft_adapter_name = "." + peft_adapter_name + else: + peft_adapter_name = "" + + if original_type is None: + # Old diffusers to PEFT + if any("to_out_lora" in k for k in state_dict.keys()): + original_type = StateDictType.DIFFUSERS_OLD + elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): + original_type = StateDictType.PEFT + elif any("lora_linear_layer" in k for k in state_dict.keys()): + # nothing to do + return state_dict + else: + raise ValueError("Could not automatically infer state dict type") + + if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] + return convert_state_dict(state_dict, mapping) + + +def convert_unet_state_dict_to_peft(state_dict): + r""" + Converts a state dict from UNet format to diffusers format - i.e. by removing some keys + """ + mapping = UNET_TO_DIFFUSERS + return convert_state_dict(state_dict, mapping) + + +def convert_all_state_dict_to_peft(state_dict): + r""" + Attempts to first `convert_state_dict_to_peft`, and if it doesn't detect `lora_linear_layer` for a valid + `DIFFUSERS` LoRA for example, attempts to exclusively convert the Unet `convert_unet_state_dict_to_peft` + """ + try: + peft_dict = convert_state_dict_to_peft(state_dict) + except Exception as e: + if str(e) == "Could not automatically infer state dict type": + peft_dict = convert_unet_state_dict_to_peft(state_dict) + else: + raise + + if not any("lora_A" in key or "lora_B" in key for key in peft_dict.keys()): + raise ValueError("Your LoRA was not converted to PEFT") + + return peft_dict + + +def convert_state_dict_to_kohya(state_dict, original_type=None, **kwargs): + r""" + Converts a `PEFT` state dict to `Kohya` format that can be used in AUTOMATIC1111, ComfyUI, SD.Next, InvokeAI, etc. + The method only supports the conversion from PEFT to Kohya for now. + + Args: + state_dict (`dict[str, torch.Tensor]`): + The state dict to convert. + original_type (`StateDictType`, *optional*): + The original type of the state dict, if not provided, the method will try to infer it automatically. + kwargs (`dict`, *args*): + Additional arguments to pass to the method. + + - **adapter_name**: For example, in case of PEFT, some keys will be prepended + with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in + `get_peft_model_state_dict` method: + https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 + but we add it here in case we don't want to rely on that method. 
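+
+    Example:
+        A minimal sketch, assuming `peft_sd` holds PEFT-format LoRA weights whose keys are prefixed
+        with `"unet."` or `"text_encoder."`:
+
+        ```py
+        kohya_sd = convert_state_dict_to_kohya(peft_sd)
+        # e.g. "unet.mid_block.attentions.0.proj_out.lora_A.weight"
+        #  ->  "lora_unet_mid_block_attentions_0_proj_out.lora_down.weight"
+        # an `.alpha` entry is also added for every `lora_down` weight
+        ```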
+ """ + try: + import torch + except ImportError: + logger.error("Converting PEFT state dicts to Kohya requires torch to be installed.") + raise + + peft_adapter_name = kwargs.pop("adapter_name", None) + if peft_adapter_name is not None: + peft_adapter_name = "." + peft_adapter_name + else: + peft_adapter_name = "" + + if original_type is None: + if any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): + original_type = StateDictType.PEFT + + if original_type not in KOHYA_STATE_DICT_MAPPINGS.keys(): + raise ValueError(f"Original type {original_type} is not supported") + + # Use the convert_state_dict function with the appropriate mapping + kohya_ss_partial_state_dict = convert_state_dict(state_dict, KOHYA_STATE_DICT_MAPPINGS[StateDictType.PEFT]) + kohya_ss_state_dict = {} + + # Additional logic for replacing header, alpha parameters `.` with `_` in all keys + for kohya_key, weight in kohya_ss_partial_state_dict.items(): + if "text_encoder_2." in kohya_key: + kohya_key = kohya_key.replace("text_encoder_2.", "lora_te2.") + elif "text_encoder." in kohya_key: + kohya_key = kohya_key.replace("text_encoder.", "lora_te1.") + elif "unet" in kohya_key: + kohya_key = kohya_key.replace("unet", "lora_unet") + elif "lora_magnitude_vector" in kohya_key: + kohya_key = kohya_key.replace("lora_magnitude_vector", "dora_scale") + + kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) + kohya_key = kohya_key.replace(peft_adapter_name, "") # Kohya doesn't take names + kohya_ss_state_dict[kohya_key] = weight + if "lora_down" in kohya_key: + alpha_key = f"{kohya_key.split('.')[0]}.alpha" + kohya_ss_state_dict[alpha_key] = torch.tensor(len(weight)) + + return kohya_ss_state_dict + + +def state_dict_all_zero(state_dict, filter_str=None): + if filter_str is not None: + if isinstance(filter_str, str): + filter_str = [filter_str] + state_dict = {k: v for k, v in state_dict.items() if any(f in k for f in filter_str)} + + return all(torch.all(param == 0).item() for param in state_dict.values()) + + +def _load_sft_state_dict_metadata(model_file: str): + import safetensors.torch + + from ..loaders.lora_base import LORA_ADAPTER_METADATA_KEY + + with safetensors.torch.safe_open(model_file, framework="pt", device="cpu") as f: + metadata = f.metadata() or {} + + metadata.pop("format", None) + if metadata: + raw = metadata.get(LORA_ADAPTER_METADATA_KEY) + return json.loads(raw) if raw else None + else: + return None diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/testing_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/testing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d6a7d6ce4bbd91611bbf0e7b535cbcd2fbbfca5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/testing_utils.py @@ -0,0 +1,1601 @@ +import functools +import glob +import importlib +import importlib.metadata +import inspect +import io +import logging +import multiprocessing +import os +import random +import re +import struct +import sys +import tempfile +import time +import unittest +import urllib.parse +from collections import UserDict +from contextlib import contextmanager +from io import BytesIO, StringIO +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps +import requests +from numpy.linalg import norm +from packaging import version + +from .constants import DIFFUSERS_REQUEST_TIMEOUT +from 
.import_utils import ( + BACKENDS_MAPPING, + is_accelerate_available, + is_bitsandbytes_available, + is_compel_available, + is_flax_available, + is_gguf_available, + is_kernels_available, + is_note_seq_available, + is_onnx_available, + is_opencv_available, + is_optimum_quanto_available, + is_peft_available, + is_timm_available, + is_torch_available, + is_torch_version, + is_torchao_available, + is_torchsde_available, + is_transformers_available, +) +from .logging import get_logger + + +if is_torch_available(): + import torch + + IS_ROCM_SYSTEM = torch.version.hip is not None + IS_CUDA_SYSTEM = torch.version.cuda is not None + IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None +else: + IS_ROCM_SYSTEM = False + IS_CUDA_SYSTEM = False + IS_XPU_SYSTEM = False + +global_rng = random.Random() + +logger = get_logger(__name__) +logger.warning( + "diffusers.utils.testing_utils' is deprecated and will be removed in a future version. " + "Determinism and device backend utilities have been moved to `diffusers.utils.torch_utils`. " +) +_required_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version +) > version.parse("0.5") +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) > version.parse("4.33") + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version +BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40)) + +if is_torch_available(): + import torch + + # Set a backend environment variable for any extra module import required for a custom accelerator + if "DIFFUSERS_TEST_BACKEND" in os.environ: + backend = os.environ["DIFFUSERS_TEST_BACKEND"] + try: + _ = importlib.import_module(backend) + except ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module \ + to enable a specified backend.):\n{e}" + ) from e + + if "DIFFUSERS_TEST_DEVICE" in os.environ: + torch_device = os.environ["DIFFUSERS_TEST_DEVICE"] + try: + # try creating device to see if provided device is valid + _ = torch.device(torch_device) + except RuntimeError as e: + raise RuntimeError( + f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}" + ) from e + logger.info(f"torch_device overrode to {torch_device}") + else: + if torch.cuda.is_available(): + torch_device = "cuda" + elif torch.xpu.is_available(): + torch_device = "xpu" + else: + torch_device = "cpu" + is_torch_higher_equal_than_1_12 = version.parse( + version.parse(torch.__version__).base_version + ) >= version.parse("1.12") + + if is_torch_higher_equal_than_1_12: + # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details + mps_backend_registered = hasattr(torch.backends, "mps") + torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device + + from .torch_utils import get_torch_cuda_device_capability + + +def torch_all_close(a, b, *args, **kwargs): + if not is_torch_available(): + raise ValueError("PyTorch needs to be installed to use this function.") + if not torch.allclose(a, b, *args, **kwargs): + assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}." 
+ return True + + +def numpy_cosine_similarity_distance(a, b): + similarity = np.dot(a, b) / (norm(a) * norm(b)) + distance = 1.0 - similarity.mean() + + return distance + + +def check_if_dicts_are_equal(dict1, dict2): + dict1, dict2 = dict1.copy(), dict2.copy() + + for key, value in dict1.items(): + if isinstance(value, set): + dict1[key] = sorted(value) + for key, value in dict2.items(): + if isinstance(value, set): + dict2[key] = sorted(value) + + for key in dict1: + if key not in dict2: + return False + if dict1[key] != dict2[key]: + return False + + for key in dict2: + if key not in dict1: + return False + + return True + + +def print_tensor_test( + tensor, + limit_to_slices=None, + max_torch_print=None, + filename="test_corrections.txt", + expected_tensor_name="expected_slice", +): + if max_torch_print: + torch.set_printoptions(threshold=10_000) + + test_name = os.environ.get("PYTEST_CURRENT_TEST") + if not torch.is_tensor(tensor): + tensor = torch.from_numpy(tensor) + if limit_to_slices: + tensor = tensor[0, -3:, -3:, -1] + + tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") + # format is usually: + # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) + output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") + test_file, test_class, test_fn = test_name.split("::") + test_fn = test_fn.split()[0] + with open(filename, "a") as f: + print("::".join([test_file, test_class, test_fn, output_str]), file=f) + + +def get_tests_dir(append_path=None): + """ + Args: + append_path: optional path to append to the tests dir path + Return: + The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is + joined after the `tests` dir the former is provided. + """ + # this function caller's __file__ + caller__file__ = inspect.stack()[1][1] + tests_dir = os.path.abspath(os.path.dirname(caller__file__)) + + while not tests_dir.endswith("tests"): + tests_dir = os.path.dirname(tests_dir) + + if append_path: + return Path(tests_dir, append_path).as_posix() + else: + return tests_dir + + +# Taken from the following PR: +# https://github.com/huggingface/accelerate/pull/1964 +def str_to_bool(value) -> int: + """ + Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`, + `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`; + """ + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError(f"invalid truth value {value}") + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. 
+ raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) +_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False) + + +def floats_tensor(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.random() * scale) + + return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def nightly(test_case): + """ + Decorator marking a test that runs nightly in the diffusers CI. + + Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) + + +def is_torch_compile(test_case): + """ + Decorator marking a test that runs compile tests in the diffusers CI. + + Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case) + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. + """ + return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) + + +def require_torch_2(test_case): + """ + Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. 
+ """ + return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( + test_case + ) + + +def require_torch_version_greater_equal(torch_version): + """Decorator marking a test that requires torch with a specific version or greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}" + )(test_case) + + return decorator + + +def require_torch_version_greater(torch_version): + """Decorator marking a test that requires torch with a specific version greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than {torch_version}" + )(test_case) + + return decorator + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( + test_case + ) + + +def require_torch_cuda_compatibility(expected_compute_capability): + def decorator(test_case): + if torch.cuda.is_available(): + current_compute_capability = get_torch_cuda_device_capability() + return unittest.skipUnless( + float(current_compute_capability) == float(expected_compute_capability), + "Test not supported for this compute capability.", + ) + + return decorator + + +# These decorators are for accelerator-specific behaviours that are not GPU-specific +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accelerator backend and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")( + test_case + ) + + +def require_torch_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without + multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests + -k "multi_gpu" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_torch_multi_accelerator(test_case): + """ + Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine + without multiple hardware accelerators. 
+ """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless( + torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators" + )(test_case) + + +def require_torch_accelerator_with_fp16(test_case): + """Decorator marking a test that requires an accelerator with support for the FP16 data type.""" + return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")( + test_case + ) + + +def require_torch_accelerator_with_fp64(test_case): + """Decorator marking a test that requires an accelerator with support for the FP64 data type.""" + return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")( + test_case + ) + + +def require_big_gpu_with_torch_cuda(test_case): + """ + Decorator marking a test that requires a bigger GPU (24GB) for execution. Some example pipelines: Flux, SD3, Cog, + etc. + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not torch.cuda.is_available(): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + device_properties = torch.cuda.get_device_properties(0) + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory" + )(test_case) + + +def require_big_accelerator(test_case): + """ + Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines: + Flux, SD3, Cog, etc. + """ + import pytest + + test_case = pytest.mark.big_accelerator(test_case) + + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not (torch.cuda.is_available() or torch.xpu.is_available()): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + if torch.xpu.is_available(): + device_properties = torch.xpu.get_device_properties(0) + else: + device_properties = torch.cuda.get_device_properties(0) + + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, + f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory", + )(test_case) + + +def require_torch_accelerator_with_training(test_case): + """Decorator marking a test that requires an accelerator with support for training.""" + return unittest.skipUnless( + is_torch_available() and backend_supports_training(torch_device), + "test requires accelerator with training support", + )(test_case) + + +def skip_mps(test_case): + """Decorator marking a test to skip if torch_device is 'mps'""" + return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_compel(test_case): + """ + Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when + the library is not installed. + """ + return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case) + + +def require_onnxruntime(test_case): + """ + Decorator marking a test that requires onnxruntime. 
These tests are skipped when onnxruntime isn't installed. + """ + return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case) + + +def require_note_seq(test_case): + """ + Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed. + """ + return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) + + +def require_accelerator(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case) + + +def require_torchsde(test_case): + """ + Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed. + """ + return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case) + + +def require_peft_backend(test_case): + """ + Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and + transformers. + """ + return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires timm. These tests are skipped when timm isn't installed. + """ + return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case) + + +def require_bitsandbytes(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed. + """ + return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case) + + +def require_quanto(test_case): + """ + Decorator marking a test that requires quanto. These tests are skipped when quanto isn't installed. + """ + return unittest.skipUnless(is_optimum_quanto_available(), "test requires quanto")(test_case) + + +def require_accelerate(test_case): + """ + Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed. + """ + return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case) + + +def require_peft_version_greater(peft_version): + """ + Decorator marking a test that requires PEFT backend with a specific version, this would require some specific + versions of PEFT and transformers. + """ + + def decorator(test_case): + correct_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version + ) > version.parse(peft_version) + return unittest.skipUnless( + correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}" + )(test_case) + + return decorator + + +def require_transformers_version_greater(transformers_version): + """ + Decorator marking a test that requires transformers with a specific version, this would require some specific + versions of PEFT and transformers. 
+ """ + + def decorator(test_case): + correct_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version + ) > version.parse(transformers_version) + return unittest.skipUnless( + correct_transformers_version, + f"test requires transformers with the version greater than {transformers_version}", + )(test_case) + + return decorator + + +def require_accelerate_version_greater(accelerate_version): + def decorator(test_case): + correct_accelerate_version = is_accelerate_available() and version.parse( + version.parse(importlib.metadata.version("accelerate")).base_version + ) > version.parse(accelerate_version) + return unittest.skipUnless( + correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}." + )(test_case) + + return decorator + + +def require_bitsandbytes_version_greater(bnb_version): + def decorator(test_case): + correct_bnb_version = is_bitsandbytes_available() and version.parse( + version.parse(importlib.metadata.version("bitsandbytes")).base_version + ) > version.parse(bnb_version) + return unittest.skipUnless( + correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}." + )(test_case) + + return decorator + + +def require_hf_hub_version_greater(hf_hub_version): + def decorator(test_case): + correct_hf_hub_version = version.parse( + version.parse(importlib.metadata.version("huggingface_hub")).base_version + ) > version.parse(hf_hub_version) + return unittest.skipUnless( + correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}." + )(test_case) + + return decorator + + +def require_gguf_version_greater_or_equal(gguf_version): + def decorator(test_case): + correct_gguf_version = is_gguf_available() and version.parse( + version.parse(importlib.metadata.version("gguf")).base_version + ) >= version.parse(gguf_version) + return unittest.skipUnless( + correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}." + )(test_case) + + return decorator + + +def require_torchao_version_greater_or_equal(torchao_version): + def decorator(test_case): + correct_torchao_version = is_torchao_available() and version.parse( + version.parse(importlib.metadata.version("torchao")).base_version + ) >= version.parse(torchao_version) + return unittest.skipUnless( + correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}." + )(test_case) + + return decorator + + +def require_kernels_version_greater_or_equal(kernels_version): + def decorator(test_case): + correct_kernels_version = is_kernels_available() and version.parse( + version.parse(importlib.metadata.version("kernels")).base_version + ) >= version.parse(kernels_version) + return unittest.skipUnless( + correct_kernels_version, f"Test requires kernels with version greater than {kernels_version}." 
+ )(test_case) + + return decorator + + +def deprecate_after_peft_backend(test_case): + """ + Decorator marking a test that will be skipped after PEFT backend + """ + return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case) + + +def get_python_version(): + sys_info = sys.version_info + major, minor = sys_info.major, sys_info.minor + return major, minor + + +def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray: + if isinstance(arry, str): + if local_path is not None: + # local_path can be passed to correct images of tests + return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix() + elif arry.startswith("http://") or arry.startswith("https://"): + response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = np.load(BytesIO(response.content)) + elif os.path.isfile(arry): + arry = np.load(arry) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path" + ) + elif isinstance(arry, np.ndarray): + pass + else: + raise ValueError( + "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a" + " ndarray." + ) + + return arry + + +def load_pt(url: str, map_location: Optional[str] = None, weights_only: Optional[bool] = True): + response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = torch.load(BytesIO(response.content), map_location=map_location, weights_only=weights_only) + return arry + + +def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image." + ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def preprocess_image(image: PIL.Image, batch_size: int): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=100, + loop=0, + ) + return output_gif_path + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
+ """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + for tri in faces.tolist(): + f.write(format.pack(len(tri), *tri)) + format = struct.Struct(" str: + if is_opencv_available(): + import cv2 + else: + raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video")) + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + h, w, c = video_frames[0].shape + video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h)) + for i in range(len(video_frames)): + img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) + video_writer.write(img) + return output_video_path + + +def load_hf_numpy(path) -> np.ndarray: + base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main" + + if not path.startswith("http://") and not path.startswith("https://"): + path = os.path.join(base_url, urllib.parse.quote(path)) + + return load_numpy(path) + + +# --- pytest conf functions --- # + +# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once +pytest_opt_registered = {} + + +def pytest_addoption_shared(parser): + """ + This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. + + It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` + option. + + """ + option = "--make-reports" + if option not in pytest_opt_registered: + parser.addoption( + option, + action="store", + default=False, + help="generate report files. The value of this option is used as a prefix to report names", + ) + pytest_opt_registered[option] = 1 + + +def pytest_terminal_summary_main(tr, id): + """ + Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current + directory. The report files are prefixed with the test suite name. + + This function emulates --duration and -rA pytest arguments. + + This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined + there. 
+ + Args: + - tr: `terminalreporter` passed from `conftest.py` + - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is + needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. + + NB: this functions taps into a private _pytest API and while unlikely, it could break should + pytest do internal changes - also it calls default internal methods of terminalreporter which + can be hijacked by various `pytest-` plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = "reports" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{id}_{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g. 
+ # pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + with open(report_files["passes"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# Adapted from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905 +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests (methods or entire classes). They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(obj): + # If decorating a class, wrap each test method on it + if inspect.isclass(obj): + for attr_name, attr_value in list(obj.__dict__.items()): + if callable(attr_value) and attr_name.startswith("test"): + # recursively decorate the method + setattr(obj, attr_name, decorator(attr_value)) + return obj + + # Otherwise we're decorating a single test function / method + @functools.wraps(obj) + def wrapper(*args, **kwargs): + retry_count = 1 + while retry_count < max_attempts: + try: + return obj(*args, **kwargs) + except Exception as err: + msg = ( + f"[FLAKY] {description or obj.__name__!r} " + f"failed on attempt {retry_count}/{max_attempts}: {err}" + ) + print(msg, file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return obj(*args, **kwargs) + + return wrapper + + return decorator + + +# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787 +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`. 
+ target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. + """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_methohd = "spawn" + ctx = multiprocessing.get_context(start_methohd) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test to exit properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f"{results['error']}") + + +class CaptureLogger: + """ + Args: + Context manager to capture `logging` streams + logger: 'logging` logger object + Returns: + The captured output is available via `self.out` + Example: + ```python + >>> from diffusers import logging + >>> from diffusers.testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py") + >>> with CaptureLogger(logger) as cl: + ... logger.info(msg) + >>> assert cl.out, msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + from .torch_utils import enable_full_determinism as _enable_full_determinism + + logger.warning( + "enable_full_determinism has been moved to diffusers.utils.torch_utils. " + "Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _enable_full_determinism() + + +def disable_full_determinism(): + from .torch_utils import disable_full_determinism as _disable_full_determinism + + logger.warning( + "disable_full_determinism has been moved to diffusers.utils.torch_utils. " + "Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version." 
+ ) + return _disable_full_determinism() + + +# Utils for custom and alternative accelerator devices +def _is_torch_fp16_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float16).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +def _is_torch_fp64_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float64).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch +if is_torch_available(): + # Behaviour flags + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} + + # Function definitions + BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, + } + BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, + } + BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, + } + BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, + } + BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, + } + + +# This dispatches a defined function according to the accelerator from the function definitions. +def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if not callable(fn): + return fn + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + from .torch_utils import backend_manual_seed as _backend_manual_seed + + logger.warning( + "backend_manual_seed has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." 
+ ) + return _backend_manual_seed(device, seed) + + +def backend_synchronize(device: str): + from .torch_utils import backend_synchronize as _backend_synchronize + + logger.warning( + "backend_synchronize has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_synchronize(device) + + +def backend_empty_cache(device: str): + from .torch_utils import backend_empty_cache as _backend_empty_cache + + logger.warning( + "backend_empty_cache has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_empty_cache(device) + + +def backend_device_count(device: str): + from .torch_utils import backend_device_count as _backend_device_count + + logger.warning( + "backend_device_count has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_device_count(device) + + +def backend_reset_peak_memory_stats(device: str): + from .torch_utils import backend_reset_peak_memory_stats as _backend_reset_peak_memory_stats + + logger.warning( + "backend_reset_peak_memory_stats has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_reset_peak_memory_stats(device) + + +def backend_reset_max_memory_allocated(device: str): + from .torch_utils import backend_reset_max_memory_allocated as _backend_reset_max_memory_allocated + + logger.warning( + "backend_reset_max_memory_allocated has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_reset_max_memory_allocated(device) + + +def backend_max_memory_allocated(device: str): + from .torch_utils import backend_max_memory_allocated as _backend_max_memory_allocated + + logger.warning( + "backend_max_memory_allocated has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_max_memory_allocated(device) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. +def backend_supports_training(device: str): + from .torch_utils import backend_supports_training as _backend_supports_training + + logger.warning( + "backend_supports_training has been moved to diffusers.utils.torch_utils. " + "diffusers.utils.testing_utils is deprecated and will be removed in a future version." + ) + return _backend_supports_training(device) + + +# Guard for when Torch is not available +if is_torch_available(): + # Update device function dict mapping + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." 
+ ) from e + + if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}") + + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") + + if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + # Add one entry here for each `BACKEND_*` dictionary. + update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") + update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING") + update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN") + update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN") + update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN") + + +# Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers/testing_utils.py#L3090 + +# Type definition of key used in `Expectations` class. +DeviceProperties = Tuple[Union[str, None], Union[int, None]] + + +@functools.lru_cache +def get_device_properties() -> DeviceProperties: + """ + Get environment device properties. 
+ """ + if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM: + import torch + + major, _ = torch.cuda.get_device_capability() + if IS_ROCM_SYSTEM: + return ("rocm", major) + else: + return ("cuda", major) + elif IS_XPU_SYSTEM: + import torch + + # To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def + arch = torch.xpu.get_device_capability()["architecture"] + gen_mask = 0x000000FF00000000 + gen = (arch & gen_mask) >> 32 + return ("xpu", gen) + else: + return (torch_device, None) + + +if TYPE_CHECKING: + DevicePropertiesUserDict = UserDict[DeviceProperties, Any] +else: + DevicePropertiesUserDict = UserDict + +if is_torch_available(): + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.group_offloading import ( + _GROUP_ID_LAZY_LEAF, + _compute_group_hash, + _find_parent_module_in_module_dict, + _gather_buffers_with_no_group_offloading_parent, + _gather_parameters_with_no_group_offloading_parent, + ) + + def _get_expected_safetensors_files( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> Set[str]: + expected_files = set() + + def get_hashed_filename(group_id: str) -> str: + short_hash = _compute_group_hash(group_id) + return os.path.join(offload_to_disk_path, f"group_{short_hash}.safetensors") + + if offload_type == "block_level": + if num_blocks_per_group is None: + raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.") + + # Handle groups of ModuleList and Sequential blocks + unmatched_modules = [] + for name, submodule in module.named_children(): + if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)): + unmatched_modules.append(module) + continue + + for i in range(0, len(submodule), num_blocks_per_group): + current_modules = submodule[i : i + num_blocks_per_group] + if not current_modules: + continue + group_id = f"{name}_{i}_{i + len(current_modules) - 1}" + expected_files.add(get_hashed_filename(group_id)) + + # Handle the group for unmatched top-level modules and parameters + for module in unmatched_modules: + expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group")) + + elif offload_type == "leaf_level": + # Handle leaf-level module groups + for name, submodule in module.named_modules(): + if isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + # These groups will always have parameters, so a file is expected + expected_files.add(get_hashed_filename(name)) + + # Handle groups for non-leaf parameters/buffers + modules_with_group_offloading = { + name for name, sm in module.named_modules() if isinstance(sm, _GO_LC_SUPPORTED_PYTORCH_LAYERS) + } + parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading) + buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading) + + all_orphans = parameters + buffers + if all_orphans: + parent_to_tensors = {} + module_dict = dict(module.named_modules()) + for tensor_name, _ in all_orphans: + parent_name = _find_parent_module_in_module_dict(tensor_name, module_dict) + if parent_name not in parent_to_tensors: + parent_to_tensors[parent_name] = [] + parent_to_tensors[parent_name].append(tensor_name) + + for parent_name in parent_to_tensors: + # A file is expected for each parent that gathers orphaned tensors + 
expected_files.add(get_hashed_filename(parent_name)) + expected_files.add(get_hashed_filename(_GROUP_ID_LAZY_LEAF)) + + else: + raise ValueError(f"Unsupported offload_type: {offload_type}") + + return expected_files + + def _check_safetensors_serialization( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> bool: + if not os.path.isdir(offload_to_disk_path): + return False, None, None + + expected_files = _get_expected_safetensors_files( + module, offload_to_disk_path, offload_type, num_blocks_per_group + ) + actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors"))) + missing_files = expected_files - actual_files + extra_files = actual_files - expected_files + + is_correct = not missing_files and not extra_files + return is_correct, extra_files, missing_files + + +class Expectations(DevicePropertiesUserDict): + def get_expectation(self) -> Any: + """ + Find best matching expectation based on environment device properties. + """ + return self.find_expectation(get_device_properties()) + + @staticmethod + def is_default(key: DeviceProperties) -> bool: + return all(p is None for p in key) + + @staticmethod + def score(key: DeviceProperties, other: DeviceProperties) -> int: + """ + Returns score indicating how similar two instances of the `Properties` tuple are. Points are calculated using + bits, but documented as int. Rules are as follows: + * Matching `type` gives 8 points. + * Semi-matching `type`, for example cuda and rocm, gives 4 points. + * Matching `major` (compute capability major version) gives 2 points. + * Default expectation (if present) gives 1 points. + """ + (device_type, major) = key + (other_device_type, other_major) = other + + score = 0b0 + if device_type == other_device_type: + score |= 0b1000 + elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]: + score |= 0b100 + + if major == other_major and other_major is not None: + score |= 0b10 + + if Expectations.is_default(other): + score |= 0b1 + + return int(score) + + def find_expectation(self, key: DeviceProperties = (None, None)) -> Any: + """ + Find best matching expectation based on provided device properties. + """ + (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0])) + + if Expectations.score(key, result_key) == 0: + raise ValueError(f"No matching expectation found for {key}") + + return result + + def __repr__(self): + return f"{self.data}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/torch_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/torch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ab8cda431f0e3b5620b63d7d282a747f616b57 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/torch_utils.py @@ -0,0 +1,334 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
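# Illustrative usage sketch (annotation, not part of the diff): the `Expectations` container defined
# above in the testing utilities keys expected values by (device_type, major_version); calling
# `get_expectation()` returns the entry whose key best matches the running environment. The numbers
# below are invented purely for illustration.
expected_slice = Expectations(
    {
        ("cuda", 8): [0.1234, 0.5678],  # hypothetical expectation for an Ampere-class GPU
        ("rocm", None): [0.1230, 0.5670],
        (None, None): [0.1200, 0.5600],  # default fallback used when nothing more specific matches
    }
).get_expectation()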
+""" +PyTorch utilities: Utilities related to PyTorch +""" + +import functools +import os +from typing import Callable, Dict, List, Optional, Tuple, Union + +from . import logging +from .import_utils import is_torch_available, is_torch_npu_available, is_torch_version + + +if is_torch_available(): + import torch + from torch.fft import fftn, fftshift, ifftn, ifftshift + + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} + BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, + } + BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, + } + BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, + } + BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, + } + BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, + } +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +try: + from torch._dynamo import allow_in_graph as maybe_allow_in_graph +except (ImportError, ModuleNotFoundError): + + def maybe_allow_in_graph(cls): + return cls + + +# This dispatches a defined function according to the accelerator from the function definitions. +def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if not callable(fn): + return fn + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_synchronize(device: str): + return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +def backend_reset_peak_memory_stats(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS) + + +def backend_reset_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED) + + +def backend_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. 
+def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + +def randn_tensor( + shape: Union[Tuple, List], + generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None, + device: Optional[Union[str, "torch.device"]] = None, + dtype: Optional["torch.dtype"] = None, + layout: Optional["torch.layout"] = None, +): + """A helper function to create random tensors on the desired `device` with the desired `dtype`. When + passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor + is always created on the CPU. + """ + # device on which tensor is created defaults to device + if isinstance(device, str): + device = torch.device(device) + rand_device = device + batch_size = shape[0] + + layout = layout or torch.strided + device = device or torch.device("cpu") + + if generator is not None: + gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type + if gen_device_type != device.type and gen_device_type == "cpu": + rand_device = "cpu" + if device != "mps": + logger.info( + f"The passed generator was created on 'cpu' even though a tensor on {device} was expected." + f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably" + f" slightly speed up this function by passing a generator that was created on the {device} device." + ) + elif gen_device_type != device.type and gen_device_type == "cuda": + raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.") + + # make sure generator list of length 1 is treated like a non-list + if isinstance(generator, list) and len(generator) == 1: + generator = generator[0] + + if isinstance(generator, list): + shape = (1,) + shape[1:] + latents = [ + torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout) + for i in range(batch_size) + ] + latents = torch.cat(latents, dim=0).to(device) + else: + latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device) + + return latents + + +def is_compiled_module(module) -> bool: + """Check whether the module was compiled with torch.compile()""" + if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): + return False + return isinstance(module, torch._dynamo.eval_frame.OptimizedModule) + + +def unwrap_module(module): + """Unwraps a module if it was compiled with torch.compile()""" + return module._orig_mod if is_compiled_module(module) else module + + +def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor": + """Fourier filter as introduced in FreeU (https://huggingface.co/papers/2309.11497). 
+ + This version of the method comes from here: + https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706 + """ + x = x_in + B, C, H, W = x.shape + + # Non-power of 2 images must be float32 + if (W & (W - 1)) != 0 or (H & (H - 1)) != 0: + x = x.to(dtype=torch.float32) + # fftn does not support bfloat16 + elif x.dtype == torch.bfloat16: + x = x.to(dtype=torch.float32) + + # FFT + x_freq = fftn(x, dim=(-2, -1)) + x_freq = fftshift(x_freq, dim=(-2, -1)) + + B, C, H, W = x_freq.shape + mask = torch.ones((B, C, H, W), device=x.device) + + crow, ccol = H // 2, W // 2 + mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale + x_freq = x_freq * mask + + # IFFT + x_freq = ifftshift(x_freq, dim=(-2, -1)) + x_filtered = ifftn(x_freq, dim=(-2, -1)).real + + return x_filtered.to(dtype=x_in.dtype) + + +def apply_freeu( + resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs +) -> Tuple["torch.Tensor", "torch.Tensor"]: + """Applies the FreeU mechanism as introduced in https: + //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU. + + Args: + resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied. + hidden_states (`torch.Tensor`): Inputs to the underlying block. + res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block. + s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features. + s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features. + b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features. + b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features. 
+ """ + if resolution_idx == 0: + num_half_channels = hidden_states.shape[1] // 2 + hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"] + res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"]) + if resolution_idx == 1: + num_half_channels = hidden_states.shape[1] // 2 + hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"] + res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"]) + + return hidden_states, res_hidden_states + + +def get_torch_cuda_device_capability(): + if torch.cuda.is_available(): + device = torch.device("cuda") + compute_capability = torch.cuda.get_device_capability(device) + compute_capability = f"{compute_capability[0]}.{compute_capability[1]}" + return float(compute_capability) + else: + return None + + +@functools.lru_cache +def get_device(): + if torch.cuda.is_available(): + return "cuda" + elif is_torch_npu_available(): + return "npu" + elif hasattr(torch, "xpu") and torch.xpu.is_available(): + return "xpu" + elif torch.backends.mps.is_available(): + return "mps" + else: + return "cpu" + + +def empty_device_cache(device_type: Optional[str] = None): + if device_type is None: + device_type = get_device() + if device_type in ["cpu"]: + return + device_mod = getattr(torch, device_type, torch.cuda) + device_mod.empty_cache() + + +def device_synchronize(device_type: Optional[str] = None): + if device_type is None: + device_type = get_device() + device_mod = getattr(torch, device_type, torch.cuda) + device_mod.synchronize() + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode. This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +if is_torch_available(): + torch_device = get_device() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/typing_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/typing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2b5b1a4f5ab5510856bf453a7436f6deb9c61abe --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/typing_utils.py @@ -0,0 +1,91 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Typing utilities: Utilities related to type checking and validation +""" + +from typing import Any, Dict, List, Set, Tuple, Type, Union, get_args, get_origin + + +def _is_valid_type(obj: Any, class_or_tuple: Union[Type, Tuple[Type, ...]]) -> bool: + """ + Checks if an object is an instance of any of the provided types. For collections, it checks if every element is of + the correct type as well. + """ + if not isinstance(class_or_tuple, tuple): + class_or_tuple = (class_or_tuple,) + + # Unpack unions + unpacked_class_or_tuple = [] + for t in class_or_tuple: + if get_origin(t) is Union: + unpacked_class_or_tuple.extend(get_args(t)) + else: + unpacked_class_or_tuple.append(t) + class_or_tuple = tuple(unpacked_class_or_tuple) + + if Any in class_or_tuple: + return True + + obj_type = type(obj) + # Classes with obj's type + class_or_tuple = {t for t in class_or_tuple if isinstance(obj, get_origin(t) or t)} + + # Singular types (e.g. int, ControlNet, ...) + # Untyped collections (e.g. List, but not List[int]) + elem_class_or_tuple = {get_args(t) for t in class_or_tuple} + if () in elem_class_or_tuple: + return True + # Typed lists or sets + elif obj_type in (list, set): + return any(all(_is_valid_type(x, t) for x in obj) for t in elem_class_or_tuple) + # Typed tuples + elif obj_type is tuple: + return any( + # Tuples with any length and single type (e.g. Tuple[int, ...]) + (len(t) == 2 and t[-1] is Ellipsis and all(_is_valid_type(x, t[0]) for x in obj)) + or + # Tuples with fixed length and any types (e.g. Tuple[int, str]) + (len(obj) == len(t) and all(_is_valid_type(x, tt) for x, tt in zip(obj, t))) + for t in elem_class_or_tuple + ) + # Typed dicts + elif obj_type is dict: + return any( + all(_is_valid_type(k, kt) and _is_valid_type(v, vt) for k, v in obj.items()) + for kt, vt in elem_class_or_tuple + ) + + else: + return False + + +def _get_detailed_type(obj: Any) -> Type: + """ + Gets a detailed type for an object, including nested types for collections. + """ + obj_type = type(obj) + + if obj_type in (list, set): + obj_origin_type = List if obj_type is list else Set + elems_type = Union[tuple({_get_detailed_type(x) for x in obj})] + return obj_origin_type[elems_type] + elif obj_type is tuple: + return Tuple[tuple(_get_detailed_type(x) for x in obj)] + elif obj_type is dict: + keys_type = Union[tuple({_get_detailed_type(k) for k in obj.keys()})] + values_type = Union[tuple({_get_detailed_type(k) for k in obj.values()})] + return Dict[keys_type, values_type] + else: + return obj_type diff --git a/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/versions.py b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/versions.py new file mode 100644 index 0000000000000000000000000000000000000000..945a3977ce62a9a55307862193e4be6f12c3c17f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/versions.py @@ -0,0 +1,117 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utilities for working with package versions +""" + +import importlib.metadata +import operator +import re +import sys +from typing import Optional + +from packaging import version + + +ops = { + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): + if got_ver is None or want_ver is None: + raise ValueError( + f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" + f" reinstalling {pkg}." + ) + if not ops[op](version.parse(got_ver), version.parse(want_ver)): + raise ImportError( + f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" + ) + + +def require_version(requirement: str, hint: Optional[str] = None) -> None: + """ + Perform a runtime check of the dependency versions, using the exact same syntax used by pip. + + The installed module version comes from the *site-packages* dir via *importlib.metadata*. + + Args: + requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy" + hint (`str`, *optional*): what suggestion to print in case of requirements not being met + + Example: + + ```python + require_version("pandas>1.1.2") + require_version("numpy>1.18.5", "this is important to have for whatever reason") + ```""" + + hint = f"\n{hint}" if hint is not None else "" + + # non-versioned check + if re.match(r"^[\w_\-\d]+$", requirement): + pkg, op, want_ver = requirement, None, None + else: + match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but" + f" got {requirement}" + ) + pkg, want_full = match[0] + want_range = want_full.split(",") # there could be multiple requirements + wanted = {} + for w in want_range: + match = re.findall(r"^([\s!=<>]{1,2})(.+)", w) + if not match: + raise ValueError( + "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23," + f" but got {requirement}" + ) + op, want_ver = match[0] + wanted[op] = want_ver + if op not in ops: + raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}") + + # special case + if pkg == "python": + got_ver = ".".join([str(x) for x in sys.version_info[:3]]) + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + return + + # check if any version is installed + try: + got_ver = importlib.metadata.version(pkg) + except importlib.metadata.PackageNotFoundError: + raise importlib.metadata.PackageNotFoundError( + f"The '{requirement}' distribution was not found and is required by this application. 
{hint}" + ) + + # check that the right version is installed if version number or a range was provided + if want_ver is not None: + for op, want_ver in wanted.items(): + _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) + + +def require_version_core(requirement): + """require_version wrapper which emits a core-specific hint on failure""" + hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main" + return require_version(requirement, hint) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/conftest.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..fd76d1c84ee72a683688a1a6ad426994c64a886a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/conftest.py @@ -0,0 +1,48 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# tests directory-specific settings - this file is run automatically +# by pytest before any tests are run + +import sys +import warnings +from os.path import abspath, dirname, join + + +# allow having multiple repository checkouts and not needing to remember to rerun +# 'pip install -e .[dev]' when switching between checkouts and running tests. +git_repo_path = abspath(join(dirname(dirname(__file__)), "src")) +sys.path.insert(1, git_repo_path) + +# silence FutureWarning warnings in tests since often we can't act on them until +# they become normal warnings - i.e. the tests still need to test the current functionality +warnings.simplefilter(action="ignore", category=FutureWarning) + + +def pytest_configure(config): + config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources") + + +def pytest_addoption(parser): + from .testing_utils import pytest_addoption_shared + + pytest_addoption_shared(parser) + + +def pytest_terminal_summary(terminalreporter): + from .testing_utils import pytest_terminal_summary_main + + make_reports = terminalreporter.config.getoption("--make-reports") + if make_reports: + pytest_terminal_summary_main(terminalreporter, id=make_reports) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/pipeline.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..25673e566549984cc91413a27558f6ee6a1f1d82 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/pipeline.py @@ -0,0 +1,102 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# limitations under the License. + + +from typing import Optional, Tuple, Union + +import torch + +from diffusers import DiffusionPipeline, ImagePipelineOutput, SchedulerMixin, UNet2DModel + + +class CustomLocalPipeline(DiffusionPipeline): + r""" + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Parameters: + unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of + [`DDPMScheduler`], or [`DDIMScheduler`]. + """ + + def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[torch.Generator] = None, + num_inference_steps: int = 50, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation + deterministic. + eta (`float`, *optional*, defaults to 0.0): + The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if + `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the + generated images. + """ + + # Sample gaussian noise to begin loop + image = torch.randn( + (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + generator=generator, + ) + image = image.to(self.device) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(image, t).sample + + # 2. 
predict previous mean of image x_t-1 and add variance depending on eta + # eta corresponds to η in paper and should be between [0, 1] + # do x_t -> x_t-1 + image = self.scheduler.step(model_output, t, image).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,), "This is a local test" + + return ImagePipelineOutput(images=image), "This is a local test" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/what_ever.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/what_ever.py new file mode 100644 index 0000000000000000000000000000000000000000..7504940780e835cec03b0f45fde0cdd5998fff7f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/what_ever.py @@ -0,0 +1,103 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# limitations under the License. + + +from typing import Optional, Tuple, Union + +import torch + +from diffusers import SchedulerMixin, UNet2DModel +from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput + + +class CustomLocalPipeline(DiffusionPipeline): + r""" + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Parameters: + unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of + [`DDPMScheduler`], or [`DDIMScheduler`]. + """ + + def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin): + super().__init__() + self.register_modules(unet=unet, scheduler=scheduler) + + @torch.no_grad() + def __call__( + self, + batch_size: int = 1, + generator: Optional[torch.Generator] = None, + num_inference_steps: int = 50, + output_type: Optional[str] = "pil", + return_dict: bool = True, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + r""" + Args: + batch_size (`int`, *optional*, defaults to 1): + The number of images to generate. + generator (`torch.Generator`, *optional*): + A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation + deterministic. + eta (`float`, *optional*, defaults to 0.0): + The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. 
+ return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. + + Returns: + [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if + `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the + generated images. + """ + + # Sample gaussian noise to begin loop + image = torch.randn( + (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), + generator=generator, + ) + image = image.to(self.device) + + # set step values + self.scheduler.set_timesteps(num_inference_steps) + + for t in self.progress_bar(self.scheduler.timesteps): + # 1. predict noise model_output + model_output = self.unet(image, t).sample + + # 2. predict previous mean of image x_t-1 and add variance depending on eta + # eta corresponds to η in paper and should be between [0, 1] + # do x_t -> x_t-1 + image = self.scheduler.step(model_output, t, image).prev_sample + + image = (image / 2 + 0.5).clamp(0, 1) + image = image.cpu().permute(0, 2, 3, 1).numpy() + if output_type == "pil": + image = self.numpy_to_pil(image) + + if not return_dict: + return (image,), "This is a local test" + + return ImagePipelineOutput(images=image), "This is a local test" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/elise_format0.mid b/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/elise_format0.mid new file mode 100644 index 0000000000000000000000000000000000000000..33dbabe7ab1d4d28e43d9911255a510a8a672d77 Binary files /dev/null and b/exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/elise_format0.mid differ diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_group_offloading.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_group_offloading.py new file mode 100644 index 0000000000000000000000000000000000000000..96cbecfbf530be51a79044f6701e71d32174dfc4 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_group_offloading.py @@ -0,0 +1,364 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
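# Illustrative sketch (annotation, not part of the diff): the CustomLocalPipeline fixtures defined
# above are exercised through the standard custom-pipeline loading path of diffusers. The checkpoint
# id and local path below are placeholders chosen for illustration only.
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",  # any small checkpoint providing a UNet2DModel and a scheduler
    custom_pipeline="./tests/fixtures/custom_pipeline",  # directory containing the pipeline.py fixture above
)
output, message = pipeline(num_inference_steps=2, output_type="np")  # message == "This is a local test"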
+ +import contextlib +import gc +import unittest + +import torch +from parameterized import parameterized + +from diffusers.hooks import HookRegistry, ModelHook +from diffusers.models import ModelMixin +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.utils import get_logger +from diffusers.utils.import_utils import compare_versions + +from ..testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_peak_memory_stats, + require_torch_accelerator, + torch_device, +) + + +class DummyBlock(torch.nn.Module): + def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None: + super().__init__() + + self.proj_in = torch.nn.Linear(in_features, hidden_features) + self.activation = torch.nn.ReLU() + self.proj_out = torch.nn.Linear(hidden_features, out_features) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj_in(x) + x = self.activation(x) + x = self.proj_out(x) + return x + + +class DummyModel(ModelMixin): + def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None: + super().__init__() + + self.linear_1 = torch.nn.Linear(in_features, hidden_features) + self.activation = torch.nn.ReLU() + self.blocks = torch.nn.ModuleList( + [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] + ) + self.linear_2 = torch.nn.Linear(hidden_features, out_features) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.linear_1(x) + x = self.activation(x) + for block in self.blocks: + x = block(x) + x = self.linear_2(x) + return x + + +# This model implementation contains one type of block (single_blocks) instantiated before another type of block (double_blocks). +# The invocation order of these blocks, however, is first the double_blocks and then the single_blocks. +# With group offloading implementation before https://github.com/huggingface/diffusers/pull/11375, such a modeling implementation +# would result in a device mismatch error because of the assumptions made by the code. The failure case occurs when using: +# offload_type="block_level", num_blocks_per_group=2, use_stream=True +# Post the linked PR, the implementation will work as expected. 
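# Illustrative sketch (annotation, not part of the diff): the failing configuration described in the
# comment above, expressed with the group-offloading entry point from diffusers.hooks. Exact argument
# defaults may differ between versions, so treat this as a sketch; the model is assumed to be an
# instance of the DummyModelWithMultipleBlocks class defined directly below.
import torch
from diffusers.hooks import apply_group_offloading


def _sketch_block_level_offload(model: torch.nn.Module) -> None:
    apply_group_offloading(
        model,
        onload_device=torch.device("cuda"),
        offload_device=torch.device("cpu"),
        offload_type="block_level",
        num_blocks_per_group=2,
        use_stream=True,  # the combination that previously triggered the device-mismatch failure
    )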
+class DummyModelWithMultipleBlocks(ModelMixin): + def __init__( + self, in_features: int, hidden_features: int, out_features: int, num_layers: int, num_single_layers: int + ) -> None: + super().__init__() + + self.linear_1 = torch.nn.Linear(in_features, hidden_features) + self.activation = torch.nn.ReLU() + self.single_blocks = torch.nn.ModuleList( + [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_single_layers)] + ) + self.double_blocks = torch.nn.ModuleList( + [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] + ) + self.linear_2 = torch.nn.Linear(hidden_features, out_features) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.linear_1(x) + x = self.activation(x) + for block in self.double_blocks: + x = block(x) + for block in self.single_blocks: + x = block(x) + x = self.linear_2(x) + return x + + +# Test for https://github.com/huggingface/diffusers/pull/12077 +class DummyModelWithLayerNorm(ModelMixin): + def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None: + super().__init__() + + self.linear_1 = torch.nn.Linear(in_features, hidden_features) + self.activation = torch.nn.ReLU() + self.blocks = torch.nn.ModuleList( + [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] + ) + self.layer_norm = torch.nn.LayerNorm(hidden_features, elementwise_affine=True) + self.linear_2 = torch.nn.Linear(hidden_features, out_features) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.linear_1(x) + x = self.activation(x) + for block in self.blocks: + x = block(x) + x = self.layer_norm(x) + x = self.linear_2(x) + return x + + +class DummyPipeline(DiffusionPipeline): + model_cpu_offload_seq = "model" + + def __init__(self, model: torch.nn.Module) -> None: + super().__init__() + + self.register_modules(model=model) + + def __call__(self, x: torch.Tensor) -> torch.Tensor: + for _ in range(2): + x = x + 0.1 * self.model(x) + return x + + +class LayerOutputTrackerHook(ModelHook): + def __init__(self): + super().__init__() + self.outputs = [] + + def post_forward(self, module, output): + self.outputs.append(output) + return output + + +@require_torch_accelerator +class GroupOffloadTests(unittest.TestCase): + in_features = 64 + hidden_features = 256 + out_features = 64 + num_layers = 4 + + def setUp(self): + with torch.no_grad(): + self.model = self.get_model() + self.input = torch.randn((4, self.in_features)).to(torch_device) + + def tearDown(self): + super().tearDown() + + del self.model + del self.input + gc.collect() + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + + def get_model(self): + torch.manual_seed(0) + return DummyModel( + in_features=self.in_features, + hidden_features=self.hidden_features, + out_features=self.out_features, + num_layers=self.num_layers, + ) + + def test_offloading_forward_pass(self): + @torch.no_grad() + def run_forward(model): + gc.collect() + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + self.assertTrue( + all( + module._diffusers_hook.get_hook("group_offloading") is not None + for module in model.modules() + if hasattr(module, "_diffusers_hook") + ) + ) + model.eval() + output = model(self.input)[0].cpu() + max_memory_allocated = backend_max_memory_allocated(torch_device) + return output, max_memory_allocated + + self.model.to(torch_device) + output_without_group_offloading, mem_baseline = run_forward(self.model) 
+ self.model.to("cpu") + + model = self.get_model() + model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + output_with_group_offloading1, mem1 = run_forward(model) + + model = self.get_model() + model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1) + output_with_group_offloading2, mem2 = run_forward(model) + + model = self.get_model() + model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) + output_with_group_offloading3, mem3 = run_forward(model) + + model = self.get_model() + model.enable_group_offload(torch_device, offload_type="leaf_level") + output_with_group_offloading4, mem4 = run_forward(model) + + model = self.get_model() + model.enable_group_offload(torch_device, offload_type="leaf_level", use_stream=True) + output_with_group_offloading5, mem5 = run_forward(model) + + # Precision assertions - offloading should not impact the output + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading5, atol=1e-5)) + + # Memory assertions - offloading should reduce memory usage + self.assertTrue(mem4 <= mem5 < mem2 <= mem3 < mem1 < mem_baseline) + + def test_warning_logged_if_group_offloaded_module_moved_to_accelerator(self): + if torch.device(torch_device).type not in ["cuda", "xpu"]: + return + self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + logger = get_logger("diffusers.models.modeling_utils") + logger.setLevel("INFO") + with self.assertLogs(logger, level="WARNING") as cm: + self.model.to(torch_device) + self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0]) + + def test_warning_logged_if_group_offloaded_pipe_moved_to_accelerator(self): + if torch.device(torch_device).type not in ["cuda", "xpu"]: + return + pipe = DummyPipeline(self.model) + self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + logger = get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel("INFO") + with self.assertLogs(logger, level="WARNING") as cm: + pipe.to(torch_device) + self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0]) + + def test_error_raised_if_streams_used_and_no_accelerator_device(self): + torch_accelerator_module = getattr(torch, torch_device, torch.cuda) + original_is_available = torch_accelerator_module.is_available + torch_accelerator_module.is_available = lambda: False + with self.assertRaises(ValueError): + self.model.enable_group_offload( + onload_device=torch.device(torch_device), offload_type="leaf_level", use_stream=True + ) + torch_accelerator_module.is_available = original_is_available + + def test_error_raised_if_supports_group_offloading_false(self): + self.model._supports_group_offloading = False + with self.assertRaisesRegex(ValueError, "does not support group offloading"): + self.model.enable_group_offload(onload_device=torch.device(torch_device)) + + def 
test_error_raised_if_model_offloading_applied_on_group_offloaded_module(self): + pipe = DummyPipeline(self.model) + pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"): + pipe.enable_model_cpu_offload() + + def test_error_raised_if_sequential_offloading_applied_on_group_offloaded_module(self): + pipe = DummyPipeline(self.model) + pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"): + pipe.enable_sequential_cpu_offload() + + def test_error_raised_if_group_offloading_applied_on_model_offloaded_module(self): + pipe = DummyPipeline(self.model) + pipe.enable_model_cpu_offload() + with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"): + pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + + def test_error_raised_if_group_offloading_applied_on_sequential_offloaded_module(self): + pipe = DummyPipeline(self.model) + pipe.enable_sequential_cpu_offload() + with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"): + pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3) + + def test_block_level_stream_with_invocation_order_different_from_initialization_order(self): + if torch.device(torch_device).type not in ["cuda", "xpu"]: + return + + model = DummyModelWithMultipleBlocks( + in_features=self.in_features, + hidden_features=self.hidden_features, + out_features=self.out_features, + num_layers=self.num_layers, + num_single_layers=self.num_layers + 1, + ) + model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True) + + context = contextlib.nullcontext() + if compare_versions("diffusers", "<=", "0.33.0"): + # Will raise a device mismatch RuntimeError mentioning weights are on CPU but input is on device + context = self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device") + + with context: + model(self.input) + + @parameterized.expand([("block_level",), ("leaf_level",)]) + def test_block_level_offloading_with_parameter_only_module_group(self, offload_type: str): + if torch.device(torch_device).type not in ["cuda", "xpu"]: + return + + def apply_layer_output_tracker_hook(model: DummyModelWithLayerNorm): + for name, module in model.named_modules(): + registry = HookRegistry.check_if_exists_or_initialize(module) + hook = LayerOutputTrackerHook() + registry.register_hook(hook, "layer_output_tracker") + + model_ref = DummyModelWithLayerNorm(128, 256, 128, 2) + model = DummyModelWithLayerNorm(128, 256, 128, 2) + + model.load_state_dict(model_ref.state_dict(), strict=True) + + model_ref.to(torch_device) + model.enable_group_offload(torch_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=True) + + apply_layer_output_tracker_hook(model_ref) + apply_layer_output_tracker_hook(model) + + x = torch.randn(2, 128).to(torch_device) + + out_ref = model_ref(x) + out = model(x) + self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match.") + + num_repeats = 4 + for i in range(num_repeats): + out_ref = model_ref(x) + out = model(x) + + self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match after multiple invocations.") + + for (ref_name, ref_module), (name, module) in 
zip(model_ref.named_modules(), model.named_modules()): + assert ref_name == name + ref_outputs = ( + HookRegistry.check_if_exists_or_initialize(ref_module).get_hook("layer_output_tracker").outputs + ) + outputs = HookRegistry.check_if_exists_or_initialize(module).get_hook("layer_output_tracker").outputs + cumulated_absmax = 0.0 + for i in range(len(outputs)): + diff = ref_outputs[0] - outputs[i] + absdiff = diff.abs() + absmax = absdiff.max().item() + cumulated_absmax += absmax + self.assertLess( + cumulated_absmax, 1e-5, f"Output differences for {name} exceeded threshold: {cumulated_absmax:.5f}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_hooks.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..8a83f60ff278cdec95ebe7bf857f435bfb1ec8bb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_hooks.py @@ -0,0 +1,377 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch + +from diffusers.hooks import HookRegistry, ModelHook +from diffusers.training_utils import free_memory +from diffusers.utils.logging import get_logger + +from ..testing_utils import CaptureLogger, torch_device + + +logger = get_logger(__name__) # pylint: disable=invalid-name + + +class DummyBlock(torch.nn.Module): + def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None: + super().__init__() + + self.proj_in = torch.nn.Linear(in_features, hidden_features) + self.activation = torch.nn.ReLU() + self.proj_out = torch.nn.Linear(hidden_features, out_features) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.proj_in(x) + x = self.activation(x) + x = self.proj_out(x) + return x + + +class DummyModel(torch.nn.Module): + def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None: + super().__init__() + + self.linear_1 = torch.nn.Linear(in_features, hidden_features) + self.activation = torch.nn.ReLU() + self.blocks = torch.nn.ModuleList( + [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)] + ) + self.linear_2 = torch.nn.Linear(hidden_features, out_features) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.linear_1(x) + x = self.activation(x) + for block in self.blocks: + x = block(x) + x = self.linear_2(x) + return x + + +class AddHook(ModelHook): + def __init__(self, value: int): + super().__init__() + self.value = value + + def pre_forward(self, module: torch.nn.Module, *args, **kwargs): + logger.debug("AddHook pre_forward") + args = ((x + self.value) if torch.is_tensor(x) else x for x in args) + return args, kwargs + + def post_forward(self, module, output): + logger.debug("AddHook post_forward") + return output + + +class MultiplyHook(ModelHook): + def __init__(self, value: int): + super().__init__() + self.value = value + + def pre_forward(self, module, *args, **kwargs): + 
logger.debug("MultiplyHook pre_forward") + args = ((x * self.value) if torch.is_tensor(x) else x for x in args) + return args, kwargs + + def post_forward(self, module, output): + logger.debug("MultiplyHook post_forward") + return output + + def __repr__(self): + return f"MultiplyHook(value={self.value})" + + +class StatefulAddHook(ModelHook): + _is_stateful = True + + def __init__(self, value: int): + super().__init__() + self.value = value + self.increment = 0 + + def pre_forward(self, module, *args, **kwargs): + logger.debug("StatefulAddHook pre_forward") + add_value = self.value + self.increment + self.increment += 1 + args = ((x + add_value) if torch.is_tensor(x) else x for x in args) + return args, kwargs + + def reset_state(self, module): + self.increment = 0 + + +class SkipLayerHook(ModelHook): + def __init__(self, skip_layer: bool): + super().__init__() + self.skip_layer = skip_layer + + def pre_forward(self, module, *args, **kwargs): + logger.debug("SkipLayerHook pre_forward") + return args, kwargs + + def new_forward(self, module, *args, **kwargs): + logger.debug("SkipLayerHook new_forward") + if self.skip_layer: + return args[0] + return self.fn_ref.original_forward(*args, **kwargs) + + def post_forward(self, module, output): + logger.debug("SkipLayerHook post_forward") + return output + + +class HookTests(unittest.TestCase): + in_features = 4 + hidden_features = 8 + out_features = 4 + num_layers = 2 + + def setUp(self): + params = self.get_module_parameters() + self.model = DummyModel(**params) + self.model.to(torch_device) + + def tearDown(self): + super().tearDown() + + del self.model + gc.collect() + free_memory() + + def get_module_parameters(self): + return { + "in_features": self.in_features, + "hidden_features": self.hidden_features, + "out_features": self.out_features, + "num_layers": self.num_layers, + } + + def get_generator(self): + return torch.manual_seed(0) + + def test_hook_registry(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(AddHook(1), "add_hook") + registry.register_hook(MultiplyHook(2), "multiply_hook") + + registry_repr = repr(registry) + expected_repr = "HookRegistry(\n (0) add_hook - AddHook\n (1) multiply_hook - MultiplyHook(value=2)\n)" + + self.assertEqual(len(registry.hooks), 2) + self.assertEqual(registry._hook_order, ["add_hook", "multiply_hook"]) + self.assertEqual(registry_repr, expected_repr) + + registry.remove_hook("add_hook") + + self.assertEqual(len(registry.hooks), 1) + self.assertEqual(registry._hook_order, ["multiply_hook"]) + + def test_stateful_hook(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(StatefulAddHook(1), "stateful_add_hook") + + self.assertEqual(registry.hooks["stateful_add_hook"].increment, 0) + + input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) + num_repeats = 3 + + for i in range(num_repeats): + result = self.model(input) + if i == 0: + output1 = result + + self.assertEqual(registry.get_hook("stateful_add_hook").increment, num_repeats) + + registry.reset_stateful_hooks() + output2 = self.model(input) + + self.assertEqual(registry.get_hook("stateful_add_hook").increment, 1) + self.assertTrue(torch.allclose(output1, output2)) + + def test_inference(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(AddHook(1), "add_hook") + registry.register_hook(MultiplyHook(2), "multiply_hook") + + input = torch.randn(1, 4, device=torch_device, 
generator=self.get_generator()) + output1 = self.model(input).mean().detach().cpu().item() + + registry.remove_hook("multiply_hook") + new_input = input * 2 + output2 = self.model(new_input).mean().detach().cpu().item() + + registry.remove_hook("add_hook") + new_input = input * 2 + 1 + output3 = self.model(new_input).mean().detach().cpu().item() + + self.assertAlmostEqual(output1, output2, places=5) + self.assertAlmostEqual(output1, output3, places=5) + self.assertAlmostEqual(output2, output3, places=5) + + def test_skip_layer_hook(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook") + + input = torch.zeros(1, 4, device=torch_device) + output = self.model(input).mean().detach().cpu().item() + self.assertEqual(output, 0.0) + + registry.remove_hook("skip_layer_hook") + registry.register_hook(SkipLayerHook(skip_layer=False), "skip_layer_hook") + output = self.model(input).mean().detach().cpu().item() + self.assertNotEqual(output, 0.0) + + def test_skip_layer_internal_block(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model.linear_1) + input = torch.zeros(1, 4, device=torch_device) + + registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook") + with self.assertRaises(RuntimeError) as cm: + self.model(input).mean().detach().cpu().item() + self.assertIn("mat1 and mat2 shapes cannot be multiplied", str(cm.exception)) + + registry.remove_hook("skip_layer_hook") + output = self.model(input).mean().detach().cpu().item() + self.assertNotEqual(output, 0.0) + + registry = HookRegistry.check_if_exists_or_initialize(self.model.blocks[1]) + registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook") + output = self.model(input).mean().detach().cpu().item() + self.assertNotEqual(output, 0.0) + + def test_invocation_order_stateful_first(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(StatefulAddHook(1), "add_hook") + registry.register_hook(AddHook(2), "add_hook_2") + registry.register_hook(MultiplyHook(3), "multiply_hook") + + input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) + + logger = get_logger(__name__) + logger.setLevel("DEBUG") + + with CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ( + "MultiplyHook pre_forward\n" + "AddHook pre_forward\n" + "StatefulAddHook pre_forward\n" + "AddHook post_forward\n" + "MultiplyHook post_forward\n" + ) + .replace(" ", "") + .replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) + + registry.remove_hook("add_hook") + with CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ("MultiplyHook pre_forward\nAddHook pre_forward\nAddHook post_forward\nMultiplyHook post_forward\n") + .replace(" ", "") + .replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) + + def test_invocation_order_stateful_middle(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(AddHook(2), "add_hook") + registry.register_hook(StatefulAddHook(1), "add_hook_2") + registry.register_hook(MultiplyHook(3), "multiply_hook") + + input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) + + logger = get_logger(__name__) + logger.setLevel("DEBUG") + + with 
CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ( + "MultiplyHook pre_forward\n" + "StatefulAddHook pre_forward\n" + "AddHook pre_forward\n" + "AddHook post_forward\n" + "MultiplyHook post_forward\n" + ) + .replace(" ", "") + .replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) + + registry.remove_hook("add_hook") + with CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ("MultiplyHook pre_forward\nStatefulAddHook pre_forward\nMultiplyHook post_forward\n") + .replace(" ", "") + .replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) + + registry.remove_hook("add_hook_2") + with CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ("MultiplyHook pre_forward\nMultiplyHook post_forward\n").replace(" ", "").replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) + + def test_invocation_order_stateful_last(self): + registry = HookRegistry.check_if_exists_or_initialize(self.model) + registry.register_hook(AddHook(1), "add_hook") + registry.register_hook(MultiplyHook(2), "multiply_hook") + registry.register_hook(StatefulAddHook(3), "add_hook_2") + + input = torch.randn(1, 4, device=torch_device, generator=self.get_generator()) + + logger = get_logger(__name__) + logger.setLevel("DEBUG") + + with CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ( + "StatefulAddHook pre_forward\n" + "MultiplyHook pre_forward\n" + "AddHook pre_forward\n" + "AddHook post_forward\n" + "MultiplyHook post_forward\n" + ) + .replace(" ", "") + .replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) + + registry.remove_hook("add_hook") + with CaptureLogger(logger) as cap_logger: + self.model(input) + output = cap_logger.out.replace(" ", "").replace("\n", "") + expected_invocation_order_log = ( + ("StatefulAddHook pre_forward\nMultiplyHook pre_forward\nMultiplyHook post_forward\n") + .replace(" ", "") + .replace("\n", "") + ) + self.assertEqual(output, expected_invocation_order_log) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_auraflow.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_auraflow.py new file mode 100644 index 0000000000000000000000000000000000000000..67084dd6d07800650c2d465fd13254f4dd0d9fc7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_auraflow.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +import torch +from transformers import AutoTokenizer, UMT5EncoderModel + +from diffusers import ( + AuraFlowPipeline, + AuraFlowTransformer2DModel, + FlowMatchEulerDiscreteScheduler, +) + +from ..testing_utils import ( + floats_tensor, + is_peft_available, + require_peft_backend, +) + + +if is_peft_available(): + pass + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = AuraFlowPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "sample_size": 64, + "patch_size": 1, + "in_channels": 4, + "num_mmdit_layers": 1, + "num_single_dit_layers": 1, + "attention_head_dim": 16, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "caption_projection_dim": 32, + "pos_embed_max_size": 64, + } + transformer_cls = AuraFlowTransformer2DModel + vae_kwargs = { + "sample_size": 32, + "in_channels": 3, + "out_channels": 3, + "block_out_channels": (4,), + "layers_per_block": 1, + "latent_channels": 4, + "norm_num_groups": 1, + "use_quant_conv": False, + "use_post_quant_conv": False, + "shift_factor": 0.0609, + "scaling_factor": 1.5035, + } + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = UMT5EncoderModel, "hf-internal-testing/tiny-random-umt5" + text_encoder_target_modules = ["q", "k", "v", "o"] + denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0", "linear_1"] + + @property + def output_shape(self): + return (1, 8, 8, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 10 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "num_inference_steps": 4, + "guidance_scale": 0.0, + "height": 8, + "width": 8, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + @unittest.skip("Not supported in AuraFlow.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in AuraFlow.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in AuraFlow.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in AuraFlow.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in AuraFlow.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in AuraFlow.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in AuraFlow.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in AuraFlow.") + def test_simple_inference_with_text_lora_save_load(self): + pass diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogvideox.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogvideox.py new file mode 100644 index 0000000000000000000000000000000000000000..16147f35c71df80fed90c61ef07fdc1cdb2c5cc8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogvideox.py @@ -0,0 +1,174 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +import torch +from parameterized import parameterized +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLCogVideoX, + CogVideoXDDIMScheduler, + CogVideoXDPMScheduler, + CogVideoXPipeline, + CogVideoXTransformer3DModel, +) + +from ..testing_utils import ( + floats_tensor, + require_peft_backend, + require_torch_accelerator, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +class CogVideoXLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = CogVideoXPipeline + scheduler_cls = CogVideoXDPMScheduler + scheduler_kwargs = {"timestep_spacing": "trailing"} + scheduler_classes = [CogVideoXDDIMScheduler, CogVideoXDPMScheduler] + + transformer_kwargs = { + "num_attention_heads": 4, + "attention_head_dim": 8, + "in_channels": 4, + "out_channels": 4, + "time_embed_dim": 2, + "text_embed_dim": 32, + "num_layers": 1, + "sample_width": 16, + "sample_height": 16, + "sample_frames": 9, + "patch_size": 2, + "temporal_compression_ratio": 4, + "max_text_seq_length": 16, + } + transformer_cls = CogVideoXTransformer3DModel + vae_kwargs = { + "in_channels": 3, + "out_channels": 3, + "down_block_types": ( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + "up_block_types": ( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + "block_out_channels": (8, 8, 8, 8), + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 2, + "temporal_compression_ratio": 4, + } + vae_cls = AutoencoderKLCogVideoX + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + text_encoder_target_modules = ["q", "k", "v", "o"] + + @property + def output_shape(self): + return (1, 9, 16, 16, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + num_frames = 9 + num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 + sizes = (2, 2) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "dance monkey", + "num_frames": num_frames, + "num_inference_steps": 4, + "guidance_scale": 6.0, + # Cannot reduce 
because convolution kernel becomes bigger than sample + "height": 16, + "width": 16, + "max_sequence_length": sequence_length, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + def test_lora_scale_kwargs_match_fusion(self): + super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3) + + @parameterized.expand([("block_level", True), ("leaf_level", False)]) + @require_torch_accelerator + def test_group_offloading_inference_denoiser(self, offload_type, use_stream): + # TODO: We don't run the (leaf_level, True) test here that is enabled for other models. + # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338 + super()._test_group_offloading_inference_denoiser(offload_type, use_stream) + + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in CogVideoX.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogVideoX.") + def test_simple_inference_with_text_lora_save_load(self): + pass + + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogview4.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogview4.py new file mode 100644 index 0000000000000000000000000000000000000000..3b8a56c40302f9501f515ddcf5751a8866d95b56 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogview4.py @@ -0,0 +1,189 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
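+
+# Most of the coverage in this file comes from the shared PeftLoraLoaderMixinTests mixin; the
+# methods below only override CogView4-specific pieces (tiny component configs, dummy inputs,
+# and tolerances). Roughly, the shared tests drive the pipeline like this (a sketch;
+# `denoiser_lora_config` and `inputs` come from the mixin helpers):
+#
+#     components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
+#     pipe = CogView4Pipeline(**components).to(torch_device)
+#     pipe.transformer.add_adapter(denoiser_lora_config)   # attach a PEFT LoRA to the denoiser
+#     images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
+#     pipe.unload_lora_weights()                            # expected to restore the no-LoRA baseline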
+ +import sys +import tempfile +import unittest + +import numpy as np +import torch +from parameterized import parameterized +from transformers import AutoTokenizer, GlmModel + +from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler + +from ..testing_utils import ( + floats_tensor, + require_peft_backend, + require_torch_accelerator, + skip_mps, + torch_device, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +class TokenizerWrapper: + @staticmethod + def from_pretrained(*args, **kwargs): + return AutoTokenizer.from_pretrained( + "hf-internal-testing/tiny-random-cogview4", subfolder="tokenizer", trust_remote_code=True + ) + + +@require_peft_backend +@skip_mps +class CogView4LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = CogView4Pipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "patch_size": 2, + "in_channels": 4, + "num_layers": 2, + "attention_head_dim": 4, + "num_attention_heads": 4, + "out_channels": 4, + "text_embed_dim": 32, + "time_embed_dim": 8, + "condition_dim": 4, + } + transformer_cls = CogView4Transformer2DModel + vae_kwargs = { + "block_out_channels": [32, 64], + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], + "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], + "latent_channels": 4, + "sample_size": 128, + } + vae_cls = AutoencoderKL + tokenizer_cls, tokenizer_id, tokenizer_subfolder = ( + TokenizerWrapper, + "hf-internal-testing/tiny-random-cogview4", + "tokenizer", + ) + text_encoder_cls, text_encoder_id, text_encoder_subfolder = ( + GlmModel, + "hf-internal-testing/tiny-random-cogview4", + "text_encoder", + ) + + @property + def output_shape(self): + return (1, 32, 32, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + sizes = (4, 4) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "", + "num_inference_steps": 1, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": sequence_length, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + def test_simple_inference_save_pretrained(self): + """ + Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained + """ + for scheduler_cls in self.scheduler_classes: + components, _, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with 
tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + + pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname) + pipe_from_pretrained.to(torch_device) + + images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results.", + ) + + @parameterized.expand([("block_level", True), ("leaf_level", False)]) + @require_torch_accelerator + def test_group_offloading_inference_denoiser(self, offload_type, use_stream): + # TODO: We don't run the (leaf_level, True) test here that is enabled for other models. + # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338 + super()._test_group_offloading_inference_denoiser(offload_type, use_stream) + + @unittest.skip("Not supported in CogView4.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in CogView4.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in CogView4.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogView4.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogView4.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogView4.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogView4.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in CogView4.") + def test_simple_inference_with_text_lora_save_load(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_flux.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_flux.py new file mode 100644 index 0000000000000000000000000000000000000000..7d99bcad80872f8788107d0f02bd64251008c2bb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_flux.py @@ -0,0 +1,1054 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import copy +import gc +import os +import sys +import tempfile +import unittest + +import numpy as np +import safetensors.torch +import torch +from parameterized import parameterized +from PIL import Image +from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel +from diffusers.utils import load_image, logging + +from ..testing_utils import ( + CaptureLogger, + backend_empty_cache, + floats_tensor, + is_peft_available, + nightly, + numpy_cosine_similarity_distance, + require_big_accelerator, + require_peft_backend, + require_torch_accelerator, + slow, + torch_device, +) + + +if is_peft_available(): + from peft.utils import get_peft_model_state_dict + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402 + + +@require_peft_backend +class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = FluxPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler() + scheduler_kwargs = {} + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + transformer_kwargs = { + "patch_size": 1, + "in_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 16, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "pooled_projection_dim": 32, + "axes_dims_rope": [4, 4, 8], + } + transformer_cls = FluxTransformer2DModel + vae_kwargs = { + "sample_size": 32, + "in_channels": 3, + "out_channels": 3, + "block_out_channels": (4,), + "layers_per_block": 1, + "latent_channels": 1, + "norm_num_groups": 1, + "use_quant_conv": False, + "use_post_quant_conv": False, + "shift_factor": 0.0609, + "scaling_factor": 1.5035, + } + has_two_text_encoders = True + tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" + tokenizer_2_cls, tokenizer_2_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2" + text_encoder_2_cls, text_encoder_2_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + @property + def output_shape(self): + return (1, 8, 8, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 10 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "num_inference_steps": 4, + "guidance_scale": 0.0, + "height": 8, + "width": 8, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_with_alpha_in_state_dict(self): + components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe.transformer.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer") + + images_lora = pipe(**inputs, 
generator=torch.manual_seed(0)).images + + with tempfile.TemporaryDirectory() as tmpdirname: + denoiser_state_dict = get_peft_model_state_dict(pipe.transformer) + self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + pipe.unload_lora_weights() + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + + # modify the state dict to have alpha values following + # https://huggingface.co/TheLastBen/Jon_Snow_Flux_LoRA/blob/main/jon_snow.safetensors + state_dict_with_alpha = safetensors.torch.load_file( + os.path.join(tmpdirname, "pytorch_lora_weights.safetensors") + ) + alpha_dict = {} + for k, v in state_dict_with_alpha.items(): + # only do for `transformer` and for the k projections -- should be enough to test. + if "transformer" in k and "to_k" in k and "lora_A" in k: + alpha_dict[f"{k}.alpha"] = float(torch.randint(10, 100, size=())) + state_dict_with_alpha.update(alpha_dict) + + images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0)).images + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + pipe.unload_lora_weights() + pipe.load_lora_weights(state_dict_with_alpha) + images_lora_with_alpha = pipe(**inputs, generator=torch.manual_seed(0)).images + + self.assertTrue( + np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results.", + ) + self.assertFalse(np.allclose(images_lora_with_alpha, images_lora, atol=1e-3, rtol=1e-3)) + + def test_lora_expansion_works_for_absent_keys(self): + components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images + self.assertTrue(output_no_lora.shape == self.output_shape) + + # Modify the config to have a layer which won't be present in the second LoRA we will load. + modified_denoiser_lora_config = copy.deepcopy(denoiser_lora_config) + modified_denoiser_lora_config.target_modules.add("x_embedder") + + pipe.transformer.add_adapter(modified_denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer") + + images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images + self.assertFalse( + np.allclose(images_lora, output_no_lora, atol=1e-3, rtol=1e-3), + "LoRA should lead to different results.", + ) + + with tempfile.TemporaryDirectory() as tmpdirname: + denoiser_state_dict = get_peft_model_state_dict(pipe.transformer) + self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + pipe.unload_lora_weights() + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"), adapter_name="one") + + # Modify the state dict to exclude "x_embedder" related LoRA params. 
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + lora_state_dict_without_xembedder = {k: v for k, v in lora_state_dict.items() if "x_embedder" not in k} + + pipe.load_lora_weights(lora_state_dict_without_xembedder, adapter_name="two") + pipe.set_adapters(["one", "two"]) + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer") + images_lora_with_absent_keys = pipe(**inputs, generator=torch.manual_seed(0)).images + + self.assertFalse( + np.allclose(images_lora, images_lora_with_absent_keys, atol=1e-3, rtol=1e-3), + "Different LoRAs should lead to different results.", + ) + self.assertFalse( + np.allclose(output_no_lora, images_lora_with_absent_keys, atol=1e-3, rtol=1e-3), + "LoRA should lead to different results.", + ) + + def test_lora_expansion_works_for_extra_keys(self): + components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images + self.assertTrue(output_no_lora.shape == self.output_shape) + + # Modify the config to have a layer which won't be present in the first LoRA we will load. + modified_denoiser_lora_config = copy.deepcopy(denoiser_lora_config) + modified_denoiser_lora_config.target_modules.add("x_embedder") + + pipe.transformer.add_adapter(modified_denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer") + + images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images + self.assertFalse( + np.allclose(images_lora, output_no_lora, atol=1e-3, rtol=1e-3), + "LoRA should lead to different results.", + ) + + with tempfile.TemporaryDirectory() as tmpdirname: + denoiser_state_dict = get_peft_model_state_dict(pipe.transformer) + self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + pipe.unload_lora_weights() + # Modify the state dict to exclude "x_embedder" related LoRA params. + lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + lora_state_dict_without_xembedder = {k: v for k, v in lora_state_dict.items() if "x_embedder" not in k} + pipe.load_lora_weights(lora_state_dict_without_xembedder, adapter_name="one") + + # Load state dict with `x_embedder`. 
+ pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"), adapter_name="two") + + pipe.set_adapters(["one", "two"]) + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer") + images_lora_with_extra_keys = pipe(**inputs, generator=torch.manual_seed(0)).images + + self.assertFalse( + np.allclose(images_lora, images_lora_with_extra_keys, atol=1e-3, rtol=1e-3), + "Different LoRAs should lead to different results.", + ) + self.assertFalse( + np.allclose(output_no_lora, images_lora_with_extra_keys, atol=1e-3, rtol=1e-3), + "LoRA should lead to different results.", + ) + + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass + + +class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = FluxControlPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler() + scheduler_kwargs = {} + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + transformer_kwargs = { + "patch_size": 1, + "in_channels": 8, + "out_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 16, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "pooled_projection_dim": 32, + "axes_dims_rope": [4, 4, 8], + } + transformer_cls = FluxTransformer2DModel + vae_kwargs = { + "sample_size": 32, + "in_channels": 3, + "out_channels": 3, + "block_out_channels": (4,), + "layers_per_block": 1, + "latent_channels": 1, + "norm_num_groups": 1, + "use_quant_conv": False, + "use_post_quant_conv": False, + "shift_factor": 0.0609, + "scaling_factor": 1.5035, + } + has_two_text_encoders = True + tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" + tokenizer_2_cls, tokenizer_2_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2" + text_encoder_2_cls, text_encoder_2_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + @property + def output_shape(self): + return (1, 8, 8, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 10 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "control_image": Image.fromarray(np.random.randint(0, 255, size=(32, 32, 3), dtype="uint8")), + "num_inference_steps": 4, + "guidance_scale": 0.0, + "height": 8, + "width": 8, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_with_norm_in_state_dict(self): + components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + logger = 
logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.INFO) + + original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + for norm_layer in ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]: + norm_state_dict = {} + for name, module in pipe.transformer.named_modules(): + if norm_layer not in name or not hasattr(module, "weight") or module.weight is None: + continue + norm_state_dict[f"transformer.{name}.weight"] = torch.randn( + module.weight.shape, device=module.weight.device, dtype=module.weight.dtype + ) + + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(norm_state_dict) + lora_load_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + "The provided state dict contains normalization layers in addition to LoRA layers" + in cap_logger.out + ) + self.assertTrue(len(pipe.transformer._transformer_norm_layers) > 0) + + pipe.unload_lora_weights() + lora_unload_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue(pipe.transformer._transformer_norm_layers is None) + self.assertTrue(np.allclose(original_output, lora_unload_output, atol=1e-5, rtol=1e-5)) + self.assertFalse( + np.allclose(original_output, lora_load_output, atol=1e-6, rtol=1e-6), f"{norm_layer} is tested" + ) + + with CaptureLogger(logger) as cap_logger: + for key in list(norm_state_dict.keys()): + norm_state_dict[key.replace("norm", "norm_k_something_random")] = norm_state_dict.pop(key) + pipe.load_lora_weights(norm_state_dict) + + self.assertTrue( + "Unsupported keys found in state dict when trying to load normalization layers" in cap_logger.out + ) + + def test_lora_parameter_expanded_shapes(self): + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + original_out = pipe(**inputs, generator=torch.manual_seed(0))[0] + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.DEBUG) + + # Change the transformer config to mimic a real use case. 
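+        # Here that means rebuilding the transformer with half the input channels, so that the
+        # Control-style LoRA loaded below (whose x_embedder lora_A expects 2 * in_features)
+        # forces the x_embedder input features to be expanded at load time.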
+ num_channels_without_control = 4 + transformer = FluxTransformer2DModel.from_config( + components["transformer"].config, in_channels=num_channels_without_control + ).to(torch_device) + self.assertTrue( + transformer.config.in_channels == num_channels_without_control, + f"Expected {num_channels_without_control} channels in the modified transformer but has {transformer.config.in_channels=}", + ) + + original_transformer_state_dict = pipe.transformer.state_dict() + x_embedder_weight = original_transformer_state_dict.pop("x_embedder.weight") + incompatible_keys = transformer.load_state_dict(original_transformer_state_dict, strict=False) + self.assertTrue( + "x_embedder.weight" in incompatible_keys.missing_keys, + "Could not find x_embedder.weight in the missing keys.", + ) + transformer.x_embedder.weight.data.copy_(x_embedder_weight[..., :num_channels_without_control]) + pipe.transformer = transformer + + out_features, in_features = pipe.transformer.x_embedder.weight.shape + rank = 4 + + dummy_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False) + dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight, + "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight, + } + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(lora_state_dict, "adapter-1") + + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + lora_out = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4)) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features) + self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features) + self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module")) + + # Testing opposite direction where the LoRA params are zero-padded. + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + dummy_lora_A = torch.nn.Linear(1, rank, bias=False) + dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight, + "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight, + } + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(lora_state_dict, "adapter-1") + + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + lora_out = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4)) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features) + self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features) + self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out) + + def test_normal_lora_with_expanded_lora_raises_error(self): + # Test the following situation. Load a regular LoRA (such as the ones trained on Flux.1-Dev). And then + # load shape expanded LoRA (such as Control LoRA). + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + + # Change the transformer config to mimic a real use case. 
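+        # Start again from the un-expanded transformer: loading the shape-expander LoRA below grows x_embedder to 2 * in_features, and the regular LoRA loaded afterwards gets zero padded to fit.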
+ num_channels_without_control = 4 + transformer = FluxTransformer2DModel.from_config( + components["transformer"].config, in_channels=num_channels_without_control + ).to(torch_device) + components["transformer"] = transformer + + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.DEBUG) + + out_features, in_features = pipe.transformer.x_embedder.weight.shape + rank = 4 + + shape_expander_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False) + shape_expander_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight, + "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight, + } + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(lora_state_dict, "adapter-1") + + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + self.assertTrue(pipe.get_active_adapters() == ["adapter-1"]) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features) + self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features) + self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module")) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + normal_lora_A = torch.nn.Linear(in_features, rank, bias=False) + normal_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": normal_lora_A.weight, + "transformer.x_embedder.lora_B.weight": normal_lora_B.weight, + } + + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(lora_state_dict, "adapter-2") + + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out) + self.assertTrue(pipe.get_active_adapters() == ["adapter-2"]) + + lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3)) + + # Test the opposite case where the first lora has the correct input features and the second lora has expanded input features. + # This should raise a runtime error on input shapes being incompatible. + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + # Change the transformer config to mimic a real use case. 
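+        # In this direction the regular LoRA is loaded first, so no expansion happens; the shape-expander LoRA loaded afterwards is then expected to fail with a size-mismatch RuntimeError.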
+ num_channels_without_control = 4 + transformer = FluxTransformer2DModel.from_config( + components["transformer"].config, in_channels=num_channels_without_control + ).to(torch_device) + components["transformer"] = transformer + + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.DEBUG) + + out_features, in_features = pipe.transformer.x_embedder.weight.shape + rank = 4 + + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": normal_lora_A.weight, + "transformer.x_embedder.lora_B.weight": normal_lora_B.weight, + } + pipe.load_lora_weights(lora_state_dict, "adapter-1") + + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features) + self.assertTrue(pipe.transformer.config.in_channels == in_features) + + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight, + "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight, + } + + # We should check for input shapes being incompatible here. But because above mentioned issue is + # not a supported use case, and because of the PEFT renaming, we will currently have a shape + # mismatch error. + self.assertRaisesRegex( + RuntimeError, + "size mismatch for x_embedder.lora_A.adapter-2.weight", + pipe.load_lora_weights, + lora_state_dict, + "adapter-2", + ) + + def test_fuse_expanded_lora_with_regular_lora(self): + # This test checks if it works when a lora with expanded shapes (like control loras) but + # another lora with correct shapes is loaded. The opposite direction isn't supported and is + # tested with it. + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + + # Change the transformer config to mimic a real use case. 
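+        # Expanded-shape LoRA first, regular LoRA second: the regular one is zero padded on load, and fusing both adapters should reproduce the multi-adapter output.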
+ num_channels_without_control = 4 + transformer = FluxTransformer2DModel.from_config( + components["transformer"].config, in_channels=num_channels_without_control + ).to(torch_device) + components["transformer"] = transformer + + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.DEBUG) + + out_features, in_features = pipe.transformer.x_embedder.weight.shape + rank = 4 + + shape_expander_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False) + shape_expander_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight, + "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight, + } + pipe.load_lora_weights(lora_state_dict, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + normal_lora_A = torch.nn.Linear(in_features, rank, bias=False) + normal_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": normal_lora_A.weight, + "transformer.x_embedder.lora_B.weight": normal_lora_B.weight, + } + + pipe.load_lora_weights(lora_state_dict, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters(["adapter-1", "adapter-2"], [1.0, 1.0]) + lora_output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3)) + self.assertFalse(np.allclose(lora_output, lora_output_3, atol=1e-3, rtol=1e-3)) + self.assertFalse(np.allclose(lora_output_2, lora_output_3, atol=1e-3, rtol=1e-3)) + + pipe.fuse_lora(lora_scale=1.0, adapter_names=["adapter-1", "adapter-2"]) + lora_output_4 = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(np.allclose(lora_output_3, lora_output_4, atol=1e-3, rtol=1e-3)) + + def test_load_regular_lora(self): + # This test checks if a regular lora (think of one trained on Flux.1 Dev for example) can be loaded + # into the transformer with more input channels than Flux.1 Dev, for example. Some examples of those + # transformers include Flux Fill, Flux Control, etc. + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + out_features, in_features = pipe.transformer.x_embedder.weight.shape + rank = 4 + in_features = in_features // 2 # to mimic the Flux.1-Dev LoRA. 
+ normal_lora_A = torch.nn.Linear(in_features, rank, bias=False) + normal_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": normal_lora_A.weight, + "transformer.x_embedder.lora_B.weight": normal_lora_B.weight, + } + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.INFO) + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(lora_state_dict, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2) + self.assertFalse(np.allclose(original_output, lora_output, atol=1e-3, rtol=1e-3)) + + def test_lora_unload_with_parameter_expanded_shapes(self): + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.DEBUG) + + # Change the transformer config to mimic a real use case. + num_channels_without_control = 4 + transformer = FluxTransformer2DModel.from_config( + components["transformer"].config, in_channels=num_channels_without_control + ).to(torch_device) + self.assertTrue( + transformer.config.in_channels == num_channels_without_control, + f"Expected {num_channels_without_control} channels in the modified transformer but has {transformer.config.in_channels=}", + ) + + # This should be initialized with a Flux pipeline variant that doesn't accept `control_image`. + components["transformer"] = transformer + pipe = FluxPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + control_image = inputs.pop("control_image") + original_out = pipe(**inputs, generator=torch.manual_seed(0))[0] + + control_pipe = self.pipeline_class(**components) + out_features, in_features = control_pipe.transformer.x_embedder.weight.shape + rank = 4 + + dummy_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False) + dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight, + "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight, + } + with CaptureLogger(logger) as cap_logger: + control_pipe.load_lora_weights(lora_state_dict, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + inputs["control_image"] = control_image + lora_out = control_pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4)) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features) + self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features) + self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module")) + + control_pipe.unload_lora_weights(reset_to_overwritten_params=True) + self.assertTrue( + control_pipe.transformer.config.in_channels == num_channels_without_control, + f"Expected {num_channels_without_control} channels in the modified transformer but has {control_pipe.transformer.config.in_channels=}", + ) + loaded_pipe = FluxPipeline.from_pipe(control_pipe) + self.assertTrue( 
+ loaded_pipe.transformer.config.in_channels == num_channels_without_control, + f"Expected {num_channels_without_control} channels in the modified transformer but has {loaded_pipe.transformer.config.in_channels=}", + ) + inputs.pop("control_image") + unloaded_lora_out = loaded_pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(unloaded_lora_out, lora_out, rtol=1e-4, atol=1e-4)) + self.assertTrue(np.allclose(unloaded_lora_out, original_out, atol=1e-4, rtol=1e-4)) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features) + self.assertTrue(pipe.transformer.config.in_channels == in_features) + + def test_lora_unload_with_parameter_expanded_shapes_and_no_reset(self): + components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler) + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(logging.DEBUG) + + # Change the transformer config to mimic a real use case. + num_channels_without_control = 4 + transformer = FluxTransformer2DModel.from_config( + components["transformer"].config, in_channels=num_channels_without_control + ).to(torch_device) + self.assertTrue( + transformer.config.in_channels == num_channels_without_control, + f"Expected {num_channels_without_control} channels in the modified transformer but has {transformer.config.in_channels=}", + ) + + # This should be initialized with a Flux pipeline variant that doesn't accept `control_image`. + components["transformer"] = transformer + pipe = FluxPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + control_image = inputs.pop("control_image") + original_out = pipe(**inputs, generator=torch.manual_seed(0))[0] + + control_pipe = self.pipeline_class(**components) + out_features, in_features = control_pipe.transformer.x_embedder.weight.shape + rank = 4 + + dummy_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False) + dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False) + lora_state_dict = { + "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight, + "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight, + } + with CaptureLogger(logger) as cap_logger: + control_pipe.load_lora_weights(lora_state_dict, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser") + + inputs["control_image"] = control_image + lora_out = control_pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4)) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features) + self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features) + self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module")) + + control_pipe.unload_lora_weights(reset_to_overwritten_params=False) + self.assertTrue( + control_pipe.transformer.config.in_channels == 2 * num_channels_without_control, + f"Expected {num_channels_without_control} channels in the modified transformer but has {control_pipe.transformer.config.in_channels=}", + ) + no_lora_out = control_pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(no_lora_out, lora_out, rtol=1e-4, atol=1e-4)) + self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2) + self.assertTrue(pipe.transformer.config.in_channels == in_features * 2) + + @unittest.skip("Not 
supported in Flux.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Not supported in Flux.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass + + +@slow +@nightly +@require_torch_accelerator +@require_peft_backend +@require_big_accelerator +class FluxLoRAIntegrationTests(unittest.TestCase): + """internal note: The integration slices were obtained on audace. + + torch: 2.6.0.dev20241006+cu124 with CUDA 12.5. Need the same setup for the + assertions to pass. + """ + + num_inference_steps = 10 + seed = 0 + + def setUp(self): + super().setUp() + + gc.collect() + backend_empty_cache(torch_device) + + self.pipeline = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16) + + def tearDown(self): + super().tearDown() + + del self.pipeline + gc.collect() + backend_empty_cache(torch_device) + + def test_flux_the_last_ben(self): + self.pipeline.load_lora_weights("TheLastBen/Jon_Snow_Flux_LoRA", weight_name="jon_snow.safetensors") + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + # Instead of calling `enable_model_cpu_offload()`, we do a accelerator placement here because the CI + # run supports it. We have about 34GB RAM in the CI runner which kills the test when run with + # `enable_model_cpu_offload()`. We repeat this for the other tests, too. + self.pipeline = self.pipeline.to(torch_device) + + prompt = "jon snow eating pizza with ketchup" + + out = self.pipeline( + prompt, + num_inference_steps=self.num_inference_steps, + guidance_scale=4.0, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + out_slice = out[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.1855, 0.1855, 0.1836, 0.1855, 0.1836, 0.1875, 0.1777, 0.1758, 0.2246]) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 + + def test_flux_kohya(self): + self.pipeline.load_lora_weights("Norod78/brain-slug-flux") + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + self.pipeline = self.pipeline.to(torch_device) + + prompt = "The cat with a brain slug earring" + out = self.pipeline( + prompt, + num_inference_steps=self.num_inference_steps, + guidance_scale=4.5, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + + out_slice = out[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.6367, 0.6367, 0.6328, 0.6367, 0.6328, 0.6289, 0.6367, 0.6328, 0.6484]) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 + + def test_flux_kohya_with_text_encoder(self): + self.pipeline.load_lora_weights("cocktailpeanut/optimus", weight_name="optimus.safetensors") + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + self.pipeline = self.pipeline.to(torch_device) + + prompt = "optimus is cleaning the house with broomstick" + out = self.pipeline( + prompt, + num_inference_steps=self.num_inference_steps, + guidance_scale=4.5, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + + out_slice = out[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.4023, 0.4023, 0.4023, 0.3965, 0.3984, 0.3965, 0.3926, 0.3906, 0.4219]) + + max_diff = 
numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 + + def test_flux_xlabs(self): + self.pipeline.load_lora_weights("XLabs-AI/flux-lora-collection", weight_name="disney_lora.safetensors") + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + self.pipeline = self.pipeline.to(torch_device) + + prompt = "A blue jay standing on a large basket of rainbow macarons, disney style" + + out = self.pipeline( + prompt, + num_inference_steps=self.num_inference_steps, + guidance_scale=3.5, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + out_slice = out[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.3965, 0.4180, 0.4434, 0.4082, 0.4375, 0.4590, 0.4141, 0.4375, 0.4980]) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 + + def test_flux_xlabs_load_lora_with_single_blocks(self): + self.pipeline.load_lora_weights( + "salinasr/test_xlabs_flux_lora_with_singleblocks", weight_name="lora.safetensors" + ) + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + self.pipeline.enable_model_cpu_offload() + + prompt = "a wizard mouse playing chess" + + out = self.pipeline( + prompt, + num_inference_steps=self.num_inference_steps, + guidance_scale=3.5, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + out_slice = out[0, -3:, -3:, -1].flatten() + expected_slice = np.array( + [0.04882812, 0.04101562, 0.04882812, 0.03710938, 0.02929688, 0.02734375, 0.0234375, 0.01757812, 0.0390625] + ) + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 + + +@nightly +@require_torch_accelerator +@require_peft_backend +@require_big_accelerator +class FluxControlLoRAIntegrationTests(unittest.TestCase): + num_inference_steps = 10 + seed = 0 + prompt = "A robot made of exotic candies and chocolates of different kinds." 
+ + def setUp(self): + super().setUp() + + gc.collect() + backend_empty_cache(torch_device) + + self.pipeline = FluxControlPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16 + ).to(torch_device) + + def tearDown(self): + super().tearDown() + + gc.collect() + backend_empty_cache(torch_device) + + @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"]) + def test_lora(self, lora_ckpt_id): + self.pipeline.load_lora_weights(lora_ckpt_id) + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + + if "Canny" in lora_ckpt_id: + control_image = load_image( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png" + ) + else: + control_image = load_image( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png" + ) + + image = self.pipeline( + prompt=self.prompt, + control_image=control_image, + height=1024, + width=1024, + num_inference_steps=self.num_inference_steps, + guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + + out_slice = image[0, -3:, -3:, -1].flatten() + if "Canny" in lora_ckpt_id: + expected_slice = np.array([0.8438, 0.8438, 0.8438, 0.8438, 0.8438, 0.8398, 0.8438, 0.8438, 0.8516]) + else: + expected_slice = np.array([0.8203, 0.8320, 0.8359, 0.8203, 0.8281, 0.8281, 0.8203, 0.8242, 0.8359]) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 + + @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"]) + def test_lora_with_turbo(self, lora_ckpt_id): + self.pipeline.load_lora_weights(lora_ckpt_id) + self.pipeline.load_lora_weights("ByteDance/Hyper-SD", weight_name="Hyper-FLUX.1-dev-8steps-lora.safetensors") + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + + if "Canny" in lora_ckpt_id: + control_image = load_image( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png" + ) + else: + control_image = load_image( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png" + ) + + image = self.pipeline( + prompt=self.prompt, + control_image=control_image, + height=1024, + width=1024, + num_inference_steps=self.num_inference_steps, + guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0, + output_type="np", + generator=torch.manual_seed(self.seed), + ).images + + out_slice = image[0, -3:, -3:, -1].flatten() + if "Canny" in lora_ckpt_id: + expected_slice = np.array([0.6562, 0.7266, 0.7578, 0.6367, 0.6758, 0.7031, 0.6172, 0.6602, 0.6484]) + else: + expected_slice = np.array([0.6680, 0.7344, 0.7656, 0.6484, 0.6875, 0.7109, 0.6328, 0.6719, 0.6562]) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_hunyuanvideo.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_hunyuanvideo.py new file mode 100644 index 0000000000000000000000000000000000000000..62d045f8364dc6627358ebe08c4e55cce0767a7f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_hunyuanvideo.py @@ -0,0 +1,264 @@ +# Copyright 2025 
HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import sys +import unittest + +import numpy as np +import torch +from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast + +from diffusers import ( + AutoencoderKLHunyuanVideo, + FlowMatchEulerDiscreteScheduler, + HunyuanVideoPipeline, + HunyuanVideoTransformer3DModel, +) + +from ..testing_utils import ( + Expectations, + backend_empty_cache, + floats_tensor, + nightly, + numpy_cosine_similarity_distance, + require_big_accelerator, + require_peft_backend, + require_torch_accelerator, + skip_mps, + torch_device, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +@skip_mps +class HunyuanVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = HunyuanVideoPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "in_channels": 4, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 10, + "num_layers": 1, + "num_single_layers": 1, + "num_refiner_layers": 1, + "patch_size": 1, + "patch_size_t": 1, + "guidance_embeds": True, + "text_embed_dim": 16, + "pooled_projection_dim": 8, + "rope_axes_dim": (2, 4, 4), + } + transformer_cls = HunyuanVideoTransformer3DModel + vae_kwargs = { + "in_channels": 3, + "out_channels": 3, + "latent_channels": 4, + "down_block_types": ( + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + ), + "up_block_types": ( + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + ), + "block_out_channels": (8, 8, 8, 8), + "layers_per_block": 1, + "act_fn": "silu", + "norm_num_groups": 4, + "scaling_factor": 0.476986, + "spatial_compression_ratio": 8, + "temporal_compression_ratio": 4, + "mid_block_add_attention": True, + } + vae_cls = AutoencoderKLHunyuanVideo + has_two_text_encoders = True + tokenizer_cls, tokenizer_id, tokenizer_subfolder = ( + LlamaTokenizerFast, + "hf-internal-testing/tiny-random-hunyuanvideo", + "tokenizer", + ) + tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = ( + CLIPTokenizer, + "hf-internal-testing/tiny-random-hunyuanvideo", + "tokenizer_2", + ) + text_encoder_cls, text_encoder_id, text_encoder_subfolder = ( + LlamaModel, + "hf-internal-testing/tiny-random-hunyuanvideo", + "text_encoder", + ) + text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = ( + CLIPTextModel, + "hf-internal-testing/tiny-random-hunyuanvideo", + "text_encoder_2", + ) + + @property + def output_shape(self): + return (1, 9, 32, 32, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + num_frames = 9 + num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 + sizes = (4, 4) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, 
num_latent_frames, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "", + "num_frames": num_frames, + "num_inference_steps": 1, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": sequence_length, + "prompt_template": {"template": "{}", "crop_start": 0}, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + # TODO(aryan): Fix the following test + @unittest.skip("This test fails with an error I haven't been able to debug yet.") + def test_simple_inference_save_pretrained(self): + pass + + @unittest.skip("Not supported in HunyuanVideo.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in HunyuanVideo.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in HunyuanVideo.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.") + def test_simple_inference_with_text_lora_save_load(self): + pass + + +@nightly +@require_torch_accelerator +@require_peft_backend +@require_big_accelerator +class HunyuanVideoLoRAIntegrationTests(unittest.TestCase): + """internal note: The integration slices were obtained on DGX. + + torch: 2.5.1+cu124 with CUDA 12.5. Need the same setup for the + assertions to pass. + """ + + num_inference_steps = 10 + seed = 0 + + def setUp(self): + super().setUp() + + gc.collect() + backend_empty_cache(torch_device) + + model_id = "hunyuanvideo-community/HunyuanVideo" + transformer = HunyuanVideoTransformer3DModel.from_pretrained( + model_id, subfolder="transformer", torch_dtype=torch.bfloat16 + ) + self.pipeline = HunyuanVideoPipeline.from_pretrained( + model_id, transformer=transformer, torch_dtype=torch.float16 + ).to(torch_device) + + def tearDown(self): + super().tearDown() + + gc.collect() + backend_empty_cache(torch_device) + + def test_original_format_cseti(self): + self.pipeline.load_lora_weights( + "Cseti/HunyuanVideo-LoRA-Arcane_Jinx-v1", weight_name="csetiarcane-nfjinx-v1-6000.safetensors" + ) + self.pipeline.fuse_lora() + self.pipeline.unload_lora_weights() + self.pipeline.vae.enable_tiling() + + prompt = "CSETIARCANE. 
A cat walks on the grass, realistic" + + out = self.pipeline( + prompt=prompt, + height=320, + width=512, + num_frames=9, + num_inference_steps=self.num_inference_steps, + output_type="np", + generator=torch.manual_seed(self.seed), + ).frames[0] + out = out.flatten() + out_slice = np.concatenate((out[:8], out[-8:])) + + # fmt: off + expected_slices = Expectations( + { + ("cuda", 7): np.array([0.1013, 0.1924, 0.0078, 0.1021, 0.1929, 0.0078, 0.1023, 0.1919, 0.7402, 0.104, 0.4482, 0.7354, 0.0925, 0.4382, 0.7275, 0.0815]), + } + ) + # fmt: on + expected_slice = expected_slices.get_expectation() + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice) + + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_ltx_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_ltx_video.py new file mode 100644 index 0000000000000000000000000000000000000000..a8ad30e44827036f127a5a66fb55e3b1c72146aa --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_ltx_video.py @@ -0,0 +1,148 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import unittest + +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLLTXVideo, + FlowMatchEulerDiscreteScheduler, + LTXPipeline, + LTXVideoTransformer3DModel, +) + +from ..testing_utils import floats_tensor, require_peft_backend + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +class LTXVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = LTXPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "in_channels": 8, + "out_channels": 8, + "patch_size": 1, + "patch_size_t": 1, + "num_attention_heads": 4, + "attention_head_dim": 8, + "cross_attention_dim": 32, + "num_layers": 1, + "caption_channels": 32, + } + transformer_cls = LTXVideoTransformer3DModel + vae_kwargs = { + "in_channels": 3, + "out_channels": 3, + "latent_channels": 8, + "block_out_channels": (8, 8, 8, 8), + "decoder_block_out_channels": (8, 8, 8, 8), + "layers_per_block": (1, 1, 1, 1, 1), + "decoder_layers_per_block": (1, 1, 1, 1, 1), + "spatio_temporal_scaling": (True, True, False, False), + "decoder_spatio_temporal_scaling": (True, True, False, False), + "decoder_inject_noise": (False, False, False, False, False), + "upsample_residual": (False, False, False, False), + "upsample_factor": (1, 1, 1, 1), + "timestep_conditioning": False, + "patch_size": 1, + "patch_size_t": 1, + "encoder_causal": True, + "decoder_causal": False, + } + vae_cls = AutoencoderKLLTXVideo + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + text_encoder_target_modules = ["q", "k", "v", 
"o"] + + @property + def output_shape(self): + return (1, 9, 32, 32, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 8 + num_frames = 9 + num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 + latent_height = 8 + latent_width = 8 + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_latent_frames, num_channels, latent_height, latent_width)) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "dance monkey", + "num_frames": num_frames, + "num_inference_steps": 4, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": sequence_length, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + @unittest.skip("Not supported in LTXVideo.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in LTXVideo.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in LTXVideo.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in LTXVideo.") + def test_simple_inference_with_text_lora_save_load(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_lumina2.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_lumina2.py new file mode 100644 index 0000000000000000000000000000000000000000..0ebc831b11474b4290a462e68533a54d4ea1b5f9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_lumina2.py @@ -0,0 +1,173 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +import numpy as np +import pytest +import torch +from transformers import AutoTokenizer, GemmaForCausalLM + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + Lumina2Pipeline, + Lumina2Transformer2DModel, +) + +from ..testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402 + + +@require_peft_backend +class Lumina2LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = Lumina2Pipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "sample_size": 4, + "patch_size": 2, + "in_channels": 4, + "hidden_size": 8, + "num_layers": 2, + "num_attention_heads": 1, + "num_kv_heads": 1, + "multiple_of": 16, + "ffn_dim_multiplier": None, + "norm_eps": 1e-5, + "scaling_factor": 1.0, + "axes_dim_rope": [4, 2, 2], + "cap_feat_dim": 8, + } + transformer_cls = Lumina2Transformer2DModel + vae_kwargs = { + "sample_size": 32, + "in_channels": 3, + "out_channels": 3, + "block_out_channels": (4,), + "layers_per_block": 1, + "latent_channels": 4, + "norm_num_groups": 1, + "use_quant_conv": False, + "use_post_quant_conv": False, + "shift_factor": 0.0609, + "scaling_factor": 1.5035, + } + vae_cls = AutoencoderKL + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/dummy-gemma" + text_encoder_cls, text_encoder_id = GemmaForCausalLM, "hf-internal-testing/dummy-gemma-diffusers" + + @property + def output_shape(self): + return (1, 4, 4, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 32, + "width": 32, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + @unittest.skip("Not supported in Lumina2.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Lumina2.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Lumina2.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Lumina2.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Lumina2.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Lumina2.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Lumina2.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Lumina2.") + def test_simple_inference_with_text_lora_save_load(self): + pass + + @skip_mps + @pytest.mark.xfail( + condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"), + reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 
2.4.1.", + strict=False, + ) + def test_lora_fuse_nan(self): + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + # corrupt one LoRA weight with `inf` values + with torch.no_grad(): + pipe.transformer.layers[0].attn.to_q.lora_A["adapter-1"].weight += float("inf") + + # with `safe_fusing=True` we should see an Error + with self.assertRaises(ValueError): + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) + + # without we should not see an error, but every image will be black + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) + out = pipe(**inputs)[0] + + self.assertTrue(np.isnan(out).all()) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_mochi.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_mochi.py new file mode 100644 index 0000000000000000000000000000000000000000..21cc5f11a352f27d57b01b37ddacff302b4d9890 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_mochi.py @@ -0,0 +1,143 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel + +from ..testing_utils import ( + floats_tensor, + require_peft_backend, + skip_mps, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +@skip_mps +class MochiLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = MochiPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "patch_size": 2, + "num_attention_heads": 2, + "attention_head_dim": 8, + "num_layers": 2, + "pooled_projection_dim": 16, + "in_channels": 12, + "out_channels": None, + "qk_norm": "rms_norm", + "text_embed_dim": 32, + "time_embed_dim": 4, + "activation_fn": "swiglu", + "max_sequence_length": 16, + } + transformer_cls = MochiTransformer3DModel + vae_kwargs = { + "latent_channels": 12, + "out_channels": 3, + "encoder_block_out_channels": (32, 32, 32, 32), + "decoder_block_out_channels": (32, 32, 32, 32), + "layers_per_block": (1, 1, 1, 1, 1), + } + vae_cls = AutoencoderKLMochi + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + text_encoder_target_modules = ["q", "k", "v", "o"] + + @property + def output_shape(self): + return (1, 7, 16, 16, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + num_frames = 7 + num_latent_frames = 3 + sizes = (2, 2) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "dance monkey", + "num_frames": num_frames, + "num_inference_steps": 4, + "guidance_scale": 6.0, + # Cannot reduce because convolution kernel becomes bigger than sample + "height": 16, + "width": 16, + "max_sequence_length": sequence_length, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + @unittest.skip("Not supported in Mochi.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Mochi.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Mochi.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Mochi.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Mochi.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Mochi.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Mochi.") + def 
test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Mochi.") + def test_simple_inference_with_text_lora_save_load(self): + pass + + @unittest.skip("Not supported in CogVideoX.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_qwenimage.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_qwenimage.py new file mode 100644 index 0000000000000000000000000000000000000000..44ef9b0a37b3c9ea7023a42c764e09d4a8de6d9f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_qwenimage.py @@ -0,0 +1,130 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +import unittest + +import torch +from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImagePipeline, + QwenImageTransformer2DModel, +) + +from ..testing_utils import floats_tensor, require_peft_backend + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +class QwenImageLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = QwenImagePipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "patch_size": 2, + "in_channels": 16, + "out_channels": 4, + "num_layers": 2, + "attention_head_dim": 16, + "num_attention_heads": 3, + "joint_attention_dim": 16, + "guidance_embeds": False, + "axes_dims_rope": (8, 4, 4), + } + transformer_cls = QwenImageTransformer2DModel + z_dim = 4 + vae_kwargs = { + "base_dim": z_dim * 6, + "z_dim": z_dim, + "dim_mult": [1, 2, 4], + "num_res_blocks": 1, + "temperal_downsample": [False, True], + "latents_mean": [0.0] * 4, + "latents_std": [1.0] * 4, + } + vae_cls = AutoencoderKLQwenImage + tokenizer_cls, tokenizer_id = Qwen2Tokenizer, "hf-internal-testing/tiny-random-Qwen25VLForCondGen" + text_encoder_cls, text_encoder_id = ( + Qwen2_5_VLForConditionalGeneration, + "hf-internal-testing/tiny-random-Qwen25VLForCondGen", + ) + denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0"] + + @property + def output_shape(self): + return (1, 8, 8, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 10 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "num_inference_steps": 4, + "guidance_scale": 0.0, + "height": 8, + "width": 8, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, 
input_ids, pipeline_inputs + + @unittest.skip("Not supported in Qwen Image.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Qwen Image.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Qwen Image.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Qwen Image.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Qwen Image.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Qwen Image.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Qwen Image.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Qwen Image.") + def test_simple_inference_with_text_lora_save_load(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sana.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sana.py new file mode 100644 index 0000000000000000000000000000000000000000..a08908c6108a15314a7e76e7acb7269995116f2a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sana.py @@ -0,0 +1,139 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest + +import torch +from transformers import Gemma2Model, GemmaTokenizer + +from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel + +from ..testing_utils import floats_tensor, require_peft_backend + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +class SanaLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = SanaPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler(shift=7.0) + scheduler_kwargs = {} + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + transformer_kwargs = { + "patch_size": 1, + "in_channels": 4, + "out_channels": 4, + "num_layers": 1, + "num_attention_heads": 2, + "attention_head_dim": 4, + "num_cross_attention_heads": 2, + "cross_attention_head_dim": 4, + "cross_attention_dim": 8, + "caption_channels": 8, + "sample_size": 32, + } + transformer_cls = SanaTransformer2DModel + vae_kwargs = { + "in_channels": 3, + "latent_channels": 4, + "attention_head_dim": 2, + "encoder_block_types": ( + "ResBlock", + "EfficientViTBlock", + ), + "decoder_block_types": ( + "ResBlock", + "EfficientViTBlock", + ), + "encoder_block_out_channels": (8, 8), + "decoder_block_out_channels": (8, 8), + "encoder_qkv_multiscales": ((), (5,)), + "decoder_qkv_multiscales": ((), (5,)), + "encoder_layers_per_block": (1, 1), + "decoder_layers_per_block": [1, 1], + "downsample_block_type": "conv", + "upsample_block_type": "interpolate", + "decoder_norm_types": "rms_norm", + "decoder_act_fns": "silu", + "scaling_factor": 0.41407, + } + vae_cls = AutoencoderDC + tokenizer_cls, tokenizer_id = GemmaTokenizer, "hf-internal-testing/dummy-gemma" + text_encoder_cls, text_encoder_id = Gemma2Model, "hf-internal-testing/dummy-gemma-for-diffusers" + + @property + def output_shape(self): + return (1, 32, 32, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "", + "negative_prompt": "", + "num_inference_steps": 4, + "guidance_scale": 4.5, + "height": 32, + "width": 32, + "max_sequence_length": sequence_length, + "output_type": "np", + "complex_human_instruction": None, + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + @unittest.skip("Not supported in SANA.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Not supported in SANA.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in SANA.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in SANA.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in SANA.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in SANA.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in SANA.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in SANA.") + def 
test_simple_inference_with_text_lora_save_load(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd.py new file mode 100644 index 0000000000000000000000000000000000000000..933bf2336a59b841e3de63565c6e32318ee334e5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd.py @@ -0,0 +1,769 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import sys +import unittest + +import numpy as np +import torch +import torch.nn as nn +from huggingface_hub import hf_hub_download +from safetensors.torch import load_file +from transformers import CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoPipelineForImage2Image, + AutoPipelineForText2Image, + DDIMScheduler, + DiffusionPipeline, + LCMScheduler, + StableDiffusionPipeline, +) +from diffusers.utils.import_utils import is_accelerate_available + +from ..testing_utils import ( + Expectations, + backend_empty_cache, + load_image, + nightly, + numpy_cosine_similarity_distance, + require_peft_backend, + require_torch_accelerator, + slow, + torch_device, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402 + + +if is_accelerate_available(): + from accelerate.utils import release_memory + + +class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase): + pipeline_class = StableDiffusionPipeline + scheduler_cls = DDIMScheduler + scheduler_kwargs = { + "beta_start": 0.00085, + "beta_end": 0.012, + "beta_schedule": "scaled_linear", + "clip_sample": False, + "set_alpha_to_one": False, + "steps_offset": 1, + } + unet_kwargs = { + "block_out_channels": (32, 64), + "layers_per_block": 2, + "sample_size": 32, + "in_channels": 4, + "out_channels": 4, + "down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"), + "up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"), + "cross_attention_dim": 32, + } + vae_kwargs = { + "block_out_channels": [32, 64], + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], + "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], + "latent_channels": 4, + } + text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2" + tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" + + @property + def output_shape(self): + return (1, 64, 64, 3) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + # Keeping this test here makes sense because it doesn't do any integration checks + # (value assertions on logits).
+ @slow + @require_torch_accelerator + def test_integration_move_lora_cpu(self): + path = "stable-diffusion-v1-5/stable-diffusion-v1-5" + lora_id = "takuma104/lora-test-text-encoder-lora-target" + + pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16) + pipe.load_lora_weights(lora_id, adapter_name="adapter-1") + pipe.load_lora_weights(lora_id, adapter_name="adapter-2") + pipe = pipe.to(torch_device) + + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), + "Lora not correctly set in text encoder", + ) + + self.assertTrue( + check_if_lora_correctly_set(pipe.unet), + "Lora not correctly set in unet", + ) + + # We will offload the first adapter in CPU and check if the offloading + # has been performed correctly + pipe.set_lora_device(["adapter-1"], "cpu") + + for name, module in pipe.unet.named_modules(): + if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device == torch.device("cpu")) + elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device != torch.device("cpu")) + + for name, module in pipe.text_encoder.named_modules(): + if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device == torch.device("cpu")) + elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device != torch.device("cpu")) + + pipe.set_lora_device(["adapter-1"], 0) + + for n, m in pipe.unet.named_modules(): + if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)): + self.assertTrue(m.weight.device != torch.device("cpu")) + + for n, m in pipe.text_encoder.named_modules(): + if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)): + self.assertTrue(m.weight.device != torch.device("cpu")) + + pipe.set_lora_device(["adapter-1", "adapter-2"], torch_device) + + for n, m in pipe.unet.named_modules(): + if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)): + self.assertTrue(m.weight.device != torch.device("cpu")) + + for n, m in pipe.text_encoder.named_modules(): + if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)): + self.assertTrue(m.weight.device != torch.device("cpu")) + + @slow + @require_torch_accelerator + def test_integration_move_lora_dora_cpu(self): + from peft import LoraConfig + + path = "stable-diffusion-v1-5/stable-diffusion-v1-5" + unet_lora_config = LoraConfig( + init_lora_weights="gaussian", + target_modules=["to_k", "to_q", "to_v", "to_out.0"], + use_dora=True, + ) + text_lora_config = LoraConfig( + init_lora_weights="gaussian", + target_modules=["q_proj", "k_proj", "v_proj", "out_proj"], + use_dora=True, + ) + + pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16) + pipe.unet.add_adapter(unet_lora_config, "adapter-1") + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), + "Lora not correctly set in text encoder", + ) + + self.assertTrue( + check_if_lora_correctly_set(pipe.unet), + "Lora not correctly set in unet", + ) + + for name, param in pipe.unet.named_parameters(): + if "lora_" in name: + self.assertEqual(param.device, torch.device("cpu")) + + for name, param in pipe.text_encoder.named_parameters(): + if "lora_" in name: + self.assertEqual(param.device, torch.device("cpu")) + + pipe.set_lora_device(["adapter-1"], 
torch_device) + + for name, param in pipe.unet.named_parameters(): + if "lora_" in name: + self.assertNotEqual(param.device, torch.device("cpu")) + + for name, param in pipe.text_encoder.named_parameters(): + if "lora_" in name: + self.assertNotEqual(param.device, torch.device("cpu")) + + @slow + @require_torch_accelerator + def test_integration_set_lora_device_different_target_layers(self): + # fixes a bug that occurred when calling set_lora_device with multiple adapters loaded that target different + # layers, see #11833 + from peft import LoraConfig + + path = "stable-diffusion-v1-5/stable-diffusion-v1-5" + pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16) + # configs partly target the same, partly different layers + config0 = LoraConfig(target_modules=["to_k", "to_v"]) + config1 = LoraConfig(target_modules=["to_k", "to_q"]) + pipe.unet.add_adapter(config0, adapter_name="adapter-0") + pipe.unet.add_adapter(config1, adapter_name="adapter-1") + pipe = pipe.to(torch_device) + + self.assertTrue( + check_if_lora_correctly_set(pipe.unet), + "Lora not correctly set in unet", + ) + + # sanity check that the adapters don't target the same layers, otherwise the test passes even without the fix + modules_adapter_0 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-0")} + modules_adapter_1 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-1")} + self.assertNotEqual(modules_adapter_0, modules_adapter_1) + self.assertTrue(modules_adapter_0 - modules_adapter_1) + self.assertTrue(modules_adapter_1 - modules_adapter_0) + + # setting both separately works + pipe.set_lora_device(["adapter-0"], "cpu") + pipe.set_lora_device(["adapter-1"], "cpu") + + for name, module in pipe.unet.named_modules(): + if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device == torch.device("cpu")) + elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device == torch.device("cpu")) + + # setting both at once also works + pipe.set_lora_device(["adapter-0", "adapter-1"], torch_device) + + for name, module in pipe.unet.named_modules(): + if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device != torch.device("cpu")) + elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)): + self.assertTrue(module.weight.device != torch.device("cpu")) + + +@slow +@nightly +@require_torch_accelerator +@require_peft_backend +class LoraIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_integration_logits_with_scale(self): + path = "stable-diffusion-v1-5/stable-diffusion-v1-5" + lora_id = "takuma104/lora-test-text-encoder-lora-target" + + pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32) + pipe.load_lora_weights(lora_id) + pipe = pipe.to(torch_device) + + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), + "Lora not correctly set in text encoder", + ) + + prompt = "a red sks dog" + + images = pipe( + prompt=prompt, + num_inference_steps=15, + cross_attention_kwargs={"scale": 0.5}, + generator=torch.manual_seed(0), + output_type="np", + ).images + + expected_slice_scale = np.array([0.307, 0.283, 0.310, 0.310, 0.300, 0.314, 0.336, 0.314, 0.321]) + predicted_slice = 
images[0, -3:, -3:, -1].flatten() + + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_integration_logits_no_scale(self): + path = "stable-diffusion-v1-5/stable-diffusion-v1-5" + lora_id = "takuma104/lora-test-text-encoder-lora-target" + + pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32) + pipe.load_lora_weights(lora_id) + pipe = pipe.to(torch_device) + + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), + "Lora not correctly set in text encoder", + ) + + prompt = "a red sks dog" + + images = pipe(prompt=prompt, num_inference_steps=30, generator=torch.manual_seed(0), output_type="np").images + + expected_slice_scale = np.array([0.074, 0.064, 0.073, 0.0842, 0.069, 0.0641, 0.0794, 0.076, 0.084]) + predicted_slice = images[0, -3:, -3:, -1].flatten() + + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_dreambooth_old_format(self): + generator = torch.Generator("cpu").manual_seed(0) + + lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example" + + base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None) + pipe = pipe.to(torch_device) + pipe.load_lora_weights(lora_model_id) + + images = pipe( + "A photo of a sks dog floating in the river", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.7207, 0.6787, 0.6010, 0.7478, 0.6838, 0.6064, 0.6984, 0.6443, 0.5785]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_dreambooth_text_encoder_new_format(self): + generator = torch.Generator().manual_seed(0) + + lora_model_id = "hf-internal-testing/lora-trained" + + base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None) + pipe = pipe.to(torch_device) + pipe.load_lora_weights(lora_model_id) + + images = pipe("A photo of a sks dog", output_type="np", generator=generator, num_inference_steps=2).images + + images = images[0, -3:, -3:, -1].flatten() + + expected = np.array([0.6628, 0.6138, 0.5390, 0.6625, 0.6130, 0.5463, 0.6166, 0.5788, 0.5359]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_a1111(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None).to( + torch_device + ) + lora_model_id = "hf-internal-testing/civitai-light-shadow-lora" + lora_filename = "light_and_shadow.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_lycoris(self): + generator = 
torch.Generator().manual_seed(0) + + pipe = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/Amixx", safety_checker=None, use_safetensors=True, variant="fp16" + ).to(torch_device) + lora_model_id = "hf-internal-testing/edgLycorisMugler-light" + lora_filename = "edgLycorisMugler-light.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.6463, 0.658, 0.599, 0.6542, 0.6512, 0.6213, 0.658, 0.6485, 0.6017]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_a1111_with_model_cpu_offload(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None) + pipe.enable_model_cpu_offload(device=torch_device) + lora_model_id = "hf-internal-testing/civitai-light-shadow-lora" + lora_filename = "light_and_shadow.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_a1111_with_sequential_cpu_offload(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None) + pipe.enable_sequential_cpu_offload(device=torch_device) + lora_model_id = "hf-internal-testing/civitai-light-shadow-lora" + lora_filename = "light_and_shadow.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_kohya_sd_v15_with_higher_dimensions(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None + ).to(torch_device) + lora_model_id = "hf-internal-testing/urushisato-lora" + lora_filename = "urushisato_v15.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.7165, 0.6616, 0.5833, 0.7504, 0.6718, 0.587, 0.6871, 0.6361, 0.5694]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_vanilla_funetuning(self): + generator = torch.Generator().manual_seed(0) + + lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4" + + base_model_id = 
"stable-diffusion-v1-5/stable-diffusion-v1-5" + + pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None) + pipe = pipe.to(torch_device) + pipe.load_lora_weights(lora_model_id) + + images = pipe("A pokemon with blue eyes.", output_type="np", generator=generator, num_inference_steps=2).images + + image_slice = images[0, -3:, -3:, -1].flatten() + + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.6544, + 0.6127, + 0.5397, + 0.6845, + 0.6047, + 0.5469, + 0.6349, + 0.5906, + 0.5382, + ] + ), + ("cuda", 7): np.array( + [ + 0.7406, + 0.699, + 0.5963, + 0.7493, + 0.7045, + 0.6096, + 0.6886, + 0.6388, + 0.583, + ] + ), + ("cuda", 8): np.array( + [ + 0.6542, + 0.61253, + 0.5396, + 0.6843, + 0.6044, + 0.5468, + 0.6349, + 0.5905, + 0.5381, + ] + ), + } + ) + expected_slice = expected_slices.get_expectation() + + max_diff = numpy_cosine_similarity_distance(expected_slice, image_slice) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_unload_kohya_lora(self): + generator = torch.manual_seed(0) + prompt = "masterpiece, best quality, mountain" + num_inference_steps = 2 + + pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None + ).to(torch_device) + initial_images = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + initial_images = initial_images[0, -3:, -3:, -1].flatten() + + lora_model_id = "hf-internal-testing/civitai-colored-icons-lora" + lora_filename = "Colored_Icons_by_vizsumit.safetensors" + + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + generator = torch.manual_seed(0) + lora_images = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + lora_images = lora_images[0, -3:, -3:, -1].flatten() + + pipe.unload_lora_weights() + generator = torch.manual_seed(0) + unloaded_lora_images = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten() + + self.assertFalse(np.allclose(initial_images, lora_images)) + self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3)) + + release_memory(pipe) + + def test_load_unload_load_kohya_lora(self): + # This test ensures that a Kohya-style LoRA can be safely unloaded and then loaded + # without introducing any side-effects. Even though the test uses a Kohya-style + # LoRA, the underlying adapter handling mechanism is format-agnostic. 
+ generator = torch.manual_seed(0) + prompt = "masterpiece, best quality, mountain" + num_inference_steps = 2 + + pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None + ).to(torch_device) + initial_images = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + initial_images = initial_images[0, -3:, -3:, -1].flatten() + + lora_model_id = "hf-internal-testing/civitai-colored-icons-lora" + lora_filename = "Colored_Icons_by_vizsumit.safetensors" + + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + generator = torch.manual_seed(0) + lora_images = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + lora_images = lora_images[0, -3:, -3:, -1].flatten() + + pipe.unload_lora_weights() + generator = torch.manual_seed(0) + unloaded_lora_images = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten() + + self.assertFalse(np.allclose(initial_images, lora_images)) + self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3)) + + # make sure we can load a LoRA again after unloading and they don't have + # any undesired effects. + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + generator = torch.manual_seed(0) + lora_images_again = pipe( + prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps + ).images + lora_images_again = lora_images_again[0, -3:, -3:, -1].flatten() + + self.assertTrue(np.allclose(lora_images, lora_images_again, atol=1e-3)) + release_memory(pipe) + + def test_not_empty_state_dict(self): + # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again + pipe = AutoPipelineForText2Image.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 + ).to(torch_device) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + + cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors") + lcm_lora = load_file(cached_file) + + pipe.load_lora_weights(lcm_lora, adapter_name="lcm") + self.assertTrue(lcm_lora != {}) + release_memory(pipe) + + def test_load_unload_load_state_dict(self): + # Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again + pipe = AutoPipelineForText2Image.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 + ).to(torch_device) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + + cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors") + lcm_lora = load_file(cached_file) + previous_state_dict = lcm_lora.copy() + + pipe.load_lora_weights(lcm_lora, adapter_name="lcm") + self.assertDictEqual(lcm_lora, previous_state_dict) + + pipe.unload_lora_weights() + pipe.load_lora_weights(lcm_lora, adapter_name="lcm") + self.assertDictEqual(lcm_lora, previous_state_dict) + + release_memory(pipe) + + def test_sdv1_5_lcm_lora(self): + pipe = DiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 + ) + pipe.to(torch_device) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + + generator = torch.Generator("cpu").manual_seed(0) + + lora_model_id = "latent-consistency/lcm-lora-sdv1-5" + 
pipe.load_lora_weights(lora_model_id) + + image = pipe( + "masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5 + ).images[0] + + expected_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdv15_lcm_lora.png" + ) + + image_np = pipe.image_processor.pil_to_numpy(image) + expected_image_np = pipe.image_processor.pil_to_numpy(expected_image) + + max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten()) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + + release_memory(pipe) + + def test_sdv1_5_lcm_lora_img2img(self): + pipe = AutoPipelineForImage2Image.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 + ) + pipe.to(torch_device) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape.png" + ) + + generator = torch.Generator("cpu").manual_seed(0) + + lora_model_id = "latent-consistency/lcm-lora-sdv1-5" + pipe.load_lora_weights(lora_model_id) + + image = pipe( + "snowy mountain", + generator=generator, + image=init_image, + strength=0.5, + num_inference_steps=4, + guidance_scale=0.5, + ).images[0] + + expected_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdv15_lcm_lora_img2img.png" + ) + + image_np = pipe.image_processor.pil_to_numpy(image) + expected_image_np = pipe.image_processor.pil_to_numpy(expected_image) + + max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten()) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + + release_memory(pipe) + + def test_sd_load_civitai_empty_network_alpha(self): + """ + This test simply checks that loading a LoRA with an empty network alpha works fine + See: https://github.com/huggingface/diffusers/issues/5606 + """ + pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + pipeline.enable_sequential_cpu_offload(device=torch_device) + civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors") + pipeline.load_lora_weights(civitai_path, adapter_name="ahri") + + images = pipeline( + "ahri, masterpiece, league of legends", + output_type="np", + generator=torch.manual_seed(156), + num_inference_steps=5, + ).images + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.0, 0.0, 0.0, 0.002557, 0.020954, 0.001792, 0.006581, 0.00591, 0.002995]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipeline.unload_lora_weights() + release_memory(pipeline) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd3.py new file mode 100644 index 0000000000000000000000000000000000000000..95f6f325e4c71eeaac41ad2683925ef93407d7bf --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd3.py @@ -0,0 +1,191 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import sys +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3Img2ImgPipeline, + StableDiffusion3Pipeline, +) +from diffusers.utils import load_image +from diffusers.utils.import_utils import is_accelerate_available + +from ..testing_utils import ( + backend_empty_cache, + is_flaky, + nightly, + numpy_cosine_similarity_distance, + require_big_accelerator, + require_peft_backend, + require_torch_accelerator, + torch_device, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +if is_accelerate_available(): + from accelerate.utils import release_memory + + +@require_peft_backend +class SD3LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = StableDiffusion3Pipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_kwargs = {} + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + transformer_kwargs = { + "sample_size": 32, + "patch_size": 1, + "in_channels": 4, + "num_layers": 1, + "attention_head_dim": 8, + "num_attention_heads": 4, + "caption_projection_dim": 32, + "joint_attention_dim": 32, + "pooled_projection_dim": 64, + "out_channels": 4, + } + transformer_cls = SD3Transformer2DModel + vae_kwargs = { + "sample_size": 32, + "in_channels": 3, + "out_channels": 3, + "block_out_channels": (4,), + "layers_per_block": 1, + "latent_channels": 4, + "norm_num_groups": 1, + "use_quant_conv": False, + "use_post_quant_conv": False, + "shift_factor": 0.0609, + "scaling_factor": 1.5035, + } + has_three_text_encoders = True + tokenizer_cls, tokenizer_id = CLIPTokenizer, "hf-internal-testing/tiny-random-clip" + tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "hf-internal-testing/tiny-random-clip" + tokenizer_3_cls, tokenizer_3_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = CLIPTextModelWithProjection, "hf-internal-testing/tiny-sd3-text_encoder" + text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "hf-internal-testing/tiny-sd3-text_encoder-2" + text_encoder_3_cls, text_encoder_3_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + @property + def output_shape(self): + return (1, 32, 32, 3) + + @require_torch_accelerator + def test_sd3_lora(self): + """ + Test loading the loras that are saved with the diffusers and peft formats. 
+ Related PR: https://github.com/huggingface/diffusers/pull/8584 + """ + components = self.get_dummy_components() + pipe = self.pipeline_class(**components[0]) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + lora_model_id = "hf-internal-testing/tiny-sd3-loras" + + lora_filename = "lora_diffusers_format.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + pipe.unload_lora_weights() + + lora_filename = "lora_peft_format.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + pass + + @unittest.skip("Not supported in SD3.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in SD3.") + def test_modify_padding_mode(self): + pass + + @is_flaky + def test_multiple_wrong_adapter_name_raises_error(self): + super().test_multiple_wrong_adapter_name_raises_error() + + +@nightly +@require_torch_accelerator +@require_peft_backend +@require_big_accelerator +class SD3LoraIntegrationTests(unittest.TestCase): + pipeline_class = StableDiffusion3Img2ImgPipeline + repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + return { + "prompt": "corgi", + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "generator": generator, + "image": init_image, + } + + def test_sd3_img2img_lora(self): + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) + pipe.load_lora_weights("zwloong/sd3-lora-training-rank16-v2") + pipe.fuse_lora() + pipe.unload_lora_weights() + pipe = pipe.to(torch_device) + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, -3:, -3:] + expected_slice = np.array([0.5649, 0.5405, 0.5488, 0.5688, 0.5449, 0.5513, 0.5337, 0.5107, 0.5059]) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + + assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}" + pipe.unload_lora_weights() + release_memory(pipe) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sdxl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..ac1d65abdaa7c4a176f3923f89f640f0fe6d081f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sdxl.py @@ -0,0 +1,681 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import gc +import importlib +import sys +import time +import unittest + +import numpy as np +import torch +from packaging import version +from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + ControlNetModel, + EulerDiscreteScheduler, + LCMScheduler, + StableDiffusionXLAdapterPipeline, + StableDiffusionXLControlNetPipeline, + StableDiffusionXLPipeline, + T2IAdapter, +) +from diffusers.utils import logging +from diffusers.utils.import_utils import is_accelerate_available + +from ..testing_utils import ( + CaptureLogger, + backend_empty_cache, + is_flaky, + load_image, + nightly, + numpy_cosine_similarity_distance, + require_peft_backend, + require_torch_accelerator, + slow, + torch_device, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal # noqa: E402 + + +if is_accelerate_available(): + from accelerate.utils import release_memory + + +class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase): + has_two_text_encoders = True + pipeline_class = StableDiffusionXLPipeline + scheduler_cls = EulerDiscreteScheduler + scheduler_kwargs = { + "beta_start": 0.00085, + "beta_end": 0.012, + "beta_schedule": "scaled_linear", + "timestep_spacing": "leading", + "steps_offset": 1, + } + unet_kwargs = { + "block_out_channels": (32, 64), + "layers_per_block": 2, + "sample_size": 32, + "in_channels": 4, + "out_channels": 4, + "down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"), + "up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"), + "attention_head_dim": (2, 4), + "use_linear_projection": True, + "addition_embed_type": "text_time", + "addition_time_embed_dim": 8, + "transformer_layers_per_block": (1, 2), + "projection_class_embeddings_input_dim": 80, # 6 * 8 + 32 + "cross_attention_dim": 64, + } + vae_kwargs = { + "block_out_channels": [32, 64], + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], + "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], + "latent_channels": 4, + "sample_size": 128, + } + text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2" + tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" + text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "peft-internal-testing/tiny-clip-text-2" + tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2" + + @property + def output_shape(self): + return (1, 64, 64, 3) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @is_flaky + def test_multiple_wrong_adapter_name_raises_error(self): + super().test_multiple_wrong_adapter_name_raises_error() + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + if torch.cuda.is_available(): + expected_atol = 9e-2 + expected_rtol = 9e-2 + else: + expected_atol = 1e-3 + expected_rtol = 1e-3 + + 
super().test_simple_inference_with_text_denoiser_lora_unfused( + expected_atol=expected_atol, expected_rtol=expected_rtol + ) + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + if torch.cuda.is_available(): + expected_atol = 9e-2 + expected_rtol = 9e-2 + else: + expected_atol = 1e-3 + expected_rtol = 1e-3 + + super().test_simple_inference_with_text_lora_denoiser_fused_multi( + expected_atol=expected_atol, expected_rtol=expected_rtol + ) + + def test_lora_scale_kwargs_match_fusion(self): + if torch.cuda.is_available(): + expected_atol = 9e-2 + expected_rtol = 9e-2 + else: + expected_atol = 1e-3 + expected_rtol = 1e-3 + + super().test_lora_scale_kwargs_match_fusion(expected_atol=expected_atol, expected_rtol=expected_rtol) + + +@slow +@nightly +@require_torch_accelerator +@require_peft_backend +class LoraSDXLIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_sdxl_1_0_lora(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + pipe.enable_model_cpu_offload() + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_sdxl_1_0_blockwise_lora(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + pipe.enable_model_cpu_offload() + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, adapter_name="offset") + scales = { + "unet": { + "down": {"block_1": [1.0, 1.0], "block_2": [1.0, 1.0]}, + "mid": 1.0, + "up": {"block_0": [1.0, 1.0, 1.0], "block_1": [1.0, 1.0, 1.0]}, + }, + } + pipe.set_adapters(["offset"], [scales]) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([00.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_sdxl_lcm_lora(self): + pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + pipe.enable_model_cpu_offload() + + generator = torch.Generator("cpu").manual_seed(0) + + lora_model_id = "latent-consistency/lcm-lora-sdxl" + + pipe.load_lora_weights(lora_model_id) + + image = pipe( + "masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5 + ).images[0] + + expected_image = load_image( + 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdxl_lcm_lora.png" + ) + + image_np = pipe.image_processor.pil_to_numpy(image) + expected_image_np = pipe.image_processor.pil_to_numpy(expected_image) + + max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten()) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_sdxl_1_0_lora_fusion(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + pipe.fuse_lora() + # We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being + # silently deleted - otherwise this will CPU OOM + pipe.unload_lora_weights() + + pipe.enable_model_cpu_offload() + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + # This way we also test equivalence between LoRA fusion and the non-fusion behaviour. + expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-4 + + release_memory(pipe) + + def test_sdxl_1_0_lora_unfusion(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + pipe.fuse_lora() + + pipe.enable_model_cpu_offload() + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3 + ).images + images_with_fusion = images.flatten() + + pipe.unfuse_lora() + generator = torch.Generator("cpu").manual_seed(0) + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3 + ).images + images_without_fusion = images.flatten() + + max_diff = numpy_cosine_similarity_distance(images_with_fusion, images_without_fusion) + assert max_diff < 1e-4 + + release_memory(pipe) + + def test_sdxl_1_0_lora_unfusion_effectivity(self): + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + pipe.enable_model_cpu_offload() + + generator = torch.Generator().manual_seed(0) + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + original_image_slice = images[0, -3:, -3:, -1].flatten() + + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + pipe.fuse_lora() + + generator = torch.Generator().manual_seed(0) + _ = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + pipe.unfuse_lora() + + # We need to unload the lora weights - in the old API unfuse led to unloading the adapter weights + pipe.unload_lora_weights() + + generator = torch.Generator().manual_seed(0) + images = pipe( + "masterpiece, best quality, 
mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + images_without_fusion_slice = images[0, -3:, -3:, -1].flatten() + + max_diff = numpy_cosine_similarity_distance(images_without_fusion_slice, original_image_slice) + assert max_diff < 1e-3 + + release_memory(pipe) + + def test_sdxl_1_0_lora_fusion_efficiency(self): + generator = torch.Generator().manual_seed(0) + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + + pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ) + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload() + + start_time = time.time() + for _ in range(3): + pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + end_time = time.time() + elapsed_time_non_fusion = end_time - start_time + + del pipe + + pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ) + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16) + pipe.fuse_lora() + + # We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being + # silently deleted - otherwise this will CPU OOM + pipe.unload_lora_weights() + pipe.enable_model_cpu_offload() + + generator = torch.Generator().manual_seed(0) + start_time = time.time() + for _ in range(3): + pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + end_time = time.time() + elapsed_time_fusion = end_time - start_time + + self.assertTrue(elapsed_time_fusion < elapsed_time_non_fusion) + + release_memory(pipe) + + def test_sdxl_1_0_last_ben(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + pipe.enable_model_cpu_offload() + lora_model_id = "TheLastBen/Papercut_SDXL" + lora_filename = "papercut.safetensors" + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe("papercut.safetensors", output_type="np", generator=generator, num_inference_steps=2).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.5244, 0.4347, 0.4312, 0.4246, 0.4398, 0.4409, 0.4884, 0.4938, 0.4094]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_sdxl_1_0_fuse_unfuse_all(self): + pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ) + text_encoder_1_sd = copy.deepcopy(pipe.text_encoder.state_dict()) + text_encoder_2_sd = copy.deepcopy(pipe.text_encoder_2.state_dict()) + unet_sd = copy.deepcopy(pipe.unet.state_dict()) + + pipe.load_lora_weights( + "davizca87/sun-flower", weight_name="snfw3rXL-000004.safetensors", torch_dtype=torch.float16 + ) + + fused_te_state_dict = pipe.text_encoder.state_dict() + fused_te_2_state_dict = pipe.text_encoder_2.state_dict() + unet_state_dict = pipe.unet.state_dict() + + peft_ge_070 = version.parse(importlib.metadata.version("peft")) >= version.parse("0.7.0") + + def remap_key(key, sd): + # some keys have moved around for PEFT >= 0.7.0, but they should still be loaded correctly + if (key in sd) or (not peft_ge_070): + return key 
+ + # instead of linear.weight, we now have linear.base_layer.weight, etc. + if key.endswith(".weight"): + key = key[:-7] + ".base_layer.weight" + elif key.endswith(".bias"): + key = key[:-5] + ".base_layer.bias" + return key + + for key, value in text_encoder_1_sd.items(): + key = remap_key(key, fused_te_state_dict) + self.assertTrue(torch.allclose(fused_te_state_dict[key], value)) + + for key, value in text_encoder_2_sd.items(): + key = remap_key(key, fused_te_2_state_dict) + self.assertTrue(torch.allclose(fused_te_2_state_dict[key], value)) + + # compare the pre-load UNet state dict against the post-load one (comparing the + # post-load dict with itself would be a no-op check) + for key, value in unet_sd.items(): + self.assertTrue(torch.allclose(unet_state_dict[key], value)) + + pipe.fuse_lora() + pipe.unload_lora_weights() + + assert not state_dicts_almost_equal(text_encoder_1_sd, pipe.text_encoder.state_dict()) + assert not state_dicts_almost_equal(text_encoder_2_sd, pipe.text_encoder_2.state_dict()) + assert not state_dicts_almost_equal(unet_sd, pipe.unet.state_dict()) + + release_memory(pipe) + del unet_sd, text_encoder_1_sd, text_encoder_2_sd + + def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self): + generator = torch.Generator().manual_seed(0) + + pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + pipe.enable_sequential_cpu_offload() + lora_model_id = "hf-internal-testing/sdxl-1.0-lora" + lora_filename = "sd_xl_offset_example-lora_1.0.safetensors" + + pipe.load_lora_weights(lora_model_id, weight_name=lora_filename) + + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + + images = images[0, -3:, -3:, -1].flatten() + expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535]) + + max_diff = numpy_cosine_similarity_distance(expected, images) + assert max_diff < 1e-3 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_controlnet_canny_lora(self): + controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") + + pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet + ) + pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors") + pipe.enable_sequential_cpu_offload() + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "corgi" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images + + assert images[0].shape == (768, 512, 3) + + original_image = images[0, -3:, -3:, -1].flatten() + expected_image = np.array([0.4574, 0.4487, 0.4435, 0.5163, 0.4396, 0.4411, 0.518, 0.4465, 0.4333]) + + max_diff = numpy_cosine_similarity_distance(expected_image, original_image) + assert max_diff < 1e-4 + + pipe.unload_lora_weights() + release_memory(pipe) + + def test_sdxl_t2i_adapter_canny_lora(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16).to( + "cpu" + ) + pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + adapter=adapter, + torch_dtype=torch.float16, + variant="fp16", + ) + pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors") + pipe.enable_model_cpu_offload() + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = 
"toy" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" + ) + + images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images + + assert images[0].shape == (768, 512, 3) + + image_slice = images[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226]) + assert numpy_cosine_similarity_distance(image_slice, expected_slice) < 1e-4 + + @nightly + def test_sequential_fuse_unfuse(self): + pipe = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 + ) + + # 1. round + pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16) + pipe.to(torch_device) + pipe.fuse_lora() + + generator = torch.Generator().manual_seed(0) + images = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + image_slice = images[0, -3:, -3:, -1].flatten() + + pipe.unfuse_lora() + + # 2. round + pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style", torch_dtype=torch.float16) + pipe.fuse_lora() + pipe.unfuse_lora() + + # 3. round + pipe.load_lora_weights("ostris/crayon_style_lora_sdxl", torch_dtype=torch.float16) + pipe.fuse_lora() + pipe.unfuse_lora() + + # 4. back to 1st round + pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16) + pipe.fuse_lora() + + generator = torch.Generator().manual_seed(0) + images_2 = pipe( + "masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2 + ).images + image_slice_2 = images_2[0, -3:, -3:, -1].flatten() + + max_diff = numpy_cosine_similarity_distance(image_slice, image_slice_2) + assert max_diff < 1e-3 + pipe.unload_lora_weights() + release_memory(pipe) + + @nightly + def test_integration_logits_multi_adapter(self): + path = "stabilityai/stable-diffusion-xl-base-1.0" + lora_id = "CiroN2022/toy-face" + + pipe = StableDiffusionXLPipeline.from_pretrained(path, torch_dtype=torch.float16) + pipe.load_lora_weights(lora_id, weight_name="toy_face_sdxl.safetensors", adapter_name="toy") + pipe = pipe.to(torch_device) + + self.assertTrue(check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in Unet") + + prompt = "toy_face of a hacker with a hoodie" + + lora_scale = 0.9 + + images = pipe( + prompt=prompt, + num_inference_steps=30, + generator=torch.manual_seed(0), + cross_attention_kwargs={"scale": lora_scale}, + output_type="np", + ).images + expected_slice_scale = np.array([0.538, 0.539, 0.540, 0.540, 0.542, 0.539, 0.538, 0.541, 0.539]) + + predicted_slice = images[0, -3:, -3:, -1].flatten() + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + assert max_diff < 1e-3 + + pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel") + pipe.set_adapters("pixel") + + prompt = "pixel art, a hacker with a hoodie, simple, flat colors" + images = pipe( + prompt, + num_inference_steps=30, + guidance_scale=7.5, + cross_attention_kwargs={"scale": lora_scale}, + generator=torch.manual_seed(0), + output_type="np", + ).images + + predicted_slice = images[0, -3:, -3:, -1].flatten() + expected_slice_scale = np.array( + [0.61973065, 0.62018543, 0.62181497, 0.61933696, 0.6208608, 0.620576, 0.6200281, 0.62258327, 0.6259889] + ) + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + 
assert max_diff < 1e-3 + + # multi-adapter inference + pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.5, 1.0]) + images = pipe( + prompt, + num_inference_steps=30, + guidance_scale=7.5, + cross_attention_kwargs={"scale": 1.0}, + generator=torch.manual_seed(0), + output_type="np", + ).images + predicted_slice = images[0, -3:, -3:, -1].flatten() + expected_slice_scale = np.array([0.5888, 0.5897, 0.5946, 0.5888, 0.5935, 0.5946, 0.5857, 0.5891, 0.5909]) + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + assert max_diff < 1e-3 + + # Lora disabled + pipe.disable_lora() + images = pipe( + prompt, + num_inference_steps=30, + guidance_scale=7.5, + cross_attention_kwargs={"scale": lora_scale}, + generator=torch.manual_seed(0), + output_type="np", + ).images + predicted_slice = images[0, -3:, -3:, -1].flatten() + expected_slice_scale = np.array([0.5456, 0.5466, 0.5487, 0.5458, 0.5469, 0.5454, 0.5446, 0.5479, 0.5487]) + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + assert max_diff < 1e-3 + + @nightly + def test_integration_logits_for_dora_lora(self): + pipeline = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0") + + logger = logging.get_logger("diffusers.loaders.lora_pipeline") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + pipeline.load_lora_weights("hf-internal-testing/dora-trained-on-kohya") + pipeline.enable_model_cpu_offload() + images = pipeline( + "photo of ohwx dog", + num_inference_steps=10, + generator=torch.manual_seed(0), + output_type="np", + ).images + assert "It seems like you are using a DoRA checkpoint" in cap_logger.out + + predicted_slice = images[0, -3:, -3:, -1].flatten() + expected_slice_scale = np.array([0.1817, 0.0697, 0.2346, 0.0900, 0.1261, 0.2279, 0.1767, 0.1991, 0.2886]) + max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice) + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wan.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wan.py new file mode 100644 index 0000000000000000000000000000000000000000..0ba80d2be1d1a2e162faa4cc63741f02a55b91ae --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wan.py @@ -0,0 +1,144 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +import unittest + +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + FlowMatchEulerDiscreteScheduler, + WanPipeline, + WanTransformer3DModel, +) + +from ..testing_utils import ( + floats_tensor, + require_peft_backend, + skip_mps, +) + + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +@skip_mps +class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = WanPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "patch_size": (1, 2, 2), + "num_attention_heads": 2, + "attention_head_dim": 12, + "in_channels": 16, + "out_channels": 16, + "text_dim": 32, + "freq_dim": 256, + "ffn_dim": 32, + "num_layers": 2, + "cross_attn_norm": True, + "qk_norm": "rms_norm_across_heads", + "rope_max_seq_len": 32, + } + transformer_cls = WanTransformer3DModel + vae_kwargs = { + "base_dim": 3, + "z_dim": 16, + "dim_mult": [1, 1, 1, 1], + "num_res_blocks": 1, + "temperal_downsample": [False, True, True], + } + vae_cls = AutoencoderKLWan + has_two_text_encoders = True + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + text_encoder_target_modules = ["q", "k", "v", "o"] + + @property + def output_shape(self): + return (1, 9, 32, 32, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + num_frames = 9 + num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 + sizes = (4, 4) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "", + "num_frames": num_frames, + "num_inference_steps": 1, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": sequence_length, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + @unittest.skip("Not supported in Wan.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Wan.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Wan.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan.") 
+ def test_simple_inference_with_text_lora_save_load(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wanvace.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wanvace.py new file mode 100644 index 0000000000000000000000000000000000000000..d8dde32dd8ec7d78a376d3f82192d94dd0342965 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wanvace.py @@ -0,0 +1,217 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import tempfile +import unittest + +import numpy as np +import safetensors.torch +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel +from diffusers.utils.import_utils import is_peft_available + +from ..testing_utils import ( + floats_tensor, + is_flaky, + require_peft_backend, + require_peft_version_greater, + skip_mps, + torch_device, +) + + +if is_peft_available(): + from peft.utils import get_peft_model_state_dict + +sys.path.append(".") + +from .utils import PeftLoraLoaderMixinTests # noqa: E402 + + +@require_peft_backend +@skip_mps +@is_flaky(max_attempts=10, description="very flaky class") +class WanVACELoRATests(unittest.TestCase, PeftLoraLoaderMixinTests): + pipeline_class = WanVACEPipeline + scheduler_cls = FlowMatchEulerDiscreteScheduler + scheduler_classes = [FlowMatchEulerDiscreteScheduler] + scheduler_kwargs = {} + + transformer_kwargs = { + "patch_size": (1, 2, 2), + "num_attention_heads": 2, + "attention_head_dim": 8, + "in_channels": 4, + "out_channels": 4, + "text_dim": 32, + "freq_dim": 16, + "ffn_dim": 16, + "num_layers": 2, + "cross_attn_norm": True, + "qk_norm": "rms_norm_across_heads", + "rope_max_seq_len": 16, + "vace_layers": [0], + "vace_in_channels": 72, + } + transformer_cls = WanVACETransformer3DModel + vae_kwargs = { + "base_dim": 3, + "z_dim": 4, + "dim_mult": [1, 1, 1, 1], + "latents_mean": torch.randn(4).numpy().tolist(), + "latents_std": torch.randn(4).numpy().tolist(), + "num_res_blocks": 1, + "temperal_downsample": [False, True, True], + } + vae_cls = AutoencoderKLWan + has_two_text_encoders = True + tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5" + text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5" + + text_encoder_target_modules = ["q", "k", "v", "o"] + + @property + def output_shape(self): + return (1, 9, 16, 16, 3) + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 16 + num_channels = 4 + num_frames = 9 + num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1 + sizes = (4, 4) + height, width = 16, 16 + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), 
generator=generator) + video = [Image.new("RGB", (height, width))] * num_frames + mask = [Image.new("L", (height, width), 0)] * num_frames + + pipeline_inputs = { + "video": video, + "mask": mask, + "prompt": "", + "num_frames": num_frames, + "num_inference_steps": 1, + "guidance_scale": 6.0, + "height": height, + "width": height, + "max_sequence_length": sequence_length, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + def test_simple_inference_with_text_lora_denoiser_fused_multi(self): + super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3) + + def test_simple_inference_with_text_denoiser_lora_unfused(self): + super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3) + + @unittest.skip("Not supported in Wan VACE.") + def test_simple_inference_with_text_denoiser_block_scale(self): + pass + + @unittest.skip("Not supported in Wan VACE.") + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + pass + + @unittest.skip("Not supported in Wan VACE.") + def test_modify_padding_mode(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") + def test_simple_inference_with_partial_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") + def test_simple_inference_with_text_lora(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") + def test_simple_inference_with_text_lora_and_scale(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") + def test_simple_inference_with_text_lora_fused(self): + pass + + @unittest.skip("Text encoder LoRA is not supported in Wan VACE.") + def test_simple_inference_with_text_lora_save_load(self): + pass + + def test_layerwise_casting_inference_denoiser(self): + super().test_layerwise_casting_inference_denoiser() + + @require_peft_version_greater("0.13.2") + def test_lora_exclude_modules_wanvace(self): + scheduler_cls = self.scheduler_classes[0] + exclude_module_name = "vace_blocks.0.proj_out" + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components).to(torch_device) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + # only supported for `denoiser` now + denoiser_lora_config.target_modules = ["proj_out"] + denoiser_lora_config.exclude_modules = [exclude_module_name] + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config + ) + # The state dict shouldn't contain the modules to be excluded from LoRA. + state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default") + self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model)) + self.assertTrue(any("proj_out" in k for k in state_dict_from_model)) + output_lora_exclude_modules = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdir: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts) + pipe.unload_lora_weights() + + # Check in the loaded state dict. 
+ loaded_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) + self.assertTrue(not any(exclude_module_name in k for k in loaded_state_dict)) + self.assertTrue(any("proj_out" in k for k in loaded_state_dict)) + + # Check in the state dict obtained after loading LoRA. + pipe.load_lora_weights(tmpdir) + state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default_0") + self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model)) + self.assertTrue(any("proj_out" in k for k in state_dict_from_model)) + + output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_no_lora, output_lora_exclude_modules, atol=1e-3, rtol=1e-3), + "LoRA should change outputs.", + ) + self.assertTrue( + np.allclose(output_lora_exclude_modules, output_lora_pretrained, atol=1e-3, rtol=1e-3), + "Lora outputs should match.", + ) + + def test_simple_inference_with_text_denoiser_lora_and_scale(self): + super().test_simple_inference_with_text_denoiser_lora_and_scale() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..72c1dddaa22819883853cb72036cfb861d1b148d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/lora/utils.py @@ -0,0 +1,2481 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
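# ---------------------------------------------------------------------------------------------------
# Editorial sketch (not part of this patch): the PeftLoraLoaderMixinTests class defined below
# repeatedly exercises one core round trip -- serialize each LoRA-bearing component with
# `save_lora_weights`, unload the adapters, and restore them with `load_lora_weights`. A minimal,
# hedged version of that round trip is shown here for a hypothetical `pipe` whose `transformer`
# already carries a LoRA adapter (the pipeline/adapter setup itself is assumed, not shown).
import os
import tempfile

from peft.utils import get_peft_model_state_dict


def roundtrip_lora(pipe):
    # Keys follow the `{module_name}_lora_layers` convention used by `_get_lora_state_dicts` below.
    lora_state_dicts = {"transformer_lora_layers": get_peft_model_state_dict(pipe.transformer)}
    with tempfile.TemporaryDirectory() as tmpdir:
        type(pipe).save_lora_weights(save_directory=tmpdir, safe_serialization=False, **lora_state_dicts)
        weight_path = os.path.join(tmpdir, "pytorch_lora_weights.bin")
        assert os.path.isfile(weight_path)
        pipe.unload_lora_weights()  # drops the injected LoRA layers
        pipe.load_lora_weights(weight_path)  # re-injects them from the serialized state dict
    return pipe
# ---------------------------------------------------------------------------------------------------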
+import inspect +import os +import re +import tempfile +import unittest +from itertools import product + +import numpy as np +import pytest +import torch +from parameterized import parameterized + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + LCMScheduler, + UNet2DConditionModel, +) +from diffusers.utils import logging +from diffusers.utils.import_utils import is_peft_available + +from ..testing_utils import ( + CaptureLogger, + check_if_dicts_are_equal, + floats_tensor, + is_torch_version, + require_peft_backend, + require_peft_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + torch_device, +) + + +if is_peft_available(): + from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict + from peft.tuners.tuners_utils import BaseTunerLayer + from peft.utils import get_peft_model_state_dict + + +def state_dicts_almost_equal(sd1, sd2): + sd1 = dict(sorted(sd1.items())) + sd2 = dict(sorted(sd2.items())) + + models_are_equal = True + for ten1, ten2 in zip(sd1.values(), sd2.values()): + if (ten1 - ten2).abs().max() > 1e-3: + models_are_equal = False + + return models_are_equal + + +def check_if_lora_correctly_set(model) -> bool: + """ + Checks if the LoRA layers are correctly set with peft + """ + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return True + return False + + +def check_module_lora_metadata(parsed_metadata: dict, lora_metadatas: dict, module_key: str): + extracted = { + k.removeprefix(f"{module_key}."): v for k, v in parsed_metadata.items() if k.startswith(f"{module_key}.") + } + check_if_dicts_are_equal(extracted, lora_metadatas[f"{module_key}_lora_adapter_metadata"]) + + +def initialize_dummy_state_dict(state_dict): + if not all(v.device.type == "meta" for _, v in state_dict.items()): + raise ValueError("`state_dict` has non-meta values.") + return {k: torch.randn(v.shape, device=torch_device, dtype=v.dtype) for k, v in state_dict.items()} + + +POSSIBLE_ATTENTION_KWARGS_NAMES = ["cross_attention_kwargs", "joint_attention_kwargs", "attention_kwargs"] + + +def determine_attention_kwargs_name(pipeline_class): + call_signature_keys = inspect.signature(pipeline_class.__call__).parameters.keys() + + # TODO(diffusers): Discuss a common naming convention across library for 1.0.0 release + for possible_attention_kwargs in POSSIBLE_ATTENTION_KWARGS_NAMES: + if possible_attention_kwargs in call_signature_keys: + attention_kwargs_name = possible_attention_kwargs + break + assert attention_kwargs_name is not None + return attention_kwargs_name + + +@require_peft_backend +class PeftLoraLoaderMixinTests: + pipeline_class = None + + scheduler_cls = None + scheduler_kwargs = None + scheduler_classes = [DDIMScheduler, LCMScheduler] + + has_two_text_encoders = False + has_three_text_encoders = False + text_encoder_cls, text_encoder_id, text_encoder_subfolder = None, None, "" + text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = None, None, "" + text_encoder_3_cls, text_encoder_3_id, text_encoder_3_subfolder = None, None, "" + tokenizer_cls, tokenizer_id, tokenizer_subfolder = None, None, "" + tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = None, None, "" + tokenizer_3_cls, tokenizer_3_id, tokenizer_3_subfolder = None, None, "" + + unet_kwargs = None + transformer_cls = None + transformer_kwargs = None + vae_cls = AutoencoderKL + vae_kwargs = None + + text_encoder_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"] + denoiser_target_modules = ["to_q", "to_k", 
"to_v", "to_out.0"] + + def get_dummy_components(self, scheduler_cls=None, use_dora=False, lora_alpha=None): + if self.unet_kwargs and self.transformer_kwargs: + raise ValueError("Both `unet_kwargs` and `transformer_kwargs` cannot be specified.") + if self.has_two_text_encoders and self.has_three_text_encoders: + raise ValueError("Both `has_two_text_encoders` and `has_three_text_encoders` cannot be True.") + + scheduler_cls = self.scheduler_cls if scheduler_cls is None else scheduler_cls + rank = 4 + lora_alpha = rank if lora_alpha is None else lora_alpha + + torch.manual_seed(0) + if self.unet_kwargs is not None: + unet = UNet2DConditionModel(**self.unet_kwargs) + else: + transformer = self.transformer_cls(**self.transformer_kwargs) + + scheduler = scheduler_cls(**self.scheduler_kwargs) + + torch.manual_seed(0) + vae = self.vae_cls(**self.vae_kwargs) + + text_encoder = self.text_encoder_cls.from_pretrained( + self.text_encoder_id, subfolder=self.text_encoder_subfolder + ) + tokenizer = self.tokenizer_cls.from_pretrained(self.tokenizer_id, subfolder=self.tokenizer_subfolder) + + if self.text_encoder_2_cls is not None: + text_encoder_2 = self.text_encoder_2_cls.from_pretrained( + self.text_encoder_2_id, subfolder=self.text_encoder_2_subfolder + ) + tokenizer_2 = self.tokenizer_2_cls.from_pretrained( + self.tokenizer_2_id, subfolder=self.tokenizer_2_subfolder + ) + + if self.text_encoder_3_cls is not None: + text_encoder_3 = self.text_encoder_3_cls.from_pretrained( + self.text_encoder_3_id, subfolder=self.text_encoder_3_subfolder + ) + tokenizer_3 = self.tokenizer_3_cls.from_pretrained( + self.tokenizer_3_id, subfolder=self.tokenizer_3_subfolder + ) + + text_lora_config = LoraConfig( + r=rank, + lora_alpha=lora_alpha, + target_modules=self.text_encoder_target_modules, + init_lora_weights=False, + use_dora=use_dora, + ) + + denoiser_lora_config = LoraConfig( + r=rank, + lora_alpha=lora_alpha, + target_modules=self.denoiser_target_modules, + init_lora_weights=False, + use_dora=use_dora, + ) + + pipeline_components = { + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + # Denoiser + if self.unet_kwargs is not None: + pipeline_components.update({"unet": unet}) + elif self.transformer_kwargs is not None: + pipeline_components.update({"transformer": transformer}) + + # Remaining text encoders. 
+ if self.text_encoder_2_cls is not None: + pipeline_components.update({"tokenizer_2": tokenizer_2, "text_encoder_2": text_encoder_2}) + if self.text_encoder_3_cls is not None: + pipeline_components.update({"tokenizer_3": tokenizer_3, "text_encoder_3": text_encoder_3}) + + # Remaining stuff + init_params = inspect.signature(self.pipeline_class.__init__).parameters + if "safety_checker" in init_params: + pipeline_components.update({"safety_checker": None}) + if "feature_extractor" in init_params: + pipeline_components.update({"feature_extractor": None}) + if "image_encoder" in init_params: + pipeline_components.update({"image_encoder": None}) + + return pipeline_components, text_lora_config, denoiser_lora_config + + @property + def output_shape(self): + raise NotImplementedError + + def get_dummy_inputs(self, with_generator=True): + batch_size = 1 + sequence_length = 10 + num_channels = 4 + sizes = (32, 32) + + generator = torch.manual_seed(0) + noise = floats_tensor((batch_size, num_channels) + sizes) + input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator) + + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "num_inference_steps": 5, + "guidance_scale": 6.0, + "output_type": "np", + } + if with_generator: + pipeline_inputs.update({"generator": generator}) + + return noise, input_ids, pipeline_inputs + + # Copied from: https://colab.research.google.com/gist/sayakpaul/df2ef6e1ae6d8c10a49d859883b10860/scratchpad.ipynb + def get_dummy_tokens(self): + max_seq_length = 77 + + inputs = torch.randint(2, 56, size=(1, max_seq_length), generator=torch.manual_seed(0)) + + prepared_inputs = {} + prepared_inputs["input_ids"] = inputs + return prepared_inputs + + def _get_lora_state_dicts(self, modules_to_save): + state_dicts = {} + for module_name, module in modules_to_save.items(): + if module is not None: + state_dicts[f"{module_name}_lora_layers"] = get_peft_model_state_dict(module) + return state_dicts + + def _get_lora_adapter_metadata(self, modules_to_save): + metadatas = {} + for module_name, module in modules_to_save.items(): + if module is not None: + metadatas[f"{module_name}_lora_adapter_metadata"] = module.peft_config["default"].to_dict() + return metadatas + + def _get_modules_to_save(self, pipe, has_denoiser=False): + modules_to_save = {} + lora_loadable_modules = self.pipeline_class._lora_loadable_modules + + if ( + "text_encoder" in lora_loadable_modules + and hasattr(pipe, "text_encoder") + and getattr(pipe.text_encoder, "peft_config", None) is not None + ): + modules_to_save["text_encoder"] = pipe.text_encoder + + if ( + "text_encoder_2" in lora_loadable_modules + and hasattr(pipe, "text_encoder_2") + and getattr(pipe.text_encoder_2, "peft_config", None) is not None + ): + modules_to_save["text_encoder_2"] = pipe.text_encoder_2 + + if has_denoiser: + if "unet" in lora_loadable_modules and hasattr(pipe, "unet"): + modules_to_save["unet"] = pipe.unet + + if "transformer" in lora_loadable_modules and hasattr(pipe, "transformer"): + modules_to_save["transformer"] = pipe.transformer + + return modules_to_save + + def add_adapters_to_pipeline(self, pipe, text_lora_config=None, denoiser_lora_config=None, adapter_name="default"): + if text_lora_config is not None: + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, adapter_name=adapter_name) + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) 
+ + if denoiser_lora_config is not None: + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, adapter_name=adapter_name) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + else: + denoiser = None + + if text_lora_config is not None and self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder_2.add_adapter(text_lora_config, adapter_name=adapter_name) + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + return pipe, denoiser + + def test_simple_inference(self): + """ + Tests a simple inference and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + _, _, inputs = self.get_dummy_inputs() + output_no_lora = pipe(**inputs)[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + def test_simple_inference_with_text_lora(self): + """ + Tests a simple inference with lora attached on the text encoder + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + + output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" + ) + + @require_peft_version_greater("0.13.1") + def test_low_cpu_mem_usage_with_injection(self): + """Tests if we can inject LoRA state dict with low_cpu_mem_usage.""" + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + inject_adapter_in_model(text_lora_config, pipe.text_encoder, low_cpu_mem_usage=True) + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder." 
+ ) + self.assertTrue( + "meta" in {p.device.type for p in pipe.text_encoder.parameters()}, + "The LoRA params should be on 'meta' device.", + ) + + te_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(pipe.text_encoder)) + set_peft_model_state_dict(pipe.text_encoder, te_state_dict, low_cpu_mem_usage=True) + self.assertTrue( + "meta" not in {p.device.type for p in pipe.text_encoder.parameters()}, + "No param should be on 'meta' device.", + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + inject_adapter_in_model(denoiser_lora_config, denoiser, low_cpu_mem_usage=True) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + self.assertTrue( + "meta" in {p.device.type for p in denoiser.parameters()}, "The LoRA params should be on 'meta' device." + ) + + denoiser_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(denoiser)) + set_peft_model_state_dict(denoiser, denoiser_state_dict, low_cpu_mem_usage=True) + self.assertTrue( + "meta" not in {p.device.type for p in denoiser.parameters()}, "No param should be on 'meta' device." + ) + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + inject_adapter_in_model(text_lora_config, pipe.text_encoder_2, low_cpu_mem_usage=True) + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + self.assertTrue( + "meta" in {p.device.type for p in pipe.text_encoder_2.parameters()}, + "The LoRA params should be on 'meta' device.", + ) + + te2_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(pipe.text_encoder_2)) + set_peft_model_state_dict(pipe.text_encoder_2, te2_state_dict, low_cpu_mem_usage=True) + self.assertTrue( + "meta" not in {p.device.type for p in pipe.text_encoder_2.parameters()}, + "No param should be on 'meta' device.", + ) + + _, _, inputs = self.get_dummy_inputs() + output_lora = pipe(**inputs)[0] + self.assertTrue(output_lora.shape == self.output_shape) + + @require_peft_version_greater("0.13.1") + @require_transformers_version_greater("4.45.2") + def test_low_cpu_mem_usage_with_loading(self): + """Tests if we can load LoRA state dict with low_cpu_mem_usage.""" + + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts + ) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) + pipe.unload_lora_weights() + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), low_cpu_mem_usage=False) + + for module_name, module in modules_to_save.items(): + 
self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") + + images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results.", + ) + + # Now, check for `low_cpu_mem_usage.` + pipe.unload_lora_weights() + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), low_cpu_mem_usage=True) + + for module_name, module in modules_to_save.items(): + self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") + + images_lora_from_pretrained_low_cpu = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + np.allclose( + images_lora_from_pretrained_low_cpu, images_lora_from_pretrained, atol=1e-3, rtol=1e-3 + ), + "Loading from saved checkpoints with `low_cpu_mem_usage` should give same results.", + ) + + def test_simple_inference_with_text_lora_and_scale(self): + """ + Tests a simple inference with lora attached on the text encoder + scale argument + and makes sure it works as expected + """ + attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) + + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + + output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" + ) + + attention_kwargs = {attention_kwargs_name: {"scale": 0.5}} + output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + + self.assertTrue( + not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3), + "Lora + scale should change the output", + ) + + attention_kwargs = {attention_kwargs_name: {"scale": 0.0}} + output_lora_0_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + + self.assertTrue( + np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3), + "Lora + 0 scale should lead to same result as no LoRA", + ) + + def test_simple_inference_with_text_lora_fused(self): + """ + Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + + pipe.fuse_lora() + # Fusing should still keep the LoRA layers + self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") + + if self.has_two_text_encoders or 
self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + output_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse( + np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output" + ) + + def test_simple_inference_with_text_lora_unloaded(self): + """ + Tests a simple inference with lora attached to text encoder, then unloads the lora weights + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + + pipe.unload_lora_weights() + # unloading should remove the LoRA layers + self.assertFalse( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder" + ) + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertFalse( + check_if_lora_correctly_set(pipe.text_encoder_2), + "Lora not correctly unloaded in text encoder 2", + ) + + output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3), + "Unloading LoRA should give the same output as no LoRA", + ) + + def test_simple_inference_with_text_lora_save_load(self): + """ + Tests a simple usecase where users could use saving utilities for LoRA.
+ """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + + images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts + ) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) + pipe.unload_lora_weights() + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin")) + + for module_name, module in modules_to_save.items(): + self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") + + images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results.", + ) + + def test_simple_inference_with_partial_text_lora(self): + """ + Tests a simple inference with lora attached on the text encoder + with different ranks and some adapters removed + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, _, _ = self.get_dummy_components(scheduler_cls) + # Verify `StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder` handles different ranks per module (PR#8324). + text_lora_config = LoraConfig( + r=4, + rank_pattern={self.text_encoder_target_modules[i]: i + 1 for i in range(3)}, + lora_alpha=4, + target_modules=self.text_encoder_target_modules, + init_lora_weights=False, + use_dora=False, + ) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + + state_dict = {} + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + # Gather the state dict for the PEFT model, excluding `layers.4`, to ensure `load_lora_into_text_encoder` + # supports missing layers (PR#8324). 
+ state_dict = { + f"text_encoder.{module_name}": param + for module_name, param in get_peft_model_state_dict(pipe.text_encoder).items() + if "text_model.encoder.layers.4" not in module_name + } + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + state_dict.update( + { + f"text_encoder_2.{module_name}": param + for module_name, param in get_peft_model_state_dict(pipe.text_encoder_2).items() + if "text_model.encoder.layers.4" not in module_name + } + ) + + output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" + ) + + # Unload lora and load it back using the pipe.load_lora_weights machinery + pipe.unload_lora_weights() + pipe.load_lora_weights(state_dict) + + output_partial_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_partial_lora, output_lora, atol=1e-3, rtol=1e-3), + "Removing adapters should change the output", + ) + + def test_simple_inference_save_pretrained_with_text_lora(self): + """ + Tests a simple usecase where users could use saving utilities for LoRA through save_pretrained + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None) + images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + + pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname) + pipe_from_pretrained.to(torch_device) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe_from_pretrained.text_encoder), + "Lora not correctly set in text encoder", + ) + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe_from_pretrained.text_encoder_2), + "Lora not correctly set in text encoder 2", + ) + + images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results.", + ) + + def test_simple_inference_with_text_denoiser_lora_save_load(self): + """ + Tests a simple usecase where users could use saving utilities for LoRA for Unet + text encoder + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + 
images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts + ) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) + pipe.unload_lora_weights() + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin")) + + for module_name, module in modules_to_save.items(): + self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") + + images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results.", + ) + + def test_simple_inference_with_text_denoiser_lora_and_scale(self): + """ + Tests a simple inference with lora attached on the text encoder + Unet + scale argument + and makes sure it works as expected + """ + attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) + + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output" + ) + + attention_kwargs = {attention_kwargs_name: {"scale": 0.5}} + output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + + self.assertTrue( + not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3), + "Lora + scale should change the output", + ) + + attention_kwargs = {attention_kwargs_name: {"scale": 0.0}} + output_lora_0_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + + self.assertTrue( + np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3), + "Lora + 0 scale should lead to same result as no LoRA", + ) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + pipe.text_encoder.text_model.encoder.layers[0].self_attn.q_proj.scaling["default"] == 1.0, + "The scaling parameter has not been correctly restored!", + ) + + def test_simple_inference_with_text_lora_denoiser_fused(self): + """ + Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model + and makes sure it works as expected - with unet + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + 
pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules) + + # Fusing should still keep the LoRA layers + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + output_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse( + np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output" + ) + + def test_simple_inference_with_text_denoiser_lora_unloaded(self): + """ + Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + pipe.unload_lora_weights() + # unloading should remove the LoRA layers + self.assertFalse( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder" + ) + self.assertFalse(check_if_lora_correctly_set(denoiser), "Lora not correctly unloaded in denoiser") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertFalse( + check_if_lora_correctly_set(pipe.text_encoder_2), + "Lora not correctly unloaded in text encoder 2", + ) + + output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3), + "Fused lora should change the output", + ) + + def test_simple_inference_with_text_denoiser_lora_unfused( + self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3 + ): + """ + Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules) + self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + output_fused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules) + self.assertTrue(pipe.num_fused_loras == 0, 
f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + output_unfused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + # unloading should remove the LoRA layers + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers") + + self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers" + ) + + # Fuse and unfuse should lead to the same results + self.assertTrue( + np.allclose(output_fused_lora, output_unfused_lora, atol=expected_atol, rtol=expected_rtol), + "Fused lora should not change the output", + ) + + def test_simple_inference_with_text_denoiser_multi_adapter(self): + """ + Tests a simple inference with lora attached to text encoder and unet, attaches + multiple adapters and set them + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + pipe.set_adapters("adapter-1") + output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse( + np.allclose(output_no_lora, output_adapter_1, atol=1e-3, rtol=1e-3), + "Adapter outputs should be different.", + ) + + pipe.set_adapters("adapter-2") + output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse( + np.allclose(output_no_lora, output_adapter_2, atol=1e-3, rtol=1e-3), + "Adapter outputs should be different.", + ) + + pipe.set_adapters(["adapter-1", "adapter-2"]) + output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse( + np.allclose(output_no_lora, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter outputs should be different.", + ) + + # Fuse and unfuse should lead to the same results + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), + "Adapter 1 and 2 should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 1 and mixed 
adapters should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 2 and mixed adapters should give different results", + ) + + pipe.disable_lora() + output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), + "output with no lora and output with lora disabled should give same results", + ) + + def test_wrong_adapter_name_raises_error(self): + adapter_name = "adapter-1" + + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config, denoiser_lora_config, adapter_name=adapter_name + ) + + with self.assertRaises(ValueError) as err_context: + pipe.set_adapters("test") + + self.assertTrue("not in the list of present adapters" in str(err_context.exception)) + + # test this works. + pipe.set_adapters(adapter_name) + _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + + def test_multiple_wrong_adapter_name_raises_error(self): + adapter_name = "adapter-1" + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config, denoiser_lora_config, adapter_name=adapter_name + ) + + scale_with_wrong_components = {"foo": 0.0, "bar": 0.0, "tik": 0.0} + logger = logging.get_logger("diffusers.loaders.lora_base") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + pipe.set_adapters(adapter_name, adapter_weights=scale_with_wrong_components) + + wrong_components = sorted(set(scale_with_wrong_components.keys())) + msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}. " + self.assertTrue(msg in str(cap_logger.out)) + + # test this works. + pipe.set_adapters(adapter_name) + _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + + def test_simple_inference_with_text_denoiser_block_scale(self): + """ + Tests a simple inference with lora attached to text encoder and unet, attaches + one adapter and set different weights for different blocks (i.e. 
block lora) + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + weights_1 = {"text_encoder": 2, "unet": {"down": 5}} + pipe.set_adapters("adapter-1", weights_1) + output_weights_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + weights_2 = {"unet": {"up": 5}} + pipe.set_adapters("adapter-1", weights_2) + output_weights_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse( + np.allclose(output_weights_1, output_weights_2, atol=1e-3, rtol=1e-3), + "LoRA weights 1 and 2 should give different results", + ) + self.assertFalse( + np.allclose(output_no_lora, output_weights_1, atol=1e-3, rtol=1e-3), + "No adapter and LoRA weights 1 should give different results", + ) + self.assertFalse( + np.allclose(output_no_lora, output_weights_2, atol=1e-3, rtol=1e-3), + "No adapter and LoRA weights 2 should give different results", + ) + + pipe.disable_lora() + output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), + "output with no lora and output with lora disabled should give same results", + ) + + def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self): + """ + Tests a simple inference with lora attached to text encoder and unet, attaches + multiple adapters and set different weights for different blocks (i.e. 
block lora) + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + scales_1 = {"text_encoder": 2, "unet": {"down": 5}} + scales_2 = {"unet": {"down": 5, "mid": 5}} + + pipe.set_adapters("adapter-1", scales_1) + output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters("adapter-2", scales_2) + output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1, scales_2]) + output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] + + # Fuse and unfuse should lead to the same results + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), + "Adapter 1 and 2 should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 1 and mixed adapters should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 2 and mixed adapters should give different results", + ) + + pipe.disable_lora() + output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), + "output with no lora and output with lora disabled should give same results", + ) + + # a mismatching number of adapter_names and adapter_weights should raise an error + with self.assertRaises(ValueError): + pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1]) + + def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self): + """Tests that any valid combination of lora block scales can be used in pipe.set_adapter""" + + def updown_options(blocks_with_tf, layers_per_block, value): + """ + Generate every possible combination for how a lora weight dict for the up/down part can be. + E.g. 2, {"block_1": 2}, {"block_1": [2,2,2]}, {"block_1": 2, "block_2": [2,2,2]}, ... 
+ """ + num_val = value + list_val = [value] * layers_per_block + + node_opts = [None, num_val, list_val] + node_opts_foreach_block = [node_opts] * len(blocks_with_tf) + + updown_opts = [num_val] + for nodes in product(*node_opts_foreach_block): + if all(n is None for n in nodes): + continue + opt = {} + for b, n in zip(blocks_with_tf, nodes): + if n is not None: + opt["block_" + str(b)] = n + updown_opts.append(opt) + return updown_opts + + def all_possible_dict_opts(unet, value): + """ + Generate every possible combination for how a lora weight dict can be. + E.g. 2, {"unet: {"down": 2}}, {"unet: {"down": [2,2,2]}}, {"unet: {"mid": 2, "up": [2,2,2]}}, ... + """ + + down_blocks_with_tf = [i for i, d in enumerate(unet.down_blocks) if hasattr(d, "attentions")] + up_blocks_with_tf = [i for i, u in enumerate(unet.up_blocks) if hasattr(u, "attentions")] + + layers_per_block = unet.config.layers_per_block + + text_encoder_opts = [None, value] + text_encoder_2_opts = [None, value] + mid_opts = [None, value] + down_opts = [None] + updown_options(down_blocks_with_tf, layers_per_block, value) + up_opts = [None] + updown_options(up_blocks_with_tf, layers_per_block + 1, value) + + opts = [] + + for t1, t2, d, m, u in product(text_encoder_opts, text_encoder_2_opts, down_opts, mid_opts, up_opts): + if all(o is None for o in (t1, t2, d, m, u)): + continue + opt = {} + if t1 is not None: + opt["text_encoder"] = t1 + if t2 is not None: + opt["text_encoder_2"] = t2 + if all(o is None for o in (d, m, u)): + # no unet scaling + continue + opt["unet"] = {} + if d is not None: + opt["unet"]["down"] = d + if m is not None: + opt["unet"]["mid"] = m + if u is not None: + opt["unet"]["up"] = u + opts.append(opt) + + return opts + + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + + if self.has_two_text_encoders or self.has_three_text_encoders: + lora_loadable_components = self.pipeline_class._lora_loadable_modules + if "text_encoder_2" in lora_loadable_components: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + + for scale_dict in all_possible_dict_opts(pipe.unet, value=1234): + # test if lora block scales can be set with this scale_dict + if not self.has_two_text_encoders and "text_encoder_2" in scale_dict: + del scale_dict["text_encoder_2"] + + pipe.set_adapters("adapter-1", scale_dict) # test will fail if this line throws an error + + def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self): + """ + Tests a simple inference with lora attached to text encoder and unet, attaches + multiple adapters and set/delete them + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + 
pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + lora_loadable_components = self.pipeline_class._lora_loadable_modules + if "text_encoder_2" in lora_loadable_components: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + pipe.set_adapters("adapter-1") + output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters("adapter-2") + output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters(["adapter-1", "adapter-2"]) + output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), + "Adapter 1 and 2 should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 1 and mixed adapters should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 2 and mixed adapters should give different results", + ) + + pipe.delete_adapters("adapter-1") + output_deleted_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_deleted_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), + "Adapter 1 and 2 should give different results", + ) + + pipe.delete_adapters("adapter-2") + output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3), + "output with no lora and output with lora disabled should give same results", + ) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + pipe.set_adapters(["adapter-1", "adapter-2"]) + pipe.delete_adapters(["adapter-1", "adapter-2"]) + + output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3), + "output with no lora and output with lora disabled should give same results", + ) + + def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self): + """ + Tests a simple inference with lora attached to text encoder and unet, attaches + multiple adapters and set them + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = 
pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + lora_loadable_components = self.pipeline_class._lora_loadable_modules + if "text_encoder_2" in lora_loadable_components: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + pipe.set_adapters("adapter-1") + output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters("adapter-2") + output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters(["adapter-1", "adapter-2"]) + output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0] + + # Fuse and unfuse should lead to the same results + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3), + "Adapter 1 and 2 should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 1 and mixed adapters should give different results", + ) + + self.assertFalse( + np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Adapter 2 and mixed adapters should give different results", + ) + + pipe.set_adapters(["adapter-1", "adapter-2"], [0.5, 0.6]) + output_adapter_mixed_weighted = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse( + np.allclose(output_adapter_mixed_weighted, output_adapter_mixed, atol=1e-3, rtol=1e-3), + "Weighted adapter and mixed adapter should give different results", + ) + + pipe.disable_lora() + output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3), + "output with no lora and output with lora disabled should give same results", + ) + + @skip_mps + @pytest.mark.xfail( + condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"), + reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.", + strict=False, + ) + def test_lora_fuse_nan(self): + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = 
pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + # corrupt one LoRA weight with `inf` values + with torch.no_grad(): + if self.unet_kwargs: + pipe.unet.mid_block.attentions[0].transformer_blocks[0].attn1.to_q.lora_A[ + "adapter-1" + ].weight += float("inf") + else: + named_modules = [name for name, _ in pipe.transformer.named_modules()] + possible_tower_names = [ + "transformer_blocks", + "blocks", + "joint_transformer_blocks", + "single_transformer_blocks", + ] + filtered_tower_names = [ + tower_name for tower_name in possible_tower_names if hasattr(pipe.transformer, tower_name) + ] + if len(filtered_tower_names) == 0: + reason = ( + f"`pipe.transformer` didn't have any of the following attributes: {possible_tower_names}." + ) + raise ValueError(reason) + for tower_name in filtered_tower_names: + transformer_tower = getattr(pipe.transformer, tower_name) + has_attn1 = any("attn1" in name for name in named_modules) + if has_attn1: + transformer_tower[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf") + else: + transformer_tower[0].attn.to_q.lora_A["adapter-1"].weight += float("inf") + + # with `safe_fusing=True` we should see an Error + with self.assertRaises(ValueError): + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True) + + # without we should not see an error, but every image will be black + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False) + out = pipe(**inputs)[0] + + self.assertTrue(np.isnan(out).all()) + + def test_get_adapters(self): + """ + Tests a simple usecase where we attach multiple adapters and check if the results + are the expected results + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + + adapter_names = pipe.get_active_adapters() + self.assertListEqual(adapter_names, ["adapter-1"]) + + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + + adapter_names = pipe.get_active_adapters() + self.assertListEqual(adapter_names, ["adapter-2"]) + + pipe.set_adapters(["adapter-1", "adapter-2"]) + self.assertListEqual(pipe.get_active_adapters(), ["adapter-1", "adapter-2"]) + + def test_get_list_adapters(self): + """ + Tests a simple usecase where we attach multiple adapters and check if the results + are the expected results + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # 1. 
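Adding "adapter-1" should be reflected per component in the dict returned by `get_list_adapters()`.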
+ dicts_to_be_checked = {} + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + dicts_to_be_checked = {"text_encoder": ["adapter-1"]} + + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") + dicts_to_be_checked.update({"unet": ["adapter-1"]}) + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + dicts_to_be_checked.update({"transformer": ["adapter-1"]}) + + self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) + + # 2. + dicts_to_be_checked = {} + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} + + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-2") + dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]}) + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2") + dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]}) + + self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) + + # 3. + pipe.set_adapters(["adapter-1", "adapter-2"]) + + dicts_to_be_checked = {} + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} + + if self.unet_kwargs is not None: + dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]}) + else: + dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]}) + + self.assertDictEqual( + pipe.get_list_adapters(), + dicts_to_be_checked, + ) + + # 4. + dicts_to_be_checked = {} + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]} + + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-3") + dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2", "adapter-3"]}) + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-3") + dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2", "adapter-3"]}) + + self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked) + + @require_peft_version_greater(peft_version="0.6.2") + def test_simple_inference_with_text_lora_denoiser_fused_multi( + self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3 + ): + """ + Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model + and makes sure it works as expected - with unet and multi-adapter case + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + pipe.text_encoder.add_adapter(text_lora_config, "adapter-2") + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + 
self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + denoiser.add_adapter(denoiser_lora_config, "adapter-2") + + if self.has_two_text_encoders or self.has_three_text_encoders: + lora_loadable_components = self.pipeline_class._lora_loadable_modules + if "text_encoder_2" in lora_loadable_components: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2") + + # set them to multi-adapter inference mode + pipe.set_adapters(["adapter-1", "adapter-2"]) + outputs_all_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.set_adapters(["adapter-1"]) + outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-1"]) + self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + + # Fusing should still keep the LoRA layers so output should remain the same + outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol), + "Fused lora should not change the output", + ) + + pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules) + self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers") + + self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers") + + if self.has_two_text_encoders or self.has_three_text_encoders: + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers" + ) + + pipe.fuse_lora( + components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-2", "adapter-1"] + ) + self.assertTrue(pipe.num_fused_loras == 2, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + + # Fusing should still keep the LoRA layers + output_all_lora_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + np.allclose(output_all_lora_fused, outputs_all_lora, atol=expected_atol, rtol=expected_rtol), + "Fused lora should not change the output", + ) + pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules) + self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + + def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3): + attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) + + for lora_scale in [1.0, 0.8]: + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config, "adapter-1") + 
self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + lora_loadable_components = self.pipeline_class._lora_loadable_modules + if "text_encoder_2" in lora_loadable_components: + pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1") + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), + "Lora not correctly set in text encoder 2", + ) + + pipe.set_adapters(["adapter-1"]) + attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}} + outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + + pipe.fuse_lora( + components=self.pipeline_class._lora_loadable_modules, + adapter_names=["adapter-1"], + lora_scale=lora_scale, + ) + self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}") + + outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol), + "Fused lora should not change the output", + ) + self.assertFalse( + np.allclose(output_no_lora, outputs_lora_1, atol=expected_atol, rtol=expected_rtol), + "LoRA should change the output", + ) + + @require_peft_version_greater(peft_version="0.9.0") + def test_simple_inference_with_dora(self): + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components( + scheduler_cls, use_dora=True + ) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_dora_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + output_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse( + np.allclose(output_dora_lora, output_no_dora_lora, atol=1e-3, rtol=1e-3), + "DoRA lora should change the output", + ) + + def test_missing_keys_warning(self): + scheduler_cls = self.scheduler_classes[0] + # Skip text encoder check for now as that is handled with `transformers`. 
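+ # Strategy: attach a LoRA to the denoiser, save it, delete one "lora_A" entry from the reloaded state dict, and check that the warning logged on load names that missing key.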
+ components, _, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts + ) + pipe.unload_lora_weights() + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) + state_dict = torch.load(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), weights_only=True) + + # To make things dynamic since we cannot settle with a single key for all the models where we + # offer PEFT support. + missing_key = [k for k in state_dict if "lora_A" in k][0] + del state_dict[missing_key] + + logger = logging.get_logger("diffusers.utils.peft_utils") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(state_dict) + + # Since the missing key won't contain the adapter name ("default_0"). + # Also strip out the component prefix (such as "unet." from `missing_key`). + component = list({k.split(".")[0] for k in state_dict})[0] + self.assertTrue(missing_key.replace(f"{component}.", "") in cap_logger.out.replace("default_0.", "")) + + def test_unexpected_keys_warning(self): + scheduler_cls = self.scheduler_classes[0] + # Skip text encoder check for now as that is handled with `transformers`. 
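+ # Strategy: save a valid LoRA state dict, inject a bogus ".diffusers_cat" key, and check that the warning logged on load reports it as unexpected.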
+ components, _, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts + ) + pipe.unload_lora_weights() + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))) + state_dict = torch.load(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), weights_only=True) + + unexpected_key = [k for k in state_dict if "lora_A" in k][0] + ".diffusers_cat" + state_dict[unexpected_key] = torch.tensor(1.0, device=torch_device) + + logger = logging.get_logger("diffusers.utils.peft_utils") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(state_dict) + + self.assertTrue(".diffusers_cat" in cap_logger.out) + + @unittest.skip("This is failing for now - need to investigate") + def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self): + """ + Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights + and makes sure it works as expected + """ + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) + pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead", fullgraph=True) + + if self.has_two_text_encoders or self.has_three_text_encoders: + pipe.text_encoder_2 = torch.compile(pipe.text_encoder_2, mode="reduce-overhead", fullgraph=True) + + # Just makes sure it works.. + _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + + def test_modify_padding_mode(self): + def set_pad_mode(network, mode="circular"): + for _, module in network.named_modules(): + if isinstance(module, torch.nn.Conv2d): + module.padding_mode = mode + + for scheduler_cls in self.scheduler_classes: + components, _, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _pad_mode = "circular" + set_pad_mode(pipe.vae, _pad_mode) + set_pad_mode(pipe.unet, _pad_mode) + + _, _, inputs = self.get_dummy_inputs() + _ = pipe(**inputs)[0] + + def test_logs_info_when_no_lora_keys_found(self): + scheduler_cls = self.scheduler_classes[0] + # Skip text encoder check for now as that is handled with `transformers`. 
+ components, _, _ = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + original_out = pipe(**inputs, generator=torch.manual_seed(0))[0] + + no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)} + logger = logging.get_logger("diffusers.loaders.peft") + logger.setLevel(logging.WARNING) + + with CaptureLogger(logger) as cap_logger: + pipe.load_lora_weights(no_op_state_dict) + out_after_lora_attempt = pipe(**inputs, generator=torch.manual_seed(0))[0] + + denoiser = getattr(pipe, "unet") if self.unet_kwargs is not None else getattr(pipe, "transformer") + self.assertTrue(cap_logger.out.startswith(f"No LoRA keys associated to {denoiser.__class__.__name__}")) + self.assertTrue(np.allclose(original_out, out_after_lora_attempt, atol=1e-5, rtol=1e-5)) + + # test only for text encoder + for lora_module in self.pipeline_class._lora_loadable_modules: + if "text_encoder" in lora_module: + text_encoder = getattr(pipe, lora_module) + if lora_module == "text_encoder": + prefix = "text_encoder" + elif lora_module == "text_encoder_2": + prefix = "text_encoder_2" + + logger = logging.get_logger("diffusers.loaders.lora_base") + logger.setLevel(logging.WARNING) + + with CaptureLogger(logger) as cap_logger: + self.pipeline_class.load_lora_into_text_encoder( + no_op_state_dict, network_alphas=None, text_encoder=text_encoder, prefix=prefix + ) + + self.assertTrue( + cap_logger.out.startswith(f"No LoRA keys associated to {text_encoder.__class__.__name__}") + ) + + def test_set_adapters_match_attention_kwargs(self): + """Test to check if outputs after `set_adapters()` and attention kwargs match.""" + attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class) + + for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + lora_scale = 0.5 + attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}} + output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + self.assertFalse( + np.allclose(output_no_lora, output_lora_scale, atol=1e-3, rtol=1e-3), + "Lora + scale should change the output", + ) + + pipe.set_adapters("default", lora_scale) + output_lora_scale_wo_kwargs = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue( + not np.allclose(output_no_lora, output_lora_scale_wo_kwargs, atol=1e-3, rtol=1e-3), + "Lora + scale should change the output", + ) + self.assertTrue( + np.allclose(output_lora_scale, output_lora_scale_wo_kwargs, atol=1e-3, rtol=1e-3), + "Lora + scale should match the output of `set_adapters()`.", + ) + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts + ) + + 
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + + for module_name, module in modules_to_save.items(): + self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}") + + output_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0] + self.assertTrue( + not np.allclose(output_no_lora, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Lora + scale should change the output", + ) + self.assertTrue( + np.allclose(output_lora_scale, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results as attention_kwargs.", + ) + self.assertTrue( + np.allclose(output_lora_scale_wo_kwargs, output_lora_from_pretrained, atol=1e-3, rtol=1e-3), + "Loading from saved checkpoints should give same results as set_adapters().", + ) + + @require_peft_version_greater("0.13.2") + def test_lora_B_bias(self): + # Currently, this test is only relevant for Flux Control LoRA as we are not + # aware of any other LoRA checkpoint that has its `lora_B` biases trained. + components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # keep track of the bias values of the base layers to perform checks later. + bias_values = {} + denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer + for name, module in denoiser.named_modules(): + if any(k in name for k in self.denoiser_target_modules): + if module.bias is not None: + bias_values[name] = module.bias.data.clone() + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + denoiser_lora_config.lora_bias = False + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + lora_bias_false_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + pipe.delete_adapters("adapter-1") + + denoiser_lora_config.lora_bias = True + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + lora_bias_true_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertFalse(np.allclose(original_output, lora_bias_false_output, atol=1e-3, rtol=1e-3)) + self.assertFalse(np.allclose(original_output, lora_bias_true_output, atol=1e-3, rtol=1e-3)) + self.assertFalse(np.allclose(lora_bias_false_output, lora_bias_true_output, atol=1e-3, rtol=1e-3)) + + def test_correct_lora_configs_with_different_ranks(self): + components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + original_output = pipe(**inputs, generator=torch.manual_seed(0))[0] + + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + + lora_output_same_rank = 
pipe(**inputs, generator=torch.manual_seed(0))[0] + + if self.unet_kwargs is not None: + pipe.unet.delete_adapters("adapter-1") + else: + pipe.transformer.delete_adapters("adapter-1") + + denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer + for name, _ in denoiser.named_modules(): + if "to_k" in name and "attn" in name and "lora" not in name: + module_name_to_rank_update = name.replace(".base_layer.", ".") + break + + # change the rank_pattern + updated_rank = denoiser_lora_config.r * 2 + denoiser_lora_config.rank_pattern = {module_name_to_rank_update: updated_rank} + + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") + updated_rank_pattern = pipe.unet.peft_config["adapter-1"].rank_pattern + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + updated_rank_pattern = pipe.transformer.peft_config["adapter-1"].rank_pattern + + self.assertTrue(updated_rank_pattern == {module_name_to_rank_update: updated_rank}) + + lora_output_diff_rank = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(not np.allclose(original_output, lora_output_same_rank, atol=1e-3, rtol=1e-3)) + self.assertTrue(not np.allclose(lora_output_diff_rank, lora_output_same_rank, atol=1e-3, rtol=1e-3)) + + if self.unet_kwargs is not None: + pipe.unet.delete_adapters("adapter-1") + else: + pipe.transformer.delete_adapters("adapter-1") + + # similarly change the alpha_pattern + updated_alpha = denoiser_lora_config.lora_alpha * 2 + denoiser_lora_config.alpha_pattern = {module_name_to_rank_update: updated_alpha} + if self.unet_kwargs is not None: + pipe.unet.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue( + pipe.unet.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha} + ) + else: + pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1") + self.assertTrue( + pipe.transformer.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha} + ) + + lora_output_diff_alpha = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3)) + self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3)) + + def test_layerwise_casting_inference_denoiser(self): + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN + + def check_linear_dtype(module, storage_dtype, compute_dtype): + patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN + if getattr(module, "_skip_layerwise_casting_patterns", None) is not None: + patterns_to_check += tuple(module._skip_layerwise_casting_patterns) + for name, submodule in module.named_modules(): + if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + continue + dtype_to_check = storage_dtype + if "lora" in name or any(re.search(pattern, name) for pattern in patterns_to_check): + dtype_to_check = compute_dtype + if getattr(submodule, "weight", None) is not None: + self.assertEqual(submodule.weight.dtype, dtype_to_check) + if getattr(submodule, "bias", None) is not None: + self.assertEqual(submodule.bias.dtype, dtype_to_check) + + def initialize_pipeline(storage_dtype=None, compute_dtype=torch.float32): + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device, dtype=compute_dtype) + 
pipe.set_progress_bar_config(disable=None) + + pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config) + + if storage_dtype is not None: + denoiser.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) + check_linear_dtype(denoiser, storage_dtype, compute_dtype) + + return pipe + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe_fp32 = initialize_pipeline(storage_dtype=None) + pipe_fp32(**inputs, generator=torch.manual_seed(0))[0] + + pipe_float8_e4m3_fp32 = initialize_pipeline(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.float32) + pipe_float8_e4m3_fp32(**inputs, generator=torch.manual_seed(0))[0] + + pipe_float8_e4m3_bf16 = initialize_pipeline(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16) + pipe_float8_e4m3_bf16(**inputs, generator=torch.manual_seed(0))[0] + + @require_peft_version_greater("0.14.0") + def test_layerwise_casting_peft_input_autocast_denoiser(self): + r""" + A test that checks if layerwise casting works correctly with PEFT layers and forward pass does not fail. This + is different from `test_layerwise_casting_inference_denoiser` as that disables the application of layerwise + cast hooks on the PEFT layers (relevant logic in `models.modeling_utils.ModelMixin.enable_layerwise_casting`). + In this test, we enable the layerwise casting on the PEFT layers as well. If run with PEFT version <= 0.14.0, + this test will fail with the following error: + + ``` + RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Float8_e4m3fn != float + ``` + + See the docstring of [`hooks.layerwise_casting.PeftInputAutocastDisableHook`] for more details. + """ + + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.layerwise_casting import ( + _PEFT_AUTOCAST_DISABLE_HOOK, + DEFAULT_SKIP_MODULES_PATTERN, + apply_layerwise_casting, + ) + + storage_dtype = torch.float8_e4m3fn + compute_dtype = torch.float32 + + def check_module(denoiser): + # This will also check if the peft layers are in torch.float8_e4m3fn dtype (unlike test_layerwise_casting_inference_denoiser) + for name, module in denoiser.named_modules(): + if not isinstance(module, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + continue + dtype_to_check = storage_dtype + if any(re.search(pattern, name) for pattern in patterns_to_check): + dtype_to_check = compute_dtype + if getattr(module, "weight", None) is not None: + self.assertEqual(module.weight.dtype, dtype_to_check) + if getattr(module, "bias", None) is not None: + self.assertEqual(module.bias.dtype, dtype_to_check) + if isinstance(module, BaseTunerLayer): + self.assertTrue(getattr(module, "_diffusers_hook", None) is not None) + self.assertTrue(module._diffusers_hook.get_hook(_PEFT_AUTOCAST_DISABLE_HOOK) is not None) + + # 1. 
Test forward with add_adapter + components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device, dtype=compute_dtype) + pipe.set_progress_bar_config(disable=None) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN + if getattr(denoiser, "_skip_layerwise_casting_patterns", None) is not None: + patterns_to_check += tuple(denoiser._skip_layerwise_casting_patterns) + + apply_layerwise_casting( + denoiser, storage_dtype=storage_dtype, compute_dtype=compute_dtype, skip_modules_pattern=patterns_to_check + ) + check_module(denoiser) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + pipe(**inputs, generator=torch.manual_seed(0))[0] + + # 2. Test forward with load_lora_weights + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts + ) + + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + components, _, _ = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device, dtype=compute_dtype) + pipe.set_progress_bar_config(disable=None) + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + apply_layerwise_casting( + denoiser, + storage_dtype=storage_dtype, + compute_dtype=compute_dtype, + skip_modules_pattern=patterns_to_check, + ) + check_module(denoiser) + + _, _, inputs = self.get_dummy_inputs(with_generator=False) + pipe(**inputs, generator=torch.manual_seed(0))[0] + + @parameterized.expand([4, 8, 16]) + def test_lora_adapter_metadata_is_loaded_correctly(self, lora_alpha): + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components( + scheduler_cls, lora_alpha=lora_alpha + ) + pipe = self.pipeline_class(**components) + + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config + ) + + with tempfile.TemporaryDirectory() as tmpdir: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + lora_metadatas = self._get_lora_adapter_metadata(modules_to_save) + self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas) + pipe.unload_lora_weights() + + out = pipe.lora_state_dict(tmpdir, return_lora_metadata=True) + if len(out) == 3: + _, _, parsed_metadata = out + elif len(out) == 2: + _, parsed_metadata = out + + denoiser_key = ( + f"{self.pipeline_class.transformer_name}" + if self.transformer_kwargs is not None + else f"{self.pipeline_class.unet_name}" + ) + self.assertTrue(any(k.startswith(f"{denoiser_key}.") for k in parsed_metadata)) + check_module_lora_metadata( + parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=denoiser_key + ) + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + text_encoder_key = 
self.pipeline_class.text_encoder_name + self.assertTrue(any(k.startswith(f"{text_encoder_key}.") for k in parsed_metadata)) + check_module_lora_metadata( + parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_key + ) + + if "text_encoder_2" in self.pipeline_class._lora_loadable_modules: + text_encoder_2_key = "text_encoder_2" + self.assertTrue(any(k.startswith(f"{text_encoder_2_key}.") for k in parsed_metadata)) + check_module_lora_metadata( + parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_2_key + ) + + @parameterized.expand([4, 8, 16]) + def test_lora_adapter_metadata_save_load_inference(self, lora_alpha): + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components( + scheduler_cls, lora_alpha=lora_alpha + ) + pipe = self.pipeline_class(**components).to(torch_device) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(output_no_lora.shape == self.output_shape) + + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config + ) + output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdir: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + lora_metadatas = self._get_lora_adapter_metadata(modules_to_save) + self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas) + pipe.unload_lora_weights() + pipe.load_lora_weights(tmpdir) + + output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue( + np.allclose(output_lora, output_lora_pretrained, atol=1e-3, rtol=1e-3), "Lora outputs should match." + ) + + def test_lora_unload_add_adapter(self): + """Tests if `unload_lora_weights()` -> `add_adapter()` works.""" + scheduler_cls = self.scheduler_classes[0] + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components).to(torch_device) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config + ) + _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + + # unload and then add. + pipe.unload_lora_weights() + pipe, _ = self.add_adapters_to_pipeline( + pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config + ) + _ = pipe(**inputs, generator=torch.manual_seed(0))[0] + + def test_inference_load_delete_load_adapters(self): + "Tests if `load_lora_weights()` -> `delete_adapters()` -> `load_lora_weights()` works." 
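+ # Expected behavior: deleting the active adapter restores the LoRA-free output, and reloading the saved weights reproduces the adapter output.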
+ for scheduler_cls in self.scheduler_classes: + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + if "text_encoder" in self.pipeline_class._lora_loadable_modules: + pipe.text_encoder.add_adapter(text_lora_config) + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder" + ) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + if self.has_two_text_encoders or self.has_three_text_encoders: + lora_loadable_components = self.pipeline_class._lora_loadable_modules + if "text_encoder_2" in lora_loadable_components: + pipe.text_encoder_2.add_adapter(text_lora_config) + self.assertTrue( + check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2" + ) + + output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights(save_directory=tmpdirname, **lora_state_dicts) + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + + # First, delete adapter and compare. + pipe.delete_adapters(pipe.get_active_adapters()[0]) + output_no_adapter = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertFalse(np.allclose(output_adapter_1, output_no_adapter, atol=1e-3, rtol=1e-3)) + self.assertTrue(np.allclose(output_no_lora, output_no_adapter, atol=1e-3, rtol=1e-3)) + + # Then load adapter and compare. 
+ pipe.load_lora_weights(tmpdirname) + output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(np.allclose(output_adapter_1, output_lora_loaded, atol=1e-3, rtol=1e-3)) + + def _test_group_offloading_inference_denoiser(self, offload_type, use_stream): + from diffusers.hooks.group_offloading import _get_top_level_group_offload_hook + + onload_device = torch_device + offload_device = torch.device("cpu") + + components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts + ) + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))) + + components, _, _ = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + check_if_lora_correctly_set(denoiser) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + + # Test group offloading with load_lora_weights + denoiser.enable_group_offload( + onload_device=onload_device, + offload_device=offload_device, + offload_type=offload_type, + num_blocks_per_group=1, + use_stream=use_stream, + ) + # Place other model-level components on `torch_device`. + for _, component in pipe.components.items(): + if isinstance(component, torch.nn.Module): + component.to(torch_device) + group_offload_hook_1 = _get_top_level_group_offload_hook(denoiser) + self.assertTrue(group_offload_hook_1 is not None) + output_1 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + # Test group offloading after removing the lora + pipe.unload_lora_weights() + group_offload_hook_2 = _get_top_level_group_offload_hook(denoiser) + self.assertTrue(group_offload_hook_2 is not None) + output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0] # noqa: F841 + + # Add the lora again and check if group offloading works + pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + check_if_lora_correctly_set(denoiser) + group_offload_hook_3 = _get_top_level_group_offload_hook(denoiser) + self.assertTrue(group_offload_hook_3 is not None) + output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0] + + self.assertTrue(np.allclose(output_1, output_3, atol=1e-3, rtol=1e-3)) + + @parameterized.expand([("block_level", True), ("leaf_level", False), ("leaf_level", True)]) + @require_torch_accelerator + def test_group_offloading_inference_denoiser(self, offload_type, use_stream): + for cls in inspect.getmro(self.__class__): + if "test_group_offloading_inference_denoiser" in cls.__dict__ and cls is not PeftLoraLoaderMixinTests: + # Skip this test if it is overwritten by child class. 
We need to do this because parameterized + # materializes the test methods on invocation which cannot be overridden. + return + self._test_group_offloading_inference_denoiser(offload_type, use_stream) + + @require_torch_accelerator + def test_lora_loading_model_cpu_offload(self): + components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + _, _, inputs = self.get_dummy_inputs(with_generator=False) + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + denoiser.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0] + + with tempfile.TemporaryDirectory() as tmpdirname: + modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True) + lora_state_dicts = self._get_lora_state_dicts(modules_to_save) + self.pipeline_class.save_lora_weights( + save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts + ) + # reinitialize the pipeline to mimic the inference workflow. + components, _, denoiser_lora_config = self.get_dummy_components(self.scheduler_classes[0]) + pipe = self.pipeline_class(**components) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.load_lora_weights(tmpdirname) + denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet + self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.") + + output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0] + self.assertTrue(np.allclose(output_lora, output_lora_loaded, atol=1e-3, rtol=1e-3)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py new file mode 100644 index 0000000000000000000000000000000000000000..7eb830cd509706f018e377a05a08ec9c22f95755 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py @@ -0,0 +1,276 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import unittest + +import torch +from parameterized import parameterized + +from diffusers import AsymmetricAutoencoderKL +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_hf_numpy, + require_torch_accelerator, + require_torch_gpu, + skip_mps, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AsymmetricAutoencoderKL + main_input_name = "sample" + base_precision = 1e-2 + + def get_asym_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None): + block_out_channels = block_out_channels or [2, 4] + norm_num_groups = norm_num_groups or 2 + init_dict = { + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), + "down_block_out_channels": block_out_channels, + "layers_per_down_block": 1, + "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), + "up_block_out_channels": block_out_channels, + "layers_per_up_block": 1, + "act_fn": "silu", + "latent_channels": 4, + "norm_num_groups": norm_num_groups, + "sample_size": 32, + "scaling_factor": 0.18215, + } + return init_dict + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + mask = torch.ones((batch_size, 1) + sizes).to(torch_device) + + return {"sample": image, "mask": mask} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_asym_autoencoder_kl_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skip("Unsupported test.") + def test_forward_with_norm_groups(self): + pass + + +@slow +class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase): + def get_file_format(self, seed, shape): + return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return image + + def get_sd_vae_model(self, model_id="cross-attention/asymmetric-autoencoder-kl-x-1-5", fp16=False): + revision = "main" + torch_dtype = torch.float32 + + model = AsymmetricAutoencoderKL.from_pretrained( + model_id, + torch_dtype=torch_dtype, + revision=revision, + ) + model.to(torch_device).eval() + + return model + + def get_generator(self, seed=0): + generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device + if torch_device != "mps": + return torch.Generator(device=generator_device).manual_seed(seed) + return torch.manual_seed(seed) + + @parameterized.expand( + [ + # fmt: off + [ + 33, + Expectations( + { + ("xpu", 3): torch.tensor([-0.0343, 0.2873, 0.1680, -0.0140, -0.3459, 0.3522, -0.1336, 0.1075]), + ("cuda", 7): torch.tensor([-0.0336, 0.3011, 0.1764, 0.0087, -0.3401, 0.3645, -0.1247, 0.1205]), + ("mps", None): torch.tensor( + [-0.1603, 0.9878, 
-0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824] + ), + } + ), + ], + [ + 47, + Expectations( + { + ("xpu", 3): torch.tensor([0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529]), + ("cuda", 7): torch.tensor([0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529]), + ("mps", None): torch.tensor( + [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089] + ), + } + ), + ], + # fmt: on + ] + ) + def test_stable_diffusion(self, seed, expected_slices): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed) + generator = self.get_generator(seed) + + with torch.no_grad(): + sample = model(image, generator=generator, sample_posterior=True).sample + + assert sample.shape == image.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + + expected_slice = expected_slices.get_expectation() + assert torch_all_close(output_slice, expected_slice, atol=5e-3) + + @parameterized.expand( + [ + # fmt: off + [ + 33, + [-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097], + [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], + ], + [ + 47, + [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], + [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], + ], + # fmt: on + ] + ) + def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed) + + with torch.no_grad(): + sample = model(image).sample + + assert sample.shape == image.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) + + @parameterized.expand( + [ + # fmt: off + [13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]], + [37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]], + # fmt: on + ] + ) + @require_torch_accelerator + @skip_mps + def test_stable_diffusion_decode(self, seed, expected_slice): + model = self.get_sd_vae_model() + encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) + + with torch.no_grad(): + sample = model.decode(encoding).sample + + assert list(sample.shape) == [3, 3, 512, 512] + + output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=2e-3) + + @parameterized.expand([(13,), (16,), (37,)]) + @require_torch_gpu + @unittest.skipIf( + not is_xformers_available(), + reason="xformers is not required when using PyTorch 2.0.", + ) + def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): + model = self.get_sd_vae_model() + encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) + + with torch.no_grad(): + sample = model.decode(encoding).sample + + model.enable_xformers_memory_efficient_attention() + with torch.no_grad(): + sample_2 = model.decode(encoding).sample + + assert list(sample.shape) == [3, 3, 512, 512] + + assert torch_all_close(sample, sample_2, atol=5e-2) + + @parameterized.expand( + [ + # fmt: off + [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], + [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], + # fmt: on + ] + ) + def test_stable_diffusion_encode_sample(self, seed, expected_slice): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed) + 
generator = self.get_generator(seed) + + with torch.no_grad(): + dist = model.encode(image).latent_dist + sample = dist.sample(generator=generator) + + assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] + + output_slice = sample[0, -1, -3:, -3:].flatten().cpu() + expected_output_slice = torch.tensor(expected_slice) + + tolerance = 3e-3 if torch_device != "mps" else 1e-2 + assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_cosmos.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_cosmos.py new file mode 100644 index 0000000000000000000000000000000000000000..ceccc2364e2632d6eb0b11646330d69a0726712d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_cosmos.py @@ -0,0 +1,86 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from diffusers import AutoencoderKLCosmos + +from ...testing_utils import enable_full_determinism, floats_tensor, torch_device +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLCosmosTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLCosmos + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_cosmos_config(self): + return { + "in_channels": 3, + "out_channels": 3, + "latent_channels": 4, + "encoder_block_out_channels": (8, 8, 8, 8), + "decode_block_out_channels": (8, 8, 8, 8), + "attention_resolutions": (8,), + "resolution": 64, + "num_layers": 2, + "patch_size": 4, + "patch_type": "haar", + "scaling_factor": 1.0, + "spatial_compression_ratio": 4, + "temporal_compression_ratio": 4, + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + height = 32 + width = 32 + + image = floats_tensor((batch_size, num_channels, num_frames, height, width)).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 9, 32, 32) + + @property + def output_shape(self): + return (3, 9, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_cosmos_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "CosmosEncoder3d", + "CosmosDecoder3d", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("Not sure why this test fails. 
Investigate later.") + def test_effective_gradient_checkpointing(self): + pass + + @unittest.skip("Unsupported test.") + def test_forward_with_norm_groups(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_dc.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_dc.py new file mode 100644 index 0000000000000000000000000000000000000000..56f172f1c869bd20aca92f4f7128596563b27c22 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_dc.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from diffusers import AutoencoderDC + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderDCTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderDC + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_dc_config(self): + return { + "in_channels": 3, + "latent_channels": 4, + "attention_head_dim": 2, + "encoder_block_types": ( + "ResBlock", + "EfficientViTBlock", + ), + "decoder_block_types": ( + "ResBlock", + "EfficientViTBlock", + ), + "encoder_block_out_channels": (8, 8), + "decoder_block_out_channels": (8, 8), + "encoder_qkv_multiscales": ((), (5,)), + "decoder_qkv_multiscales": ((), (5,)), + "encoder_layers_per_block": (1, 1), + "decoder_layers_per_block": [1, 1], + "downsample_block_type": "conv", + "upsample_block_type": "interpolate", + "decoder_norm_types": "rms_norm", + "decoder_act_fns": "silu", + "scaling_factor": 0.41407, + } + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_dc_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skip("AutoencoderDC does not support `norm_num_groups` because it does not use GroupNorm.") + def test_forward_with_norm_groups(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py new file mode 100644 index 0000000000000000000000000000000000000000..6f91f8bfa91b82d33071c959cdc24a8442f18372 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py @@ -0,0 +1,210 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import AutoencoderKLHunyuanVideo +from diffusers.models.autoencoders.autoencoder_kl_hunyuan_video import prepare_causal_attention_mask + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLHunyuanVideoTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLHunyuanVideo + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_hunyuan_video_config(self): + return { + "in_channels": 3, + "out_channels": 3, + "latent_channels": 4, + "down_block_types": ( + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + ), + "up_block_types": ( + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + ), + "block_out_channels": (8, 8, 8, 8), + "layers_per_block": 1, + "act_fn": "silu", + "norm_num_groups": 4, + "scaling_factor": 0.476986, + "spatial_compression_ratio": 8, + "temporal_compression_ratio": 4, + "mid_block_add_attention": True, + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + sizes = (16, 16) + + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 9, 16, 16) + + @property + def output_shape(self): + return (3, 9, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_hunyuan_video_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_enable_disable_tiling(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_tiling() + output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), + 0.5, + "VAE tiling should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_tiling() + output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_tiling.detach().cpu().numpy().all(), + output_without_tiling_2.detach().cpu().numpy().all(), + "Without tiling outputs should match with the outputs when tiling is manually disabled.", + ) + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = 
self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_slicing() + output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.5, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when slicing is manually disabled.", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "HunyuanVideoDecoder3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoEncoder3D", + "HunyuanVideoMidBlock3D", + "HunyuanVideoUpBlock3D", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + # We need to overwrite this test because the base test does not account length of down_block_types + def test_forward_with_norm_groups(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["norm_num_groups"] = 16 + init_dict["block_out_channels"] = (16, 16, 16, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + @unittest.skip("Unsupported test.") + def test_outputs_equivalence(self): + pass + + def test_prepare_causal_attention_mask(self): + def prepare_causal_attention_mask_orig( + num_frames: int, height_width: int, dtype: torch.dtype, device: torch.device, batch_size: int = None + ) -> torch.Tensor: + seq_len = num_frames * height_width + mask = torch.full((seq_len, seq_len), float("-inf"), dtype=dtype, device=device) + for i in range(seq_len): + i_frame = i // height_width + mask[i, : (i_frame + 1) * height_width] = 0 + if batch_size is not None: + mask = mask.unsqueeze(0).expand(batch_size, -1, -1) + return mask + + # test with some odd shapes + original_mask = prepare_causal_attention_mask_orig( + num_frames=31, height_width=111, dtype=torch.float32, device=torch_device + ) + new_mask = prepare_causal_attention_mask( + num_frames=31, height_width=111, dtype=torch.float32, device=torch_device + ) + self.assertTrue( + torch.allclose(original_mask, new_mask), + "Causal attention mask should be the same", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl.py new file mode 100644 index 0000000000000000000000000000000000000000..662a3f1b80b7a32d07f0158dd71d464c1c6d9b4a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl.py @@ -0,0 +1,468 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch +from parameterized import parameterized + +from diffusers import AutoencoderKL +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_hf_numpy, + require_torch_accelerator, + require_torch_accelerator_with_fp16, + require_torch_gpu, + skip_mps, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKL + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None): + block_out_channels = block_out_channels or [2, 4] + norm_num_groups = norm_num_groups or 2 + init_dict = { + "block_out_channels": block_out_channels, + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), + "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), + "latent_channels": 4, + "norm_num_groups": norm_num_groups, + } + return init_dict + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_enable_disable_tiling(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_tiling() + output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), + 0.5, + "VAE tiling should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_tiling() + output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_tiling.detach().cpu().numpy().all(), + output_without_tiling_2.detach().cpu().numpy().all(), + "Without tiling outputs should match with the outputs when tiling is manually disabled.", + ) + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + 
model.enable_slicing() + output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.5, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when slicing is manually disabled.", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"Decoder", "Encoder", "UNetMidBlock2D"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_from_pretrained_hub(self): + model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") + model = model.to(torch_device) + model.eval() + + # Keep generator on CPU for non-CUDA devices to compare outputs with CPU result tensors + generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device + if torch_device != "mps": + generator = torch.Generator(device=generator_device).manual_seed(0) + else: + generator = torch.manual_seed(0) + + image = torch.randn( + 1, + model.config.in_channels, + model.config.sample_size, + model.config.sample_size, + generator=torch.manual_seed(0), + ) + image = image.to(torch_device) + with torch.no_grad(): + output = model(image, sample_posterior=True, generator=generator).sample + + output_slice = output[0, -1, -3:, -3:].flatten().cpu() + + # Since the VAE Gaussian prior's generator is seeded on the appropriate device, + # the expected output slices are not the same for CPU and GPU. 
+ if torch_device == "mps": + expected_output_slice = torch.tensor( + [ + -4.0078e-01, + -3.8323e-04, + -1.2681e-01, + -1.1462e-01, + 2.0095e-01, + 1.0893e-01, + -8.8247e-02, + -3.0361e-01, + -9.8644e-03, + ] + ) + elif generator_device == "cpu": + expected_output_slice = torch.tensor( + [ + -0.1352, + 0.0878, + 0.0419, + -0.0818, + -0.1069, + 0.0688, + -0.1458, + -0.4446, + -0.0026, + ] + ) + else: + expected_output_slice = torch.tensor( + [ + -0.2421, + 0.4642, + 0.2507, + -0.0438, + 0.0682, + 0.3160, + -0.2018, + -0.0727, + 0.2485, + ] + ) + + self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) + + +@slow +class AutoencoderKLIntegrationTests(unittest.TestCase): + def get_file_format(self, seed, shape): + return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return image + + def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False): + revision = "fp16" if fp16 else None + torch_dtype = torch.float16 if fp16 else torch.float32 + + model = AutoencoderKL.from_pretrained( + model_id, + subfolder="vae", + torch_dtype=torch_dtype, + revision=revision, + ) + model.to(torch_device) + + return model + + def get_generator(self, seed=0): + generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device + if torch_device != "mps": + return torch.Generator(device=generator_device).manual_seed(seed) + return torch.manual_seed(seed) + + @parameterized.expand( + [ + # fmt: off + [ + 33, + [-0.1556, 0.9848, -0.0410, -0.0642, -0.2685, 0.8381, -0.2004, -0.0700], + [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], + ], + [ + 47, + [-0.2376, 0.1200, 0.1337, -0.4830, -0.2504, -0.0759, -0.0486, -0.4077], + [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], + ], + # fmt: on + ] + ) + def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed) + generator = self.get_generator(seed) + + with torch.no_grad(): + sample = model(image, generator=generator, sample_posterior=True).sample + + assert sample.shape == image.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) + + @parameterized.expand( + [ + # fmt: off + [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], + [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_stable_diffusion_fp16(self, seed, expected_slice): + model = self.get_sd_vae_model(fp16=True) + image = self.get_sd_image(seed, fp16=True) + generator = self.get_generator(seed) + + with torch.no_grad(): + sample = model(image, generator=generator, sample_posterior=True).sample + + assert sample.shape == image.shape + + output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=1e-2) 
+ + @parameterized.expand( + [ + # fmt: off + [ + 33, + [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], + [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], + ], + [ + 47, + [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], + [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], + ], + # fmt: on + ] + ) + def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed) + + with torch.no_grad(): + sample = model(image).sample + + assert sample.shape == image.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) + + @parameterized.expand( + [ + # fmt: off + [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], + [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], + # fmt: on + ] + ) + @require_torch_accelerator + @skip_mps + def test_stable_diffusion_decode(self, seed, expected_slice): + model = self.get_sd_vae_model() + encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) + + with torch.no_grad(): + sample = model.decode(encoding).sample + + assert list(sample.shape) == [3, 3, 512, 512] + + output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) + + @parameterized.expand( + [ + # fmt: off + [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], + [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_stable_diffusion_decode_fp16(self, seed, expected_slice): + model = self.get_sd_vae_model(fp16=True) + encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) + + with torch.no_grad(): + sample = model.decode(encoding).sample + + assert list(sample.shape) == [3, 3, 512, 512] + + output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) + + @parameterized.expand([(13,), (16,), (27,)]) + @require_torch_gpu + @unittest.skipIf( + not is_xformers_available(), + reason="xformers is not required when using PyTorch 2.0.", + ) + def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed): + model = self.get_sd_vae_model(fp16=True) + encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) + + with torch.no_grad(): + sample = model.decode(encoding).sample + + model.enable_xformers_memory_efficient_attention() + with torch.no_grad(): + sample_2 = model.decode(encoding).sample + + assert list(sample.shape) == [3, 3, 512, 512] + + assert torch_all_close(sample, sample_2, atol=1e-1) + + @parameterized.expand([(13,), (16,), (37,)]) + @require_torch_gpu + @unittest.skipIf( + not is_xformers_available(), + reason="xformers is not required when using PyTorch 2.0.", + ) + def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): + model = self.get_sd_vae_model() + encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) + + with torch.no_grad(): + sample = model.decode(encoding).sample + + model.enable_xformers_memory_efficient_attention() + with torch.no_grad(): + sample_2 = 
model.decode(encoding).sample + + assert list(sample.shape) == [3, 3, 512, 512] + + assert torch_all_close(sample, sample_2, atol=1e-2) + + @parameterized.expand( + [ + # fmt: off + [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], + [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], + # fmt: on + ] + ) + def test_stable_diffusion_encode_sample(self, seed, expected_slice): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed) + generator = self.get_generator(seed) + + with torch.no_grad(): + dist = model.encode(image).latent_dist + sample = dist.sample(generator=generator) + + assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] + + output_slice = sample[0, -1, -3:, -3:].flatten().cpu() + expected_output_slice = torch.tensor(expected_slice) + + tolerance = 3e-3 if torch_device != "mps" else 1e-2 + assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py new file mode 100644 index 0000000000000000000000000000000000000000..739daf2a492da9bf2e57b460574f84ffbed691cb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl_cogvideox.py @@ -0,0 +1,179 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import AutoencoderKLCogVideoX + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLCogVideoXTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLCogVideoX + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_cogvideox_config(self): + return { + "in_channels": 3, + "out_channels": 3, + "down_block_types": ( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + "up_block_types": ( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + "block_out_channels": (8, 8, 8, 8), + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 2, + "temporal_compression_ratio": 4, + } + + @property + def dummy_input(self): + batch_size = 4 + num_frames = 8 + num_channels = 3 + sizes = (16, 16) + + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 8, 16, 16) + + @property + def output_shape(self): + return (3, 8, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_cogvideox_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_enable_disable_tiling(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_tiling() + output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), + 0.5, + "VAE tiling should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_tiling() + output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_tiling.detach().cpu().numpy().all(), + output_without_tiling_2.detach().cpu().numpy().all(), + "Without tiling outputs should match with the outputs when tiling is manually disabled.", + ) + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_slicing() + output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.5, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when 
slicing is manually disabled.", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "CogVideoXDownBlock3D", + "CogVideoXDecoder3D", + "CogVideoXEncoder3D", + "CogVideoXUpBlock3D", + "CogVideoXMidBlock3D", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_forward_with_norm_groups(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["norm_num_groups"] = 16 + init_dict["block_out_channels"] = (16, 32, 32, 32) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + @unittest.skip("Unsupported test.") + def test_outputs_equivalence(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb427bff8e138177a306a0445b2b90799af0dd2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl_temporal_decoder.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +from diffusers import AutoencoderKLTemporalDecoder + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLTemporalDecoderTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLTemporalDecoder + main_input_name = "sample" + base_precision = 1e-2 + + @property + def dummy_input(self): + batch_size = 3 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + num_frames = 3 + + return {"sample": image, "num_frames": num_frames} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": [32, 64], + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], + "latent_channels": 4, + "layers_per_block": 2, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"Encoder", "TemporalDecoder", "UNetMidBlock2D"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("Test unsupported.") + def test_forward_with_norm_groups(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_ltx_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_ltx_video.py new file mode 100644 index 0000000000000000000000000000000000000000..21ab3896c890a67d0c35634f9bcb2f127ed3ef70 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_ltx_video.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import AutoencoderKLLTXVideo + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLLTXVideo090Tests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLLTXVideo + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_ltx_video_config(self): + return { + "in_channels": 3, + "out_channels": 3, + "latent_channels": 8, + "block_out_channels": (8, 8, 8, 8), + "decoder_block_out_channels": (8, 8, 8, 8), + "layers_per_block": (1, 1, 1, 1, 1), + "decoder_layers_per_block": (1, 1, 1, 1, 1), + "spatio_temporal_scaling": (True, True, False, False), + "decoder_spatio_temporal_scaling": (True, True, False, False), + "decoder_inject_noise": (False, False, False, False, False), + "upsample_residual": (False, False, False, False), + "upsample_factor": (1, 1, 1, 1), + "timestep_conditioning": False, + "patch_size": 1, + "patch_size_t": 1, + "encoder_causal": True, + "decoder_causal": False, + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + sizes = (16, 16) + + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 9, 16, 16) + + @property + def output_shape(self): + return (3, 9, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_ltx_video_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "LTXVideoEncoder3d", + "LTXVideoDecoder3d", + "LTXVideoDownBlock3D", + "LTXVideoMidBlock3d", + "LTXVideoUpBlock3d", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("Unsupported test.") + def test_outputs_equivalence(self): + pass + + @unittest.skip("AutoencoderKLLTXVideo does not support `norm_num_groups` because it does not use GroupNorm.") + def test_forward_with_norm_groups(self): + pass + + +class AutoencoderKLLTXVideo091Tests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLLTXVideo + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_ltx_video_config(self): + return { + "in_channels": 3, + "out_channels": 3, + "latent_channels": 8, + "block_out_channels": (8, 8, 8, 8), + "decoder_block_out_channels": (16, 32, 64), + "layers_per_block": (1, 1, 1, 1), + "decoder_layers_per_block": (1, 1, 1, 1), + "spatio_temporal_scaling": (True, True, True, False), + "decoder_spatio_temporal_scaling": (True, True, True), + "decoder_inject_noise": (True, True, True, False), + "upsample_residual": (True, True, True), + "upsample_factor": (2, 2, 2), + "timestep_conditioning": True, + "patch_size": 1, + "patch_size_t": 1, + "encoder_causal": True, + "decoder_causal": False, + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + sizes = (16, 16) + + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + timestep = torch.tensor([0.05] * batch_size, device=torch_device) + + return {"sample": image, "temb": timestep} + + @property + def input_shape(self): + return (3, 9, 16, 16) + + @property + def output_shape(self): + return (3, 9, 16, 16) + + def 
prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_ltx_video_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "LTXVideoEncoder3d", + "LTXVideoDecoder3d", + "LTXVideoDownBlock3D", + "LTXVideoMidBlock3d", + "LTXVideoUpBlock3d", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("Unsupported test.") + def test_outputs_equivalence(self): + pass + + @unittest.skip("AutoencoderKLLTXVideo does not support `norm_num_groups` because it does not use GroupNorm.") + def test_forward_with_norm_groups(self): + pass + + def test_enable_disable_tiling(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_tiling() + output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), + 0.5, + "VAE tiling should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_tiling() + output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_tiling.detach().cpu().numpy().all(), + output_without_tiling_2.detach().cpu().numpy().all(), + "Without tiling outputs should match with the outputs when tiling is manually disabled.", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_magvit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_magvit.py new file mode 100644 index 0000000000000000000000000000000000000000..58cbfc05bd03faec8362547627a6ed4c8807b6a1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_magvit.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +from diffusers import AutoencoderKLMagvit + +from ...testing_utils import enable_full_determinism, floats_tensor, torch_device +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLMagvitTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLMagvit + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_magvit_config(self): + return { + "in_channels": 3, + "latent_channels": 4, + "out_channels": 3, + "block_out_channels": [8, 8, 8, 8], + "down_block_types": [ + "SpatialDownBlock3D", + "SpatialTemporalDownBlock3D", + "SpatialTemporalDownBlock3D", + "SpatialTemporalDownBlock3D", + ], + "up_block_types": [ + "SpatialUpBlock3D", + "SpatialTemporalUpBlock3D", + "SpatialTemporalUpBlock3D", + "SpatialTemporalUpBlock3D", + ], + "layers_per_block": 1, + "norm_num_groups": 8, + "spatial_group_norm": True, + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + height = 16 + width = 16 + + image = floats_tensor((batch_size, num_channels, num_frames, height, width)).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 9, 16, 16) + + @property + def output_shape(self): + return (3, 9, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_magvit_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"EasyAnimateEncoder", "EasyAnimateDecoder"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("Not quite sure why this test fails. Revisit later.") + def test_effective_gradient_checkpointing(self): + pass + + @unittest.skip("Unsupported test.") + def test_forward_with_norm_groups(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_mochi.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_mochi.py new file mode 100644 index 0000000000000000000000000000000000000000..b8c5aaaa1eb6fa9c388a418aa0f2cce5e0dadf5a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_mochi.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +from diffusers import AutoencoderKLMochi + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLMochiTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLMochi + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_mochi_config(self): + return { + "in_channels": 15, + "out_channels": 3, + "latent_channels": 4, + "encoder_block_out_channels": (32, 32, 32, 32), + "decoder_block_out_channels": (32, 32, 32, 32), + "layers_per_block": (1, 1, 1, 1, 1), + "act_fn": "silu", + "scaling_factor": 1, + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 7 + num_channels = 3 + sizes = (16, 16) + + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 7, 16, 16) + + @property + def output_shape(self): + return (3, 7, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_mochi_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "MochiDecoder3D", + "MochiDownBlock3D", + "MochiEncoder3D", + "MochiMidBlock3D", + "MochiUpBlock3D", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("Unsupported test.") + def test_forward_with_norm_groups(self): + """ + tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_forward_with_norm_groups - + TypeError: AutoencoderKLMochi.__init__() got an unexpected keyword argument 'norm_num_groups' + """ + pass + + @unittest.skip("Unsupported test.") + def test_model_parallelism(self): + """ + tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_outputs_equivalence - + RuntimeError: values expected sparse tensor layout but got Strided + """ + pass + + @unittest.skip("Unsupported test.") + def test_outputs_equivalence(self): + """ + tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_outputs_equivalence - + RuntimeError: values expected sparse tensor layout but got Strided + """ + pass + + @unittest.skip("Unsupported test.") + def test_sharded_checkpoints_device_map(self): + """ + tests/models/autoencoders/test_models_autoencoder_mochi.py::AutoencoderKLMochiTests::test_sharded_checkpoints_device_map - + RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cuda:5! + """ diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_oobleck.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_oobleck.py new file mode 100644 index 0000000000000000000000000000000000000000..eb7bd50f4a54702ee5796752e67883d78a4ddd08 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_oobleck.py @@ -0,0 +1,252 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch +from datasets import load_dataset +from parameterized import parameterized + +from diffusers import AutoencoderOobleck + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderOobleckTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderOobleck + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_oobleck_config(self, block_out_channels=None): + init_dict = { + "encoder_hidden_size": 12, + "decoder_channels": 12, + "decoder_input_channels": 6, + "audio_channels": 2, + "downsampling_ratios": [2, 4], + "channel_multiples": [1, 2], + } + return init_dict + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 2 + seq_len = 24 + + waveform = floats_tensor((batch_size, num_channels, seq_len)).to(torch_device) + + return {"sample": waveform, "sample_posterior": False} + + @property + def input_shape(self): + return (2, 24) + + @property + def output_shape(self): + return (2, 24) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_oobleck_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_slicing() + output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.5, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when slicing is manually disabled.", + ) + + @unittest.skip("Test unsupported.") + def test_forward_with_norm_groups(self): + pass + + @unittest.skip("No attention module used in this model") + def test_set_attn_processor_for_determinism(self): + return + + @unittest.skip( + "Test not supported because of 'weight_norm_fwd_first_dim_kernel' not implemented for 'Float8_e4m3fn'" + ) + def test_layerwise_casting_training(self): + return super().test_layerwise_casting_training() + + @unittest.skip( + "The convolution layers of AutoencoderOobleck are wrapped with torch.nn.utils.weight_norm. This causes the hook's pre_forward to not " + "cast the module weights to compute_dtype (as required by forward pass). 
As a result, forward pass errors out. To fix:\n"
+        "1. Make sure `nn::Module::to` works with `torch.nn.utils.weight_norm` wrapped convolution layer.\n"
+        "2. Unskip this test."
+    )
+    def test_layerwise_casting_inference(self):
+        pass
+
+    @unittest.skip(
+        "The convolution layers of AutoencoderOobleck are wrapped with torch.nn.utils.weight_norm. This causes the hook's pre_forward to not "
+        "cast the module weights to compute_dtype (as required by forward pass). As a result, forward pass errors out. To fix:\n"
+        "1. Make sure `nn::Module::to` works with `torch.nn.utils.weight_norm` wrapped convolution layer.\n"
+        "2. Unskip this test."
+    )
+    def test_layerwise_casting_memory(self):
+        pass
+
+
+@slow
+class AutoencoderOobleckIntegrationTests(unittest.TestCase):
+    def tearDown(self):
+        # clean up the VRAM after each test
+        super().tearDown()
+        gc.collect()
+        backend_empty_cache(torch_device)
+
+    def _load_datasamples(self, num_samples):
+        ds = load_dataset(
+            "hf-internal-testing/librispeech_asr_dummy", "clean", split="validation", trust_remote_code=True
+        )
+        # automatic decoding with librispeech
+        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
+
+        return torch.nn.utils.rnn.pad_sequence(
+            [torch.from_numpy(x["array"]) for x in speech_samples], batch_first=True
+        )
+
+    def get_audio(self, audio_sample_size=2097152, fp16=False):
+        dtype = torch.float16 if fp16 else torch.float32
+        audio = self._load_datasamples(2).to(torch_device).to(dtype)
+
+        # pad / crop to audio_sample_size
+        audio = torch.nn.functional.pad(audio[:, :audio_sample_size], pad=(0, audio_sample_size - audio.shape[-1]))
+
+        # duplicate the mono waveform to two channels (todo: proper channel handling)
+        audio = audio.unsqueeze(1).repeat(1, 2, 1).to(torch_device)
+
+        return audio
+
+    def get_oobleck_vae_model(self, model_id="stabilityai/stable-audio-open-1.0", fp16=False):
+        torch_dtype = torch.float16 if fp16 else torch.float32
+
+        model = AutoencoderOobleck.from_pretrained(
+            model_id,
+            subfolder="vae",
+            torch_dtype=torch_dtype,
+        )
+        model.to(torch_device)
+
+        return model
+
+    def get_generator(self, seed=0):
+        generator_device = torch_device  # mps is handled separately below
+        if torch_device != "mps":
+            return torch.Generator(device=generator_device).manual_seed(seed)
+        return torch.manual_seed(seed)
+
+    @parameterized.expand(
+        [
+            # fmt: off
+            [33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192],
+            [44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196],
+            # fmt: on
+        ]
+    )
+    def test_stable_diffusion(self, seed, expected_slice, expected_mean_absolute_diff):
+        model = self.get_oobleck_vae_model()
+        audio = self.get_audio()
+        generator = self.get_generator(seed)
+
+        with torch.no_grad():
+            sample = model(audio, generator=generator, sample_posterior=True).sample
+
+        assert sample.shape == audio.shape
+        assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6
+
+        output_slice = sample[-1, 1, 5:10].cpu()
+        expected_output_slice = torch.tensor(expected_slice)
+
+        assert torch_all_close(output_slice, expected_output_slice, atol=1e-5)
+
+    def test_stable_diffusion_mode(self):
+        model = self.get_oobleck_vae_model()
+        audio = self.get_audio()
+
+        with torch.no_grad():
+            sample = model(audio, sample_posterior=False).sample
+
+        assert sample.shape == audio.shape
+
+    @parameterized.expand(
+        [
+            # fmt: off
+            [33, [1.193e-4, 6.56e-05, 1.314e-4, 3.80e-05, -4.01e-06], 0.001192],
+            [44, [2.77e-05, -2.65e-05, 1.18e-05, -6.94e-05, -9.57e-05], 0.001196],
+            # fmt: on
+        ]
+    )
+    def 
test_stable_diffusion_encode_decode(self, seed, expected_slice, expected_mean_absolute_diff): + model = self.get_oobleck_vae_model() + audio = self.get_audio() + generator = self.get_generator(seed) + + with torch.no_grad(): + x = audio + posterior = model.encode(x).latent_dist + z = posterior.sample(generator=generator) + sample = model.decode(z).sample + + # (batch_size, latent_dim, sequence_length) + assert posterior.mean.shape == (audio.shape[0], model.config.decoder_input_channels, 1024) + + assert sample.shape == audio.shape + assert ((sample - audio).abs().mean() - expected_mean_absolute_diff).abs() <= 1e-6 + + output_slice = sample[-1, 1, 5:10].cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=1e-5) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_tiny.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_tiny.py new file mode 100644 index 0000000000000000000000000000000000000000..4d1dc69cfaaddfad6753f4b72d9eb13b0092046b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_tiny.py @@ -0,0 +1,267 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import gc +import unittest + +import torch +from parameterized import parameterized + +from diffusers import AutoencoderTiny + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_hf_numpy, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderTinyTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderTiny + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_tiny_config(self, block_out_channels=None): + block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32] + init_dict = { + "in_channels": 3, + "out_channels": 3, + "encoder_block_out_channels": block_out_channels, + "decoder_block_out_channels": block_out_channels, + "num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels], + "num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)], + } + return init_dict + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_tiny_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skip("Model doesn't yet support smaller resolution.") + def test_enable_disable_tiling(self): + pass + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict)[0] + + torch.manual_seed(0) + model.enable_slicing() + output_with_slicing = model(**inputs_dict)[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.5, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict)[0] + + self.assertEqual( + output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when slicing is manually disabled.", + ) + + @unittest.skip("Test not supported.") + def test_outputs_equivalence(self): + pass + + @unittest.skip("Test not supported.") + def test_forward_with_norm_groups(self): + pass + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"DecoderTiny", "EncoderTiny"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_effective_gradient_checkpointing(self): + if not self.model_class._supports_gradient_checkpointing: + return # Skip test if model does not support gradient checkpointing + + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + inputs_dict_copy = copy.deepcopy(inputs_dict) + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + + assert not model.is_gradient_checkpointing and model.training + + out = 
model(**inputs_dict).sample
+        # run the backward pass; for simplicity we compute a scalar loss against
+        # random targets and backpropagate that instead of a full training objective
+        model.zero_grad()
+
+        labels = torch.randn_like(out)
+        loss = (out - labels).mean()
+        loss.backward()
+
+        # re-instantiate the model now enabling gradient checkpointing
+        torch.manual_seed(0)
+        model_2 = self.model_class(**init_dict)
+        # clone model
+        model_2.load_state_dict(model.state_dict())
+        model_2.to(torch_device)
+        model_2.enable_gradient_checkpointing()
+
+        assert model_2.is_gradient_checkpointing and model_2.training
+
+        out_2 = model_2(**inputs_dict_copy).sample
+        # run the backward pass for the gradient-checkpointed model using the same
+        # random targets, so the two losses and gradients are directly comparable
+        model_2.zero_grad()
+        loss_2 = (out_2 - labels).mean()
+        loss_2.backward()
+
+        # compare the losses and the parameter gradients
+        self.assertTrue((loss - loss_2).abs() < 1e-3)
+        named_params = dict(model.named_parameters())
+        named_params_2 = dict(model_2.named_parameters())
+
+        for name, param in named_params.items():
+            if "encoder.layers" in name:
+                continue
+            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=3e-2))
+
+    @unittest.skip(
+        "The forward pass of AutoencoderTiny creates a torch.float32 tensor. This causes inference in compute_dtype=torch.bfloat16 to fail. To fix:\n"
+        "1. Change the forward pass to be dtype agnostic.\n"
+        "2. Unskip this test."
+    )
+    def test_layerwise_casting_inference(self):
+        pass
+
+    @unittest.skip(
+        "The forward pass of AutoencoderTiny creates a torch.float32 tensor. This causes inference in compute_dtype=torch.bfloat16 to fail. To fix:\n"
+        "1. Change the forward pass to be dtype agnostic.\n"
+        "2. Unskip this test."
+ ) + def test_layerwise_casting_memory(self): + pass + + +@slow +class AutoencoderTinyIntegrationTests(unittest.TestCase): + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_file_format(self, seed, shape): + return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" + + def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return image + + def get_sd_vae_model(self, model_id="hf-internal-testing/taesd-diffusers", fp16=False): + torch_dtype = torch.float16 if fp16 else torch.float32 + + model = AutoencoderTiny.from_pretrained(model_id, torch_dtype=torch_dtype) + model.to(torch_device).eval() + return model + + @parameterized.expand( + [ + [(1, 4, 73, 97), (1, 3, 584, 776)], + [(1, 4, 97, 73), (1, 3, 776, 584)], + [(1, 4, 49, 65), (1, 3, 392, 520)], + [(1, 4, 65, 49), (1, 3, 520, 392)], + [(1, 4, 49, 49), (1, 3, 392, 392)], + ] + ) + def test_tae_tiling(self, in_shape, out_shape): + model = self.get_sd_vae_model() + model.enable_tiling() + with torch.no_grad(): + zeros = torch.zeros(in_shape).to(torch_device) + dec = model.decode(zeros).sample + assert dec.shape == out_shape + + def test_stable_diffusion(self): + model = self.get_sd_vae_model() + image = self.get_sd_image(seed=33) + + with torch.no_grad(): + sample = model(image).sample + + assert sample.shape == image.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor([0.0093, 0.6385, -0.1274, 0.1631, -0.1762, 0.5232, -0.3108, -0.0382]) + + assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) + + @parameterized.expand([(True,), (False,)]) + def test_tae_roundtrip(self, enable_tiling): + # load the autoencoder + model = self.get_sd_vae_model() + if enable_tiling: + model.enable_tiling() + + # make a black image with a white square in the middle, + # which is large enough to split across multiple tiles + image = -torch.ones(1, 3, 1024, 1024, device=torch_device) + image[..., 256:768, 256:768] = 1.0 + + # round-trip the image through the autoencoder + with torch.no_grad(): + sample = model(image).sample + + # the autoencoder reconstruction should match original image, sorta + def downscale(x): + return torch.nn.functional.avg_pool2d(x, model.spatial_scale_factor) + + assert torch_all_close(downscale(sample), downscale(image), atol=0.125) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_wan.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_wan.py new file mode 100644 index 0000000000000000000000000000000000000000..cc9c888681571a02365344f65290f83323c82063 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_wan.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import AutoencoderKLWan + +from ...testing_utils import enable_full_determinism, floats_tensor, torch_device +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class AutoencoderKLWanTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = AutoencoderKLWan + main_input_name = "sample" + base_precision = 1e-2 + + def get_autoencoder_kl_wan_config(self): + return { + "base_dim": 3, + "z_dim": 16, + "dim_mult": [1, 1, 1, 1], + "num_res_blocks": 1, + "temperal_downsample": [False, True, True], + } + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + sizes = (16, 16) + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + return {"sample": image} + + @property + def dummy_input_tiling(self): + batch_size = 2 + num_frames = 9 + num_channels = 3 + sizes = (128, 128) + image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + return {"sample": image} + + @property + def input_shape(self): + return (3, 9, 16, 16) + + @property + def output_shape(self): + return (3, 9, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = self.get_autoencoder_kl_wan_config() + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def prepare_init_args_and_inputs_for_tiling(self): + init_dict = self.get_autoencoder_kl_wan_config() + inputs_dict = self.dummy_input_tiling + return init_dict, inputs_dict + + def test_enable_disable_tiling(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_tiling() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_tiling(96, 96, 64, 64) + output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), + 0.5, + "VAE tiling should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_tiling() + output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_tiling.detach().cpu().numpy().all(), + output_without_tiling_2.detach().cpu().numpy().all(), + "Without tiling outputs should match with the outputs when tiling is manually disabled.", + ) + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_slicing() + output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.05, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + 
output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when slicing is manually disabled.", + ) + + @unittest.skip("Gradient checkpointing has not been implemented yet") + def test_gradient_checkpointing_is_applied(self): + pass + + @unittest.skip("Test not supported") + def test_forward_with_norm_groups(self): + pass + + @unittest.skip("RuntimeError: fill_out not implemented for 'Float8_e4m3fn'") + def test_layerwise_casting_inference(self): + pass + + @unittest.skip("RuntimeError: fill_out not implemented for 'Float8_e4m3fn'") + def test_layerwise_casting_training(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_consistency_decoder_vae.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_consistency_decoder_vae.py new file mode 100644 index 0000000000000000000000000000000000000000..7e44edba3624402864e454741e4e1d87f1d02507 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_consistency_decoder_vae.py @@ -0,0 +1,301 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import numpy as np +import torch + +from diffusers import ConsistencyDecoderVAE, StableDiffusionPipeline +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_image, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class ConsistencyDecoderVAETests(ModelTesterMixin, unittest.TestCase): + model_class = ConsistencyDecoderVAE + main_input_name = "sample" + base_precision = 1e-2 + forward_requires_fresh_args = True + + def get_consistency_vae_config(self, block_out_channels=None, norm_num_groups=None): + block_out_channels = block_out_channels or [2, 4] + norm_num_groups = norm_num_groups or 2 + return { + "encoder_block_out_channels": block_out_channels, + "encoder_in_channels": 3, + "encoder_out_channels": 4, + "encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), + "decoder_add_attention": False, + "decoder_block_out_channels": block_out_channels, + "decoder_down_block_types": ["ResnetDownsampleBlock2D"] * len(block_out_channels), + "decoder_downsample_padding": 1, + "decoder_in_channels": 7, + "decoder_layers_per_block": 1, + "decoder_norm_eps": 1e-05, + "decoder_norm_num_groups": norm_num_groups, + "encoder_norm_num_groups": norm_num_groups, + "decoder_num_train_timesteps": 1024, + "decoder_out_channels": 6, + "decoder_resnet_time_scale_shift": "scale_shift", + "decoder_time_embedding_type": "learned", + "decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels), + "scaling_factor": 1, + "latent_channels": 4, + } + + def inputs_dict(self, seed=None): + if seed is None: + generator = 
torch.Generator("cpu").manual_seed(0) + else: + generator = torch.Generator("cpu").manual_seed(seed) + image = randn_tensor((4, 3, 32, 32), generator=generator, device=torch.device(torch_device)) + + return {"sample": image, "generator": generator} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + @property + def init_dict(self): + return self.get_consistency_vae_config() + + def prepare_init_args_and_inputs_for_common(self): + return self.init_dict, self.inputs_dict() + + def test_enable_disable_tiling(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + _ = inputs_dict.pop("generator") + + torch.manual_seed(0) + output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_tiling() + output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(), + 0.5, + "VAE tiling should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_tiling() + output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_tiling.detach().cpu().numpy().all(), + output_without_tiling_2.detach().cpu().numpy().all(), + "Without tiling outputs should match with the outputs when tiling is manually disabled.", + ) + + def test_enable_disable_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict).to(torch_device) + + inputs_dict.update({"return_dict": False}) + _ = inputs_dict.pop("generator") + + torch.manual_seed(0) + output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + torch.manual_seed(0) + model.enable_slicing() + output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertLess( + (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(), + 0.5, + "VAE slicing should not affect the inference results", + ) + + torch.manual_seed(0) + model.disable_slicing() + output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0] + + self.assertEqual( + output_without_slicing.detach().cpu().numpy().all(), + output_without_slicing_2.detach().cpu().numpy().all(), + "Without slicing outputs should match with the outputs when slicing is manually disabled.", + ) + + +@slow +class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @torch.no_grad() + def test_encode_decode(self): + vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update + vae.to(torch_device) + + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ).resize((256, 256)) + image = torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :].to( + torch_device + ) + + latent = vae.encode(image).latent_dist.mean + + 
sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample + + actual_output = sample[0, :2, :2, :2].flatten().cpu() + expected_output = torch.tensor([-0.0141, -0.0014, 0.0115, 0.0086, 0.1051, 0.1053, 0.1031, 0.1024]) + + assert torch_all_close(actual_output, expected_output, atol=5e-3) + + def test_sd(self): + vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update + pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, safety_checker=None + ) + pipe.to(torch_device) + + out = pipe( + "horse", + num_inference_steps=2, + output_type="pt", + generator=torch.Generator("cpu").manual_seed(0), + ).images[0] + + actual_output = out[:2, :2, :2].flatten().cpu() + expected_output = torch.tensor([0.7686, 0.8228, 0.6489, 0.7455, 0.8661, 0.8797, 0.8241, 0.8759]) + + assert torch_all_close(actual_output, expected_output, atol=5e-3) + + def test_encode_decode_f16(self): + vae = ConsistencyDecoderVAE.from_pretrained( + "openai/consistency-decoder", torch_dtype=torch.float16 + ) # TODO - update + vae.to(torch_device) + + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ).resize((256, 256)) + image = ( + torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :] + .half() + .to(torch_device) + ) + + latent = vae.encode(image).latent_dist.mean + + sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample + + actual_output = sample[0, :2, :2, :2].flatten().cpu() + expected_output = torch.tensor( + [-0.0111, -0.0125, -0.0017, -0.0007, 0.1257, 0.1465, 0.1450, 0.1471], + dtype=torch.float16, + ) + + assert torch_all_close(actual_output, expected_output, atol=5e-3) + + def test_sd_f16(self): + vae = ConsistencyDecoderVAE.from_pretrained( + "openai/consistency-decoder", torch_dtype=torch.float16 + ) # TODO - update + pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + torch_dtype=torch.float16, + vae=vae, + safety_checker=None, + ) + pipe.to(torch_device) + + out = pipe( + "horse", + num_inference_steps=2, + output_type="pt", + generator=torch.Generator("cpu").manual_seed(0), + ).images[0] + + actual_output = out[:2, :2, :2].flatten().cpu() + expected_output = torch.tensor( + [0.0000, 0.0249, 0.0000, 0.0000, 0.1709, 0.2773, 0.0471, 0.1035], + dtype=torch.float16, + ) + + assert torch_all_close(actual_output, expected_output, atol=5e-3) + + def test_vae_tiling(self): + vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16) + pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, safety_checker=None, torch_dtype=torch.float16 + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + out_1 = pipe( + "horse", + num_inference_steps=2, + output_type="pt", + generator=torch.Generator("cpu").manual_seed(0), + ).images[0] + + # make sure tiled vae decode yields the same result + pipe.enable_vae_tiling() + out_2 = pipe( + "horse", + num_inference_steps=2, + output_type="pt", + generator=torch.Generator("cpu").manual_seed(0), + ).images[0] + + assert torch_all_close(out_1, out_2, atol=5e-3) + + # test that tiled decode works with various shapes + shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)] + with torch.no_grad(): + for shape in shapes: + image = torch.zeros(shape, 
device=torch_device, dtype=pipe.vae.dtype) + pipe.vae.decode(image) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_vq.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_vq.py new file mode 100644 index 0000000000000000000000000000000000000000..1c636b08173388b6a80b1786c2b9de264f4172eb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_vq.py @@ -0,0 +1,118 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import VQModel + +from ...testing_utils import ( + backend_manual_seed, + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + + +class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = VQModel + main_input_name = "sample" + + @property + def dummy_input(self, sizes=(32, 32)): + batch_size = 4 + num_channels = 3 + + image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + + return {"sample": image} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": [8, 16], + "norm_num_groups": 8, + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], + "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], + "latent_channels": 3, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skip("Test not supported.") + def test_forward_signature(self): + pass + + @unittest.skip("Test not supported.") + def test_training(self): + pass + + def test_from_pretrained_hub(self): + model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = VQModel.from_pretrained("fusing/vqgan-dummy") + model.to(torch_device).eval() + + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) + image = image.to(torch_device) + with torch.no_grad(): + output = model(image).sample + + output_slice = output[0, -1, -3:, -3:].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3)) + + def test_loss_pretrained(self): + model = VQModel.from_pretrained("fusing/vqgan-dummy") + model.to(torch_device).eval() + + torch.manual_seed(0) + 
backend_manual_seed(torch_device, 0) + + image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) + image = image.to(torch_device) + with torch.no_grad(): + output = model(image).commit_loss.cpu() + # fmt: off + expected_output = torch.tensor([0.1936]) + # fmt: on + self.assertTrue(torch.allclose(output, expected_output, atol=1e-3)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/vae.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..f8055f1c1cb0c0518234510f503730ae6116cba9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/vae.py @@ -0,0 +1,86 @@ +def get_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None): + block_out_channels = block_out_channels or [2, 4] + norm_num_groups = norm_num_groups or 2 + init_dict = { + "block_out_channels": block_out_channels, + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), + "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), + "latent_channels": 4, + "norm_num_groups": norm_num_groups, + } + return init_dict + + +def get_asym_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None): + block_out_channels = block_out_channels or [2, 4] + norm_num_groups = norm_num_groups or 2 + init_dict = { + "in_channels": 3, + "out_channels": 3, + "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), + "down_block_out_channels": block_out_channels, + "layers_per_down_block": 1, + "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), + "up_block_out_channels": block_out_channels, + "layers_per_up_block": 1, + "act_fn": "silu", + "latent_channels": 4, + "norm_num_groups": norm_num_groups, + "sample_size": 32, + "scaling_factor": 0.18215, + } + return init_dict + + +def get_autoencoder_tiny_config(block_out_channels=None): + block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32] + init_dict = { + "in_channels": 3, + "out_channels": 3, + "encoder_block_out_channels": block_out_channels, + "decoder_block_out_channels": block_out_channels, + "num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels], + "num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)], + } + return init_dict + + +def get_consistency_vae_config(block_out_channels=None, norm_num_groups=None): + block_out_channels = block_out_channels or [2, 4] + norm_num_groups = norm_num_groups or 2 + return { + "encoder_block_out_channels": block_out_channels, + "encoder_in_channels": 3, + "encoder_out_channels": 4, + "encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), + "decoder_add_attention": False, + "decoder_block_out_channels": block_out_channels, + "decoder_down_block_types": ["ResnetDownsampleBlock2D"] * len(block_out_channels), + "decoder_downsample_padding": 1, + "decoder_in_channels": 7, + "decoder_layers_per_block": 1, + "decoder_norm_eps": 1e-05, + "decoder_norm_num_groups": norm_num_groups, + "encoder_norm_num_groups": norm_num_groups, + "decoder_num_train_timesteps": 1024, + "decoder_out_channels": 6, + "decoder_resnet_time_scale_shift": "scale_shift", + "decoder_time_embedding_type": "learned", + "decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels), + "scaling_factor": 1, + "latent_channels": 4, + } + + +def 
get_autoencoder_oobleck_config(block_out_channels=None): + init_dict = { + "encoder_hidden_size": 12, + "decoder_channels": 12, + "decoder_input_channels": 6, + "audio_channels": 2, + "downsampling_ratios": [2, 4], + "channel_multiples": [1, 2], + } + return init_dict diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_activations.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_activations.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8e51453e98157a753fc178ce146849e189a5a1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_activations.py @@ -0,0 +1,48 @@ +import unittest + +import torch +from torch import nn + +from diffusers.models.activations import get_activation + + +class ActivationsTests(unittest.TestCase): + def test_swish(self): + act = get_activation("swish") + + self.assertIsInstance(act, nn.SiLU) + + self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) + self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) + + def test_silu(self): + act = get_activation("silu") + + self.assertIsInstance(act, nn.SiLU) + + self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) + self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) + + def test_mish(self): + act = get_activation("mish") + + self.assertIsInstance(act, nn.Mish) + + self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0) + self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) + + def test_gelu(self): + act = get_activation("gelu") + + self.assertIsInstance(act, nn.GELU) + + self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) + self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) + self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_attention_processor.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_attention_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..ccf36b092b46708becf7072401b1f3ae7120aa95 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_attention_processor.py @@ -0,0 +1,132 @@ +import tempfile +import unittest + +import numpy as np +import pytest +import torch + +from diffusers import DiffusionPipeline +from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor + +from ..testing_utils import torch_device + + +class AttnAddedKVProcessorTests(unittest.TestCase): + def get_constructor_arguments(self, only_cross_attention: bool = False): + query_dim = 10 + + if only_cross_attention: + cross_attention_dim = 12 + else: + # when only cross attention is not set, the cross attention dim must be the same as the query dim + cross_attention_dim = query_dim + + return { + "query_dim": query_dim, + "cross_attention_dim": cross_attention_dim, + "heads": 2, + "dim_head": 4, + "added_kv_proj_dim": 6, + "norm_num_groups": 1, + 
"only_cross_attention": only_cross_attention, + "processor": AttnAddedKVProcessor(), + } + + def get_forward_arguments(self, query_dim, added_kv_proj_dim): + batch_size = 2 + + hidden_states = torch.rand(batch_size, query_dim, 3, 2) + encoder_hidden_states = torch.rand(batch_size, 4, added_kv_proj_dim) + attention_mask = None + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "attention_mask": attention_mask, + } + + def test_only_cross_attention(self): + # self and cross attention + + torch.manual_seed(0) + + constructor_args = self.get_constructor_arguments(only_cross_attention=False) + attn = Attention(**constructor_args) + + self.assertTrue(attn.to_k is not None) + self.assertTrue(attn.to_v is not None) + + forward_args = self.get_forward_arguments( + query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"] + ) + + self_and_cross_attn_out = attn(**forward_args) + + # only self attention + + torch.manual_seed(0) + + constructor_args = self.get_constructor_arguments(only_cross_attention=True) + attn = Attention(**constructor_args) + + self.assertTrue(attn.to_k is None) + self.assertTrue(attn.to_v is None) + + forward_args = self.get_forward_arguments( + query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"] + ) + + only_cross_attn_out = attn(**forward_args) + + self.assertTrue((only_cross_attn_out != self_and_cross_attn_out).all()) + + +class DeprecatedAttentionBlockTests(unittest.TestCase): + @pytest.fixture(scope="session") + def is_dist_enabled(pytestconfig): + return pytestconfig.getoption("dist") == "loadfile" + + @pytest.mark.xfail( + condition=torch.device(torch_device).type == "cuda" and is_dist_enabled, + reason="Test currently fails on our GPU CI because of `loadfile`. Note that it only fails when the tests are distributed from `pytest ... tests/models`. 
If the tests are run individually, even with `loadfile` it won't fail.", + strict=True, + ) + def test_conversion_when_using_device_map(self): + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + + pre_conversion = pipe( + "foo", + num_inference_steps=2, + generator=torch.Generator("cpu").manual_seed(0), + output_type="np", + ).images + + # the initial conversion succeeds + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", device_map="balanced", safety_checker=None + ) + + conversion = pipe( + "foo", + num_inference_steps=2, + generator=torch.Generator("cpu").manual_seed(0), + output_type="np", + ).images + + with tempfile.TemporaryDirectory() as tmpdir: + # save the converted model + pipe.save_pretrained(tmpdir) + + # can also load the converted weights + pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="balanced", safety_checker=None) + after_conversion = pipe( + "foo", + num_inference_steps=2, + generator=torch.Generator("cpu").manual_seed(0), + output_type="np", + ).images + + self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-3)) + self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-3)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_layers_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_layers_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..eaeffa699db2cb36e18a0d4d929e2bf07a99e8a2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_layers_utils.py @@ -0,0 +1,534 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +import numpy as np +import torch +from torch import nn + +from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU +from diffusers.models.embeddings import get_timestep_embedding +from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D +from diffusers.models.transformers.transformer_2d import Transformer2DModel + +from ..testing_utils import ( + backend_manual_seed, + require_torch_accelerator_with_fp64, + require_torch_version_greater_equal, + torch_device, +) + + +class EmbeddingsTests(unittest.TestCase): + def test_timestep_embeddings(self): + embedding_dim = 256 + timesteps = torch.arange(16) + + t1 = get_timestep_embedding(timesteps, embedding_dim) + + # first vector should always be composed only of 0's and 1's + assert (t1[0, : embedding_dim // 2] - 0).abs().sum() < 1e-5 + assert (t1[0, embedding_dim // 2 :] - 1).abs().sum() < 1e-5 + + # last element of each vector should be one + assert (t1[:, -1] - 1).abs().sum() < 1e-5 + + # For large embeddings (e.g. 
128) the frequency of every vector is higher + # than the previous one which means that the gradients of later vectors are + # ALWAYS higher than the previous ones + grad_mean = np.abs(np.gradient(t1, axis=-1)).mean(axis=1) + + prev_grad = 0.0 + for grad in grad_mean: + assert grad > prev_grad + prev_grad = grad + + def test_timestep_flip_sin_cos(self): + embedding_dim = 16 + timesteps = torch.arange(10) + + t1 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=True) + t1 = torch.cat([t1[:, embedding_dim // 2 :], t1[:, : embedding_dim // 2]], dim=-1) + + t2 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=False) + + assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3) + + def test_timestep_downscale_freq_shift(self): + embedding_dim = 16 + timesteps = torch.arange(10) + + t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0) + t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1) + + # get cosine half (vectors that are wrapped into cosine) + cosine_half = (t1 - t2)[:, embedding_dim // 2 :] + + # cosine needs to be negative + assert (np.abs((cosine_half <= 0).numpy()) - 1).sum() < 1e-5 + + def test_sinoid_embeddings_hardcoded(self): + embedding_dim = 64 + timesteps = torch.arange(128) + + # standard unet, score_vde + t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1, flip_sin_to_cos=False) + # glide, ldm + t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0, flip_sin_to_cos=True) + # grad-tts + t3 = get_timestep_embedding(timesteps, embedding_dim, scale=1000) + + assert torch.allclose( + t1[23:26, 47:50].flatten().cpu(), + torch.tensor([0.9646, 0.9804, 0.9892, 0.9615, 0.9787, 0.9882, 0.9582, 0.9769, 0.9872]), + 1e-3, + ) + assert torch.allclose( + t2[23:26, 47:50].flatten().cpu(), + torch.tensor([0.3019, 0.2280, 0.1716, 0.3146, 0.2377, 0.1790, 0.3272, 0.2474, 0.1864]), + 1e-3, + ) + assert torch.allclose( + t3[23:26, 47:50].flatten().cpu(), + torch.tensor([-0.9801, -0.9464, -0.9349, -0.3952, 0.8887, -0.9709, 0.5299, -0.2853, -0.9927]), + 1e-3, + ) + + +class Upsample2DBlockTests(unittest.TestCase): + def test_upsample_default(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 32, 32) + upsample = Upsample2D(channels=32, use_conv=False) + with torch.no_grad(): + upsampled = upsample(sample) + + assert upsampled.shape == (1, 32, 64, 64) + output_slice = upsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254]) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + @require_torch_version_greater_equal("2.1") + def test_upsample_bfloat16(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 32, 32).to(torch.bfloat16) + upsample = Upsample2D(channels=32, use_conv=False) + with torch.no_grad(): + upsampled = upsample(sample) + + assert upsampled.shape == (1, 32, 64, 64) + output_slice = upsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254], dtype=torch.bfloat16 + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_upsample_with_conv(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 32, 32) + upsample = Upsample2D(channels=32, use_conv=True) + with torch.no_grad(): + upsampled = upsample(sample) + + assert upsampled.shape == (1, 32, 64, 64) + output_slice = upsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([0.7145, 
1.3773, 0.3492, 0.8448, 1.0839, -0.3341, 0.5956, 0.1250, -0.4841]) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_upsample_with_conv_out_dim(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 32, 32) + upsample = Upsample2D(channels=32, use_conv=True, out_channels=64) + with torch.no_grad(): + upsampled = upsample(sample) + + assert upsampled.shape == (1, 64, 64, 64) + output_slice = upsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([0.2703, 0.1656, -0.2538, -0.0553, -0.2984, 0.1044, 0.1155, 0.2579, 0.7755]) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_upsample_with_transpose(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 32, 32) + upsample = Upsample2D(channels=32, use_conv=False, use_conv_transpose=True) + with torch.no_grad(): + upsampled = upsample(sample) + + assert upsampled.shape == (1, 32, 64, 64) + output_slice = upsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([-0.3028, -0.1582, 0.0071, 0.0350, -0.4799, -0.1139, 0.1056, -0.1153, -0.1046]) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + +class Downsample2DBlockTests(unittest.TestCase): + def test_downsample_default(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64) + downsample = Downsample2D(channels=32, use_conv=False) + with torch.no_grad(): + downsampled = downsample(sample) + + assert downsampled.shape == (1, 32, 32, 32) + output_slice = downsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([-0.0513, -0.3889, 0.0640, 0.0836, -0.5460, -0.0341, -0.0169, -0.6967, 0.1179]) + max_diff = (output_slice.flatten() - expected_slice).abs().sum().item() + assert max_diff <= 1e-3 + # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1) + + def test_downsample_with_conv(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64) + downsample = Downsample2D(channels=32, use_conv=True) + with torch.no_grad(): + downsampled = downsample(sample) + + assert downsampled.shape == (1, 32, 32, 32) + output_slice = downsampled[0, -1, -3:, -3:] + + expected_slice = torch.tensor( + [0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913], + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_downsample_with_conv_pad1(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64) + downsample = Downsample2D(channels=32, use_conv=True, padding=1) + with torch.no_grad(): + downsampled = downsample(sample) + + assert downsampled.shape == (1, 32, 32, 32) + output_slice = downsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913]) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_downsample_with_conv_out_dim(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64) + downsample = Downsample2D(channels=32, use_conv=True, out_channels=16) + with torch.no_grad(): + downsampled = downsample(sample) + + assert downsampled.shape == (1, 16, 32, 32) + output_slice = downsampled[0, -1, -3:, -3:] + expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522]) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + +class ResnetBlock2DTests(unittest.TestCase): + def test_resnet_default(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64).to(torch_device) + temb = torch.randn(1, 
128).to(torch_device) + resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128).to(torch_device) + with torch.no_grad(): + output_tensor = resnet_block(sample, temb) + + assert output_tensor.shape == (1, 32, 64, 64) + output_slice = output_tensor[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [-1.9010, -0.2974, -0.8245, -1.3533, 0.8742, -0.9645, -2.0584, 1.3387, -0.4746], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_restnet_with_use_in_shortcut(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64).to(torch_device) + temb = torch.randn(1, 128).to(torch_device) + resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, use_in_shortcut=True).to(torch_device) + with torch.no_grad(): + output_tensor = resnet_block(sample, temb) + + assert output_tensor.shape == (1, 32, 64, 64) + output_slice = output_tensor[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [0.2226, -1.0791, -0.1629, 0.3659, -0.2889, -1.2376, 0.0582, 0.9206, 0.0044], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_resnet_up(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64).to(torch_device) + temb = torch.randn(1, 128).to(torch_device) + resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, up=True).to(torch_device) + with torch.no_grad(): + output_tensor = resnet_block(sample, temb) + + assert output_tensor.shape == (1, 32, 128, 128) + output_slice = output_tensor[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [1.2130, -0.8753, -0.9027, 1.5783, -0.5362, -0.5001, 1.0726, -0.7732, -0.4182], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_resnet_down(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64).to(torch_device) + temb = torch.randn(1, 128).to(torch_device) + resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, down=True).to(torch_device) + with torch.no_grad(): + output_tensor = resnet_block(sample, temb) + + assert output_tensor.shape == (1, 32, 32, 32) + output_slice = output_tensor[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_restnet_with_kernel_fir(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64).to(torch_device) + temb = torch.randn(1, 128).to(torch_device) + resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="fir", down=True).to(torch_device) + with torch.no_grad(): + output_tensor = resnet_block(sample, temb) + + assert output_tensor.shape == (1, 32, 32, 32) + output_slice = output_tensor[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [-0.0934, -0.5729, 0.0909, -0.2710, -0.5044, 0.0243, -0.0665, -0.5267, -0.3136], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_restnet_with_kernel_sde_vp(self): + torch.manual_seed(0) + sample = torch.randn(1, 32, 64, 64).to(torch_device) + temb = torch.randn(1, 128).to(torch_device) + resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="sde_vp", down=True).to(torch_device) + with torch.no_grad(): + output_tensor = resnet_block(sample, temb) + + assert output_tensor.shape == (1, 32, 32, 32) + output_slice = output_tensor[0, -1, -3:, -3:] + expected_slice = torch.tensor( + 
[-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + +class Transformer2DModelTests(unittest.TestCase): + def test_spatial_transformer_default(self): + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + sample = torch.randn(1, 32, 64, 64).to(torch_device) + spatial_transformer_block = Transformer2DModel( + in_channels=32, + num_attention_heads=1, + attention_head_dim=32, + dropout=0.0, + cross_attention_dim=None, + ).to(torch_device) + with torch.no_grad(): + attention_scores = spatial_transformer_block(sample).sample + + assert attention_scores.shape == (1, 32, 64, 64) + output_slice = attention_scores[0, -1, -3:, -3:] + + expected_slice = torch.tensor( + [-1.9455, -0.0066, -1.3933, -1.5878, 0.5325, -0.6486, -1.8648, 0.7515, -0.9689], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_spatial_transformer_cross_attention_dim(self): + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + sample = torch.randn(1, 64, 64, 64).to(torch_device) + spatial_transformer_block = Transformer2DModel( + in_channels=64, + num_attention_heads=2, + attention_head_dim=32, + dropout=0.0, + cross_attention_dim=64, + ).to(torch_device) + with torch.no_grad(): + context = torch.randn(1, 4, 64).to(torch_device) + attention_scores = spatial_transformer_block(sample, context).sample + + assert attention_scores.shape == (1, 64, 64, 64) + output_slice = attention_scores[0, -1, -3:, -3:] + expected_slice = torch.tensor( + [0.0143, -0.6909, -2.1547, -1.8893, 1.4097, 0.1359, -0.2521, -1.3359, 0.2598], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_spatial_transformer_timestep(self): + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + num_embeds_ada_norm = 5 + + sample = torch.randn(1, 64, 64, 64).to(torch_device) + spatial_transformer_block = Transformer2DModel( + in_channels=64, + num_attention_heads=2, + attention_head_dim=32, + dropout=0.0, + cross_attention_dim=64, + num_embeds_ada_norm=num_embeds_ada_norm, + ).to(torch_device) + with torch.no_grad(): + timestep_1 = torch.tensor(1, dtype=torch.long).to(torch_device) + timestep_2 = torch.tensor(2, dtype=torch.long).to(torch_device) + attention_scores_1 = spatial_transformer_block(sample, timestep=timestep_1).sample + attention_scores_2 = spatial_transformer_block(sample, timestep=timestep_2).sample + + assert attention_scores_1.shape == (1, 64, 64, 64) + assert attention_scores_2.shape == (1, 64, 64, 64) + + output_slice_1 = attention_scores_1[0, -1, -3:, -3:] + output_slice_2 = attention_scores_2[0, -1, -3:, -3:] + + expected_slice = torch.tensor( + [-0.3923, -1.0923, -1.7144, -1.5570, 1.4154, 0.1738, -0.1157, -1.2998, -0.1703], device=torch_device + ) + expected_slice_2 = torch.tensor( + [-0.4311, -1.1376, -1.7732, -1.5997, 1.3450, 0.0964, -0.1569, -1.3590, -0.2348], device=torch_device + ) + + assert torch.allclose(output_slice_1.flatten(), expected_slice, atol=1e-3) + assert torch.allclose(output_slice_2.flatten(), expected_slice_2, atol=1e-3) + + def test_spatial_transformer_dropout(self): + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + sample = torch.randn(1, 32, 64, 64).to(torch_device) + spatial_transformer_block = ( + Transformer2DModel( + in_channels=32, + num_attention_heads=2, + attention_head_dim=16, + dropout=0.3, + cross_attention_dim=None, + ) + 
.to(torch_device) + .eval() + ) + with torch.no_grad(): + attention_scores = spatial_transformer_block(sample).sample + + assert attention_scores.shape == (1, 32, 64, 64) + output_slice = attention_scores[0, -1, -3:, -3:] + + expected_slice = torch.tensor( + [-1.9380, -0.0083, -1.3771, -1.5819, 0.5209, -0.6441, -1.8545, 0.7563, -0.9615], device=torch_device + ) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + @require_torch_accelerator_with_fp64 + def test_spatial_transformer_discrete(self): + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + num_embed = 5 + + sample = torch.randint(0, num_embed, (1, 32)).to(torch_device) + spatial_transformer_block = ( + Transformer2DModel( + num_attention_heads=1, + attention_head_dim=32, + num_vector_embeds=num_embed, + sample_size=16, + ) + .to(torch_device) + .eval() + ) + + with torch.no_grad(): + attention_scores = spatial_transformer_block(sample).sample + + assert attention_scores.shape == (1, num_embed - 1, 32) + + output_slice = attention_scores[0, -2:, -3:] + + expected_slice = torch.tensor([-1.7648, -1.0241, -2.0985, -1.8035, -1.6404, -1.2098], device=torch_device) + assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) + + def test_spatial_transformer_default_norm_layers(self): + spatial_transformer_block = Transformer2DModel(num_attention_heads=1, attention_head_dim=32, in_channels=32) + + assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == nn.LayerNorm + assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm + + def test_spatial_transformer_ada_norm_layers(self): + spatial_transformer_block = Transformer2DModel( + num_attention_heads=1, + attention_head_dim=32, + in_channels=32, + num_embeds_ada_norm=5, + ) + + assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == AdaLayerNorm + assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm + + def test_spatial_transformer_default_ff_layers(self): + spatial_transformer_block = Transformer2DModel( + num_attention_heads=1, + attention_head_dim=32, + in_channels=32, + ) + + assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == GEGLU + assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout + assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == nn.Linear + + dim = 32 + inner_dim = 128 + + # First dimension change + assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim + # NOTE: inner_dim * 2 because GEGLU + assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim * 2 + + # Second dimension change + assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim + assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim + + def test_spatial_transformer_geglu_approx_ff_layers(self): + spatial_transformer_block = Transformer2DModel( + num_attention_heads=1, + attention_head_dim=32, + in_channels=32, + activation_fn="geglu-approximate", + ) + + assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == ApproximateGELU + assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout + assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == nn.Linear + + dim = 32 + inner_dim = 128 + + # First dimension change + assert 
spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim + assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim + + # Second dimension change + assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim + assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim + + def test_spatial_transformer_attention_bias(self): + spatial_transformer_block = Transformer2DModel( + num_attention_heads=1, attention_head_dim=32, in_channels=32, attention_bias=True + ) + + assert spatial_transformer_block.transformer_blocks[0].attn1.to_q.bias is not None + assert spatial_transformer_block.transformer_blocks[0].attn1.to_k.bias is not None + assert spatial_transformer_block.transformer_blocks[0].attn1.to_v.bias is not None diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_modeling_common.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_modeling_common.py new file mode 100644 index 0000000000000000000000000000000000000000..36eb2c1ef4882370206ac640291591bbd0b63100 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_modeling_common.py @@ -0,0 +1,2409 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
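+# Shared model-level tests: save/load round-trips, attention-processor switching, LoRA adapter
+# handling, sharded checkpoints, device-map offloading/parallelism, and layerwise casting.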
+ +import copy +import gc +import glob +import inspect +import json +import os +import re +import tempfile +import traceback +import unittest +import unittest.mock as mock +import uuid +import warnings +from collections import defaultdict +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np +import pytest +import requests_mock +import safetensors.torch +import torch +import torch.nn as nn +from accelerate.utils.modeling import _get_proper_dtype, compute_module_sizes, dtype_byte_size +from huggingface_hub import ModelCard, delete_repo, snapshot_download, try_to_load_from_cache +from huggingface_hub.utils import is_jinja_available +from parameterized import parameterized +from requests.exceptions import HTTPError + +from diffusers.models import FluxTransformer2DModel, SD3Transformer2DModel, UNet2DConditionModel +from diffusers.models.attention_processor import ( + AttnProcessor, + AttnProcessor2_0, + AttnProcessorNPU, + XFormersAttnProcessor, +) +from diffusers.models.auto_model import AutoModel +from diffusers.training_utils import EMAModel +from diffusers.utils import ( + SAFE_WEIGHTS_INDEX_NAME, + WEIGHTS_INDEX_NAME, + is_peft_available, + is_torch_npu_available, + is_xformers_available, + logging, +) +from diffusers.utils.hub_utils import _add_variant +from diffusers.utils.torch_utils import get_torch_cuda_device_capability + +from ..others.test_utils import TOKEN, USER, is_staging_test +from ..testing_utils import ( + CaptureLogger, + _check_safetensors_serialization, + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_peak_memory_stats, + backend_synchronize, + check_if_dicts_are_equal, + get_python_version, + is_torch_compile, + numpy_cosine_similarity_distance, + require_peft_backend, + require_peft_version_greater, + require_torch_2, + require_torch_accelerator, + require_torch_accelerator_with_training, + require_torch_multi_accelerator, + require_torch_version_greater, + run_test_in_subprocess, + slow, + torch_all_close, + torch_device, +) + + +if is_peft_available(): + from peft.tuners.tuners_utils import BaseTunerLayer + + +def caculate_expected_num_shards(index_map_path): + with open(index_map_path) as f: + weight_map_dict = json.load(f)["weight_map"] + first_key = list(weight_map_dict.keys())[0] + weight_loc = weight_map_dict[first_key] # e.g., diffusion_pytorch_model-00001-of-00002.safetensors + expected_num_shards = int(weight_loc.split("-")[-1].split(".")[0]) + return expected_num_shards + + +def check_if_lora_correctly_set(model) -> bool: + """ + Checks if the LoRA layers are correctly set with peft + """ + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return True + return False + + +# Will be run via run_test_in_subprocess +def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): + error = None + try: + init_dict, model_class = in_queue.get(timeout=timeout) + + model = model_class(**init_dict) + model.to(torch_device) + model = torch.compile(model) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, safe_serialization=False) + new_model = model_class.from_pretrained(tmpdirname) + new_model.to(torch_device) + + assert new_model.__class__ == model_class + except Exception: + error = f"{traceback.format_exc()}" + + results = {"error": error} + out_queue.put(results, timeout=timeout) + out_queue.join() + + +def named_persistent_module_tensors( + module: nn.Module, + recurse: bool = False, +): + """ + A helper function that gathers all the tensors 
(parameters + persistent buffers) of a given module. + + Args: + module (`torch.nn.Module`): + The module we want the tensors on. + recurse (`bool`, *optional`, defaults to `False`): + Whether or not to go look in every submodule or just return the direct parameters and buffers. + """ + yield from module.named_parameters(recurse=recurse) + + for named_buffer in module.named_buffers(recurse=recurse): + name, _ = named_buffer + # Get parent by splitting on dots and traversing the model + parent = module + if "." in name: + parent_name = name.rsplit(".", 1)[0] + for part in parent_name.split("."): + parent = getattr(parent, part) + name = name.split(".")[-1] + if name not in parent._non_persistent_buffers_set: + yield named_buffer + + +def compute_module_persistent_sizes( + model: nn.Module, + dtype: Optional[Union[str, torch.device]] = None, + special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, +): + """ + Compute the size of each submodule of a given model (parameters + persistent buffers). + """ + if dtype is not None: + dtype = _get_proper_dtype(dtype) + dtype_size = dtype_byte_size(dtype) + if special_dtypes is not None: + special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} + special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} + module_sizes = defaultdict(int) + + module_list = [] + + module_list = named_persistent_module_tensors(model, recurse=True) + + for name, tensor in module_list: + if special_dtypes is not None and name in special_dtypes: + size = tensor.numel() * special_dtypes_size[name] + elif dtype is None: + size = tensor.numel() * dtype_byte_size(tensor.dtype) + elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): + # According to the code in set_module_tensor_to_device, these types won't be converted + # so use their original size here + size = tensor.numel() * dtype_byte_size(tensor.dtype) + else: + size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) + name_parts = name.split(".") + for idx in range(len(name_parts) + 1): + module_sizes[".".join(name_parts[:idx])] += size + + return module_sizes + + +def cast_maybe_tensor_dtype(maybe_tensor, current_dtype, target_dtype): + if torch.is_tensor(maybe_tensor): + return maybe_tensor.to(target_dtype) if maybe_tensor.dtype == current_dtype else maybe_tensor + if isinstance(maybe_tensor, dict): + return {k: cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for k, v in maybe_tensor.items()} + if isinstance(maybe_tensor, list): + return [cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for v in maybe_tensor] + return maybe_tensor + + +class ModelUtilsTest(unittest.TestCase): + def tearDown(self): + super().tearDown() + + def test_missing_key_loading_warning_message(self): + with self.assertLogs("diffusers.models.modeling_utils", level="WARNING") as logs: + UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet") + + # make sure that error message states what keys are missing + assert "conv_out.bias" in " ".join(logs.output) + + @parameterized.expand( + [ + ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", False), + ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", True), + ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, False), + ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, True), + ] + ) + def test_variant_sharded_ckpt_legacy_format_raises_warning(self, 
repo_id, subfolder, use_local): + def load_model(path): + kwargs = {"variant": "fp16"} + if subfolder: + kwargs["subfolder"] = subfolder + return UNet2DConditionModel.from_pretrained(path, **kwargs) + + with self.assertWarns(FutureWarning) as warning: + if use_local: + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = snapshot_download(repo_id=repo_id) + _ = load_model(tmpdirname) + else: + _ = load_model(repo_id) + + warning_message = str(warning.warnings[0].message) + self.assertIn("This serialization format is now deprecated to standardize the serialization", warning_message) + + # Local tests are already covered down below. + @parameterized.expand( + [ + ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", None, "fp16"), + ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "unet", "fp16"), + ("hf-internal-testing/tiny-sd-unet-sharded-no-variants", None, None), + ("hf-internal-testing/tiny-sd-unet-sharded-no-variants-subfolder", "unet", None), + ] + ) + def test_variant_sharded_ckpt_loads_from_hub(self, repo_id, subfolder, variant=None): + def load_model(): + kwargs = {} + if variant: + kwargs["variant"] = variant + if subfolder: + kwargs["subfolder"] = subfolder + return UNet2DConditionModel.from_pretrained(repo_id, **kwargs) + + assert load_model() + + def test_cached_files_are_used_when_no_internet(self): + # A mock response for an HTTP head request to emulate server down + response_mock = mock.Mock() + response_mock.status_code = 500 + response_mock.headers = {} + response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} + + # Download this model to make sure it's in the cache. + orig_model = UNet2DConditionModel.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet" + ) + + # Under the mock environment we get a 500 error when trying to reach the model. + with mock.patch("requests.request", return_value=response_mock): + # Download this model to make sure it's in the cache. + model = UNet2DConditionModel.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True + ) + + for p1, p2 in zip(orig_model.parameters(), model.parameters()): + if p1.data.ne(p2.data).sum() > 0: + assert False, "Parameters not the same!" + + def test_local_files_only_with_sharded_checkpoint(self): + repo_id = "hf-internal-testing/tiny-flux-sharded" + error_response = mock.Mock( + status_code=500, + headers={}, + raise_for_status=mock.Mock(side_effect=HTTPError), + json=mock.Mock(return_value={}), + ) + + with tempfile.TemporaryDirectory() as tmpdir: + model = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", cache_dir=tmpdir) + + with mock.patch("requests.Session.get", return_value=error_response): + # Should fail with local_files_only=False (network required) + # We would make a network call with model_info + with self.assertRaises(OSError): + FluxTransformer2DModel.from_pretrained( + repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=False + ) + + # Should succeed with local_files_only=True (uses cache) + # model_info call skipped + local_model = FluxTransformer2DModel.from_pretrained( + repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=True + ) + + assert all(torch.equal(p1, p2) for p1, p2 in zip(model.parameters(), local_model.parameters())), ( + "Model parameters don't match!" 
+ ) + + # Remove a shard file + cached_shard_file = try_to_load_from_cache( + repo_id, filename="transformer/diffusion_pytorch_model-00001-of-00002.safetensors", cache_dir=tmpdir + ) + os.remove(cached_shard_file) + + # Attempting to load from cache should raise an error + with self.assertRaises(OSError) as context: + FluxTransformer2DModel.from_pretrained( + repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=True + ) + + # Verify error mentions the missing shard + error_msg = str(context.exception) + assert cached_shard_file in error_msg or "required according to the checkpoint index" in error_msg, ( + f"Expected error about missing shard, got: {error_msg}" + ) + + @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners") + @unittest.skipIf(torch_device == "mps", reason="Test not supported for MPS.") + def test_one_request_upon_cached(self): + use_safetensors = False + + with tempfile.TemporaryDirectory() as tmpdirname: + with requests_mock.mock(real_http=True) as m: + UNet2DConditionModel.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", + subfolder="unet", + cache_dir=tmpdirname, + use_safetensors=use_safetensors, + ) + + download_requests = [r.method for r in m.request_history] + assert download_requests.count("HEAD") == 3, ( + "3 HEAD requests one for config, one for model, and one for shard index file." + ) + assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" + + with requests_mock.mock(real_http=True) as m: + UNet2DConditionModel.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", + subfolder="unet", + cache_dir=tmpdirname, + use_safetensors=use_safetensors, + ) + + cache_requests = [r.method for r in m.request_history] + assert "HEAD" == cache_requests[0] and len(cache_requests) == 2, ( + "We should call only `model_info` to check for commit hash and knowing if shard index is present." + ) + + def test_weight_overwrite(self): + with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: + UNet2DConditionModel.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", + subfolder="unet", + cache_dir=tmpdirname, + in_channels=9, + ) + + # make sure that error message states what keys are missing + assert "Cannot load" in str(error_context.exception) + + with tempfile.TemporaryDirectory() as tmpdirname: + model = UNet2DConditionModel.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", + subfolder="unet", + cache_dir=tmpdirname, + in_channels=9, + low_cpu_mem_usage=False, + ignore_mismatched_sizes=True, + ) + + assert model.config.in_channels == 9 + + @require_torch_accelerator + def test_keep_modules_in_fp32(self): + r""" + A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32 when we load the model in fp16/bf16 + Also ensures if inference works. 
+ """ + fp32_modules = SD3Transformer2DModel._keep_in_fp32_modules + + for torch_dtype in [torch.bfloat16, torch.float16]: + SD3Transformer2DModel._keep_in_fp32_modules = ["proj_out"] + + model = SD3Transformer2DModel.from_pretrained( + "hf-internal-testing/tiny-sd3-pipe", subfolder="transformer", torch_dtype=torch_dtype + ).to(torch_device) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if name in model._keep_in_fp32_modules: + self.assertTrue(module.weight.dtype == torch.float32) + else: + self.assertTrue(module.weight.dtype == torch_dtype) + + def get_dummy_inputs(): + batch_size = 2 + num_channels = 4 + height = width = embedding_dim = 32 + pooled_embedding_dim = embedding_dim * 2 + sequence_length = 154 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "timestep": timestep, + } + + # test if inference works. + with torch.no_grad() and torch.amp.autocast(torch_device, dtype=torch_dtype): + input_dict_for_transformer = get_dummy_inputs() + model_inputs = { + k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) + } + model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) + _ = model(**model_inputs) + + SD3Transformer2DModel._keep_in_fp32_modules = fp32_modules + + +class UNetTesterMixin: + def test_forward_with_norm_groups(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["norm_num_groups"] = 16 + init_dict["block_out_channels"] = (16, 32) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + +class ModelTesterMixin: + main_input_name = None # overwrite in model specific tester class + base_precision = 1e-3 + forward_requires_fresh_args = False + model_split_percents = [0.5, 0.7, 0.9] + uses_custom_attn_processor = False + + def check_device_map_is_respected(self, model, device_map): + for param_name, param in model.named_parameters(): + # Find device in device_map + while len(param_name) > 0 and param_name not in device_map: + param_name = ".".join(param_name.split(".")[:-1]) + if param_name not in device_map: + raise ValueError("device map is incomplete, it does not contain any device for `param_name`.") + + param_device = device_map[param_name] + if param_device in ["cpu", "disk"]: + self.assertEqual(param.device, torch.device("meta")) + else: + self.assertEqual(param.device, torch.device(param_device)) + + def test_from_save_pretrained(self, expected_max_diff=5e-5): + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + if hasattr(model, "set_default_attn_processor"): + 
model.set_default_attn_processor() + model.to(torch_device) + model.eval() + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, safe_serialization=False) + new_model = self.model_class.from_pretrained(tmpdirname) + if hasattr(new_model, "set_default_attn_processor"): + new_model.set_default_attn_processor() + new_model.to(torch_device) + + with torch.no_grad(): + if self.forward_requires_fresh_args: + image = model(**self.inputs_dict(0)) + else: + image = model(**inputs_dict) + + if isinstance(image, dict): + image = image.to_tuple()[0] + + if self.forward_requires_fresh_args: + new_image = new_model(**self.inputs_dict(0)) + else: + new_image = new_model(**inputs_dict) + + if isinstance(new_image, dict): + new_image = new_image.to_tuple()[0] + + max_diff = (image - new_image).abs().max().item() + self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") + + def test_getattr_is_correct(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + # save some things to test + model.dummy_attribute = 5 + model.register_to_config(test_attribute=5) + + logger = logging.get_logger("diffusers.models.modeling_utils") + # 30 for warning + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + assert hasattr(model, "dummy_attribute") + assert getattr(model, "dummy_attribute") == 5 + assert model.dummy_attribute == 5 + + # no warning should be thrown + assert cap_logger.out == "" + + logger = logging.get_logger("diffusers.models.modeling_utils") + # 30 for warning + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + assert hasattr(model, "save_pretrained") + fn = model.save_pretrained + fn_1 = getattr(model, "save_pretrained") + + assert fn == fn_1 + # no warning should be thrown + assert cap_logger.out == "" + + # warning should be thrown + with self.assertWarns(FutureWarning): + assert model.test_attribute == 5 + + with self.assertWarns(FutureWarning): + assert getattr(model, "test_attribute") == 5 + + with self.assertRaises(AttributeError) as error: + model.does_not_exist + + assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'" + + @unittest.skipIf( + torch_device != "npu" or not is_torch_npu_available(), + reason="torch npu flash attention is only available with NPU and `torch_npu` installed", + ) + def test_set_torch_npu_flash_attn_processor_determinism(self): + torch.use_deterministic_algorithms(False) + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + if not hasattr(model, "set_attn_processor"): + # If not has `set_attn_processor`, skip test + return + + model.set_default_attn_processor() + assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output = model(**self.inputs_dict(0))[0] + else: + output = model(**inputs_dict)[0] + + model.enable_npu_flash_attention() + assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_2 = model(**self.inputs_dict(0))[0] + else: + output_2 = model(**inputs_dict)[0] + + model.set_attn_processor(AttnProcessorNPU()) + assert all(type(proc) == AttnProcessorNPU for proc in 
model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_3 = model(**self.inputs_dict(0))[0] + else: + output_3 = model(**inputs_dict)[0] + + torch.use_deterministic_algorithms(True) + + assert torch.allclose(output, output_2, atol=self.base_precision) + assert torch.allclose(output, output_3, atol=self.base_precision) + assert torch.allclose(output_2, output_3, atol=self.base_precision) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_set_xformers_attn_processor_for_determinism(self): + torch.use_deterministic_algorithms(False) + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + if not hasattr(model, "set_attn_processor"): + # If not has `set_attn_processor`, skip test + return + + if not hasattr(model, "set_default_attn_processor"): + # If not has `set_attn_processor`, skip test + return + + model.set_default_attn_processor() + assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output = model(**self.inputs_dict(0))[0] + else: + output = model(**inputs_dict)[0] + + model.enable_xformers_memory_efficient_attention() + assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_2 = model(**self.inputs_dict(0))[0] + else: + output_2 = model(**inputs_dict)[0] + + model.set_attn_processor(XFormersAttnProcessor()) + assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_3 = model(**self.inputs_dict(0))[0] + else: + output_3 = model(**inputs_dict)[0] + + torch.use_deterministic_algorithms(True) + + assert torch.allclose(output, output_2, atol=self.base_precision) + assert torch.allclose(output, output_3, atol=self.base_precision) + assert torch.allclose(output_2, output_3, atol=self.base_precision) + + @require_torch_accelerator + def test_set_attn_processor_for_determinism(self): + if self.uses_custom_attn_processor: + return + + torch.use_deterministic_algorithms(False) + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.to(torch_device) + + if not hasattr(model, "set_attn_processor"): + # If not has `set_attn_processor`, skip test + return + + assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_1 = model(**self.inputs_dict(0))[0] + else: + output_1 = model(**inputs_dict)[0] + + model.set_default_attn_processor() + assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_2 = model(**self.inputs_dict(0))[0] + else: + output_2 = model(**inputs_dict)[0] + + model.set_attn_processor(AttnProcessor2_0()) + assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_4 = 
model(**self.inputs_dict(0))[0] + else: + output_4 = model(**inputs_dict)[0] + + model.set_attn_processor(AttnProcessor()) + assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_5 = model(**self.inputs_dict(0))[0] + else: + output_5 = model(**inputs_dict)[0] + + torch.use_deterministic_algorithms(True) + + # make sure that outputs match + assert torch.allclose(output_2, output_1, atol=self.base_precision) + assert torch.allclose(output_2, output_4, atol=self.base_precision) + assert torch.allclose(output_2, output_5, atol=self.base_precision) + + def test_from_save_pretrained_variant(self, expected_max_diff=5e-5): + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + if hasattr(model, "set_default_attn_processor"): + model.set_default_attn_processor() + + model.to(torch_device) + model.eval() + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) + new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") + if hasattr(new_model, "set_default_attn_processor"): + new_model.set_default_attn_processor() + + # non-variant cannot be loaded + with self.assertRaises(OSError) as error_context: + self.model_class.from_pretrained(tmpdirname) + + # make sure that error message states what keys are missing + assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) + + new_model.to(torch_device) + + with torch.no_grad(): + if self.forward_requires_fresh_args: + image = model(**self.inputs_dict(0)) + else: + image = model(**inputs_dict) + if isinstance(image, dict): + image = image.to_tuple()[0] + + if self.forward_requires_fresh_args: + new_image = new_model(**self.inputs_dict(0)) + else: + new_image = new_model(**inputs_dict) + + if isinstance(new_image, dict): + new_image = new_image.to_tuple()[0] + + max_diff = (image - new_image).abs().max().item() + self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") + + @is_torch_compile + @require_torch_2 + @unittest.skipIf( + get_python_version == (3, 12), + reason="Torch Dynamo isn't yet supported for Python 3.12.", + ) + def test_from_save_pretrained_dynamo(self): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + inputs = [init_dict, self.model_class] + run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs) + + def test_from_save_pretrained_dtype(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + for dtype in [torch.float32, torch.float16, torch.bfloat16]: + if torch_device == "mps" and dtype == torch.bfloat16: + continue + with tempfile.TemporaryDirectory() as tmpdirname: + model.to(dtype) + model.save_pretrained(tmpdirname, safe_serialization=False) + new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=True, torch_dtype=dtype) + assert new_model.dtype == dtype + if ( + hasattr(self.model_class, "_keep_in_fp32_modules") + and self.model_class._keep_in_fp32_modules is None + ): + new_model = self.model_class.from_pretrained( + tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype + ) + assert new_model.dtype == dtype + + def 
test_determinism(self, expected_max_diff=1e-5): + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + if self.forward_requires_fresh_args: + first = model(**self.inputs_dict(0)) + else: + first = model(**inputs_dict) + if isinstance(first, dict): + first = first.to_tuple()[0] + + if self.forward_requires_fresh_args: + second = model(**self.inputs_dict(0)) + else: + second = model(**inputs_dict) + if isinstance(second, dict): + second = second.to_tuple()[0] + + out_1 = first.cpu().numpy() + out_2 = second.cpu().numpy() + out_1 = out_1[~np.isnan(out_1)] + out_2 = out_2[~np.isnan(out_2)] + max_diff = np.amax(np.abs(out_1 - out_2)) + self.assertLessEqual(max_diff, expected_max_diff) + + def test_output(self, expected_output_shape=None): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + + # input & output have to have the same shape + input_tensor = inputs_dict[self.main_input_name] + + if expected_output_shape is None: + expected_shape = input_tensor.shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + else: + self.assertEqual(output.shape, expected_output_shape, "Input and output shapes do not match") + + def test_model_from_pretrained(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + # test if the model can be loaded from the config + # and has all the expected shape + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, safe_serialization=False) + new_model = self.model_class.from_pretrained(tmpdirname) + new_model.to(torch_device) + new_model.eval() + + # check if all parameters shape are the same + for param_name in model.state_dict().keys(): + param_1 = model.state_dict()[param_name] + param_2 = new_model.state_dict()[param_name] + self.assertEqual(param_1.shape, param_2.shape) + + with torch.no_grad(): + output_1 = model(**inputs_dict) + + if isinstance(output_1, dict): + output_1 = output_1.to_tuple()[0] + + output_2 = new_model(**inputs_dict) + + if isinstance(output_2, dict): + output_2 = output_2.to_tuple()[0] + + self.assertEqual(output_1.shape, output_2.shape) + + @require_torch_accelerator_with_training + def test_training(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.train() + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + input_tensor = inputs_dict[self.main_input_name] + noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) + loss = torch.nn.functional.mse_loss(output, noise) + loss.backward() + + @require_torch_accelerator_with_training + def test_ema_training(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model.to(torch_device) + model.train() + ema_model = EMAModel(model.parameters()) + + output = model(**inputs_dict) + + if isinstance(output, 
dict): + output = output.to_tuple()[0] + + input_tensor = inputs_dict[self.main_input_name] + noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) + loss = torch.nn.functional.mse_loss(output, noise) + loss.backward() + ema_model.step(model.parameters()) + + def test_outputs_equivalence(self): + def set_nan_tensor_to_zero(t): + # Temporary fallback until `aten::_index_put_impl_` is implemented in mps + # Track progress in https://github.com/pytorch/pytorch/issues/77764 + device = t.device + if device.type == "mps": + t = t.to("cpu") + t[t != t] = 0 + return t.to(device) + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + torch.allclose( + set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 + ), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" + f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." + ), + ) + + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.to(torch_device) + model.eval() + + with torch.no_grad(): + if self.forward_requires_fresh_args: + outputs_dict = model(**self.inputs_dict(0)) + outputs_tuple = model(**self.inputs_dict(0), return_dict=False) + else: + outputs_dict = model(**inputs_dict) + outputs_tuple = model(**inputs_dict, return_dict=False) + + recursive_check(outputs_tuple, outputs_dict) + + @require_torch_accelerator_with_training + def test_enable_disable_gradient_checkpointing(self): + # Skip test if model does not support gradient checkpointing + if not self.model_class._supports_gradient_checkpointing: + pytest.skip("Gradient checkpointing is not supported.") + + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + + # at init model should have gradient checkpointing disabled + model = self.model_class(**init_dict) + self.assertFalse(model.is_gradient_checkpointing) + + # check enable works + model.enable_gradient_checkpointing() + self.assertTrue(model.is_gradient_checkpointing) + + # check disable works + model.disable_gradient_checkpointing() + self.assertFalse(model.is_gradient_checkpointing) + + @require_torch_accelerator_with_training + def test_effective_gradient_checkpointing(self, loss_tolerance=1e-5, param_grad_tol=5e-5, skip: set[str] = {}): + # Skip test if model does not support gradient checkpointing + if not self.model_class._supports_gradient_checkpointing: + pytest.skip("Gradient checkpointing is not supported.") + + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + inputs_dict_copy = copy.deepcopy(inputs_dict) + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + + assert not model.is_gradient_checkpointing and model.training + + 
out = model(**inputs_dict).sample + # run the backwards pass on the model. For backwards pass, for simplicity purpose, + # we won't calculate the loss and rather backprop on out.sum() + model.zero_grad() + + labels = torch.randn_like(out) + loss = (out - labels).mean() + loss.backward() + + # re-instantiate the model now enabling gradient checkpointing + torch.manual_seed(0) + model_2 = self.model_class(**init_dict) + # clone model + model_2.load_state_dict(model.state_dict()) + model_2.to(torch_device) + model_2.enable_gradient_checkpointing() + + assert model_2.is_gradient_checkpointing and model_2.training + + out_2 = model_2(**inputs_dict_copy).sample + # run the backwards pass on the model. For backwards pass, for simplicity purpose, + # we won't calculate the loss and rather backprop on out.sum() + model_2.zero_grad() + loss_2 = (out_2 - labels).mean() + loss_2.backward() + + # compare the output and parameters gradients + self.assertTrue((loss - loss_2).abs() < loss_tolerance) + named_params = dict(model.named_parameters()) + named_params_2 = dict(model_2.named_parameters()) + + for name, param in named_params.items(): + if "post_quant_conv" in name: + continue + if name in skip: + continue + # TODO(aryan): remove the below lines after looking into easyanimate transformer a little more + # It currently errors out the gradient checkpointing test because the gradients for attn2.to_out is None + if param.grad is None: + continue + self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=param_grad_tol)) + + @unittest.skipIf(torch_device == "mps", "This test is not supported for MPS devices.") + def test_gradient_checkpointing_is_applied( + self, expected_set=None, attention_head_dim=None, num_attention_heads=None, block_out_channels=None + ): + # Skip test if model does not support gradient checkpointing + if not self.model_class._supports_gradient_checkpointing: + pytest.skip("Gradient checkpointing is not supported.") + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + if attention_head_dim is not None: + init_dict["attention_head_dim"] = attention_head_dim + if num_attention_heads is not None: + init_dict["num_attention_heads"] = num_attention_heads + if block_out_channels is not None: + init_dict["block_out_channels"] = block_out_channels + + model_class_copy = copy.copy(self.model_class) + model = model_class_copy(**init_dict) + model.enable_gradient_checkpointing() + + modules_with_gc_enabled = {} + for submodule in model.modules(): + if hasattr(submodule, "gradient_checkpointing"): + self.assertTrue(submodule.gradient_checkpointing) + modules_with_gc_enabled[submodule.__class__.__name__] = True + + assert set(modules_with_gc_enabled.keys()) == expected_set + assert all(modules_with_gc_enabled.values()), "All modules should be enabled" + + def test_deprecated_kwargs(self): + has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters + has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 + + if has_kwarg_in_model_class and not has_deprecated_kwarg: + raise ValueError( + f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" + " under the `_deprecated_kwargs` class attribute. 
Make sure to either remove `**kwargs` if there are" + " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" + " []`" + ) + + if not has_kwarg_in_model_class and has_deprecated_kwarg: + raise ValueError( + f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" + " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to" + f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" + " from `_deprecated_kwargs = []`" + ) + + @parameterized.expand([(4, 4, True), (4, 8, False), (8, 4, False)]) + @torch.no_grad() + @unittest.skipIf(not is_peft_available(), "Only with PEFT") + def test_save_load_lora_adapter(self, rank, lora_alpha, use_dora=False): + from peft import LoraConfig + from peft.utils import get_peft_model_state_dict + + from diffusers.loaders.peft import PeftAdapterMixin + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + if not issubclass(model.__class__, PeftAdapterMixin): + pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).") + + torch.manual_seed(0) + output_no_lora = model(**inputs_dict, return_dict=False)[0] + + denoiser_lora_config = LoraConfig( + r=rank, + lora_alpha=lora_alpha, + target_modules=["to_q", "to_k", "to_v", "to_out.0"], + init_lora_weights=False, + use_dora=use_dora, + ) + model.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + torch.manual_seed(0) + outputs_with_lora = model(**inputs_dict, return_dict=False)[0] + + self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora, atol=1e-4, rtol=1e-4)) + + with tempfile.TemporaryDirectory() as tmpdir: + model.save_lora_adapter(tmpdir) + self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))) + + state_dict_loaded = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")) + + model.unload_lora() + self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True) + state_dict_retrieved = get_peft_model_state_dict(model, adapter_name="default_0") + + for k in state_dict_loaded: + loaded_v = state_dict_loaded[k] + retrieved_v = state_dict_retrieved[k].to(loaded_v.device) + self.assertTrue(torch.allclose(loaded_v, retrieved_v)) + + self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + torch.manual_seed(0) + outputs_with_lora_2 = model(**inputs_dict, return_dict=False)[0] + + self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4)) + self.assertTrue(torch.allclose(outputs_with_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4)) + + @unittest.skipIf(not is_peft_available(), "Only with PEFT") + def test_lora_wrong_adapter_name_raises_error(self): + from peft import LoraConfig + + from diffusers.loaders.peft import PeftAdapterMixin + + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + if not issubclass(model.__class__, PeftAdapterMixin): + pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).") + + denoiser_lora_config = LoraConfig( + r=4, + lora_alpha=4, + target_modules=["to_q", "to_k", "to_v", "to_out.0"], + init_lora_weights=False, + use_dora=False, + ) + 
model.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + with tempfile.TemporaryDirectory() as tmpdir: + wrong_name = "foo" + with self.assertRaises(ValueError) as err_context: + model.save_lora_adapter(tmpdir, adapter_name=wrong_name) + + self.assertTrue(f"Adapter name {wrong_name} not found in the model." in str(err_context.exception)) + + @parameterized.expand([(4, 4, True), (4, 8, False), (8, 4, False)]) + @torch.no_grad() + @unittest.skipIf(not is_peft_available(), "Only with PEFT") + def test_lora_adapter_metadata_is_loaded_correctly(self, rank, lora_alpha, use_dora): + from peft import LoraConfig + + from diffusers.loaders.peft import PeftAdapterMixin + + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + if not issubclass(model.__class__, PeftAdapterMixin): + pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).") + + denoiser_lora_config = LoraConfig( + r=rank, + lora_alpha=lora_alpha, + target_modules=["to_q", "to_k", "to_v", "to_out.0"], + init_lora_weights=False, + use_dora=use_dora, + ) + model.add_adapter(denoiser_lora_config) + metadata = model.peft_config["default"].to_dict() + self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + with tempfile.TemporaryDirectory() as tmpdir: + model.save_lora_adapter(tmpdir) + model_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors") + self.assertTrue(os.path.isfile(model_file)) + + model.unload_lora() + self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True) + parsed_metadata = model.peft_config["default_0"].to_dict() + check_if_dicts_are_equal(metadata, parsed_metadata) + + @torch.no_grad() + @unittest.skipIf(not is_peft_available(), "Only with PEFT") + def test_lora_adapter_wrong_metadata_raises_error(self): + from peft import LoraConfig + + from diffusers.loaders.lora_base import LORA_ADAPTER_METADATA_KEY + from diffusers.loaders.peft import PeftAdapterMixin + + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + if not issubclass(model.__class__, PeftAdapterMixin): + pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).") + + denoiser_lora_config = LoraConfig( + r=4, + lora_alpha=4, + target_modules=["to_q", "to_k", "to_v", "to_out.0"], + init_lora_weights=False, + use_dora=False, + ) + model.add_adapter(denoiser_lora_config) + self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + with tempfile.TemporaryDirectory() as tmpdir: + model.save_lora_adapter(tmpdir) + model_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors") + self.assertTrue(os.path.isfile(model_file)) + + # Perturb the metadata in the state dict. 
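+ # Injecting unknown keys ("foo", "bar") into the stored LoRA metadata should make reconstructing the
+ # LoraConfig fail, so the subsequent `load_lora_adapter` call below is expected to raise a TypeError.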
+ loaded_state_dict = safetensors.torch.load_file(model_file) + metadata = {"format": "pt"} + lora_adapter_metadata = denoiser_lora_config.to_dict() + lora_adapter_metadata.update({"foo": 1, "bar": 2}) + for key, value in lora_adapter_metadata.items(): + if isinstance(value, set): + lora_adapter_metadata[key] = list(value) + metadata[LORA_ADAPTER_METADATA_KEY] = json.dumps(lora_adapter_metadata, indent=2, sort_keys=True) + safetensors.torch.save_file(loaded_state_dict, model_file, metadata=metadata) + + model.unload_lora() + self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly") + + with self.assertRaises(TypeError) as err_context: + model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True) + self.assertTrue("`LoraConfig` class could not be instantiated" in str(err_context.exception)) + + @require_torch_accelerator + def test_cpu_offload(self): + if self.model_class._no_split_modules is None: + pytest.skip("Test not supported for this model as `_no_split_modules` is not set.") + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + + model = model.to(torch_device) + + torch.manual_seed(0) + base_output = model(**inputs_dict) + + model_size = compute_module_sizes(model)[""] + # We test several splits of sizes to make sure it works. + max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir) + + for max_size in max_gpu_sizes: + max_memory = {0: max_size, "cpu": model_size * 2} + new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) + # Making sure part of the model will actually end up offloaded + self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) + + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + torch.manual_seed(0) + new_output = new_model(**inputs_dict) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + @require_torch_accelerator + def test_disk_offload_without_safetensors(self): + if self.model_class._no_split_modules is None: + pytest.skip("Test not supported for this model as `_no_split_modules` is not set.") + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + + model = model.to(torch_device) + + torch.manual_seed(0) + base_output = model(**inputs_dict) + + model_size = compute_module_sizes(model)[""] + max_size = int(self.model_split_percents[0] * model_size) + # Force disk offload by setting very small CPU memory + max_memory = {0: max_size, "cpu": int(0.1 * max_size)} + + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir, safe_serialization=False) + with self.assertRaises(ValueError): + # This errors out because it's missing an offload folder + new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) + + new_model = self.model_class.from_pretrained( + tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir + ) + + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + torch.manual_seed(0) + new_output = new_model(**inputs_dict) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + @require_torch_accelerator + def test_disk_offload_with_safetensors(self): + if self.model_class._no_split_modules is None: + pytest.skip("Test not supported for this model as 
`_no_split_modules` is not set.") + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + + model = model.to(torch_device) + + torch.manual_seed(0) + base_output = model(**inputs_dict) + + model_size = compute_module_sizes(model)[""] + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir) + + max_size = int(self.model_split_percents[0] * model_size) + max_memory = {0: max_size, "cpu": max_size} + new_model = self.model_class.from_pretrained( + tmp_dir, device_map="auto", offload_folder=tmp_dir, max_memory=max_memory + ) + + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + torch.manual_seed(0) + new_output = new_model(**inputs_dict) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + @require_torch_multi_accelerator + def test_model_parallelism(self): + if self.model_class._no_split_modules is None: + pytest.skip("Test not supported for this model as `_no_split_modules` is not set.") + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + + model = model.to(torch_device) + + torch.manual_seed(0) + base_output = model(**inputs_dict) + + model_size = compute_module_sizes(model)[""] + # We test several splits of sizes to make sure it works. + max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]] + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir) + + for max_size in max_gpu_sizes: + max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2} + new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory) + # Making sure part of the model will actually end up offloaded + self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) + + self.check_device_map_is_respected(new_model, new_model.hf_device_map) + + torch.manual_seed(0) + new_output = new_model(**inputs_dict) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + @require_torch_accelerator + def test_sharded_checkpoints(self): + torch.manual_seed(0) + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + model = model.to(torch_device) + + base_output = model(**inputs_dict) + + model_size = compute_module_persistent_sizes(model)[""] + max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB") + self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) + + # Now check if the right number of shards exists. First, let's get the number of shards. + # Since this number can be dependent on the model being tested, it's important that we calculate it + # instead of hardcoding it. 
+ expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)) + actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) + self.assertTrue(actual_num_shards == expected_num_shards) + + new_model = self.model_class.from_pretrained(tmp_dir).eval() + new_model = new_model.to(torch_device) + + torch.manual_seed(0) + if "generator" in inputs_dict: + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + new_output = new_model(**inputs_dict) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + @require_torch_accelerator + def test_sharded_checkpoints_with_variant(self): + torch.manual_seed(0) + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + model = model.to(torch_device) + + base_output = model(**inputs_dict) + + model_size = compute_module_persistent_sizes(model)[""] + max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. + variant = "fp16" + with tempfile.TemporaryDirectory() as tmp_dir: + # It doesn't matter if the actual model is in fp16 or not. Just adding the variant and + # testing if loading works with the variant when the checkpoint is sharded should be + # enough. + model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB", variant=variant) + + index_filename = _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) + self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_filename))) + + # Now check if the right number of shards exists. First, let's get the number of shards. + # Since this number can be dependent on the model being tested, it's important that we calculate it + # instead of hardcoding it. + expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_filename)) + actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) + self.assertTrue(actual_num_shards == expected_num_shards) + + new_model = self.model_class.from_pretrained(tmp_dir, variant=variant).eval() + new_model = new_model.to(torch_device) + + torch.manual_seed(0) + if "generator" in inputs_dict: + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + new_output = new_model(**inputs_dict) + + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + @require_torch_accelerator + def test_sharded_checkpoints_with_parallel_loading(self): + torch.manual_seed(0) + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + model = model.to(torch_device) + + base_output = model(**inputs_dict) + + model_size = compute_module_persistent_sizes(model)[""] + max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB") + self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) + + # Now check if the right number of shards exists. First, let's get the number of shards. + # Since this number can be dependent on the model being tested, it's important that we calculate it + # instead of hardcoding it. 
+ expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)) + actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) + self.assertTrue(actual_num_shards == expected_num_shards) + + # Load with parallel loading + os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes" + new_model = self.model_class.from_pretrained(tmp_dir).eval() + new_model = new_model.to(torch_device) + + torch.manual_seed(0) + if "generator" in inputs_dict: + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + new_output = new_model(**inputs_dict) + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + # set to no. + os.environ["HF_ENABLE_PARALLEL_LOADING"] = "no" + + @require_torch_accelerator + def test_sharded_checkpoints_device_map(self): + if self.model_class._no_split_modules is None: + pytest.skip("Test not supported for this model as `_no_split_modules` is not set.") + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + model = model.to(torch_device) + + torch.manual_seed(0) + base_output = model(**inputs_dict) + + model_size = compute_module_persistent_sizes(model)[""] + max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB") + self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) + + # Now check if the right number of shards exists. First, let's get the number of shards. + # Since this number can be dependent on the model being tested, it's important that we calculate it + # instead of hardcoding it. + expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)) + actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")]) + self.assertTrue(actual_num_shards == expected_num_shards) + + new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto") + + torch.manual_seed(0) + if "generator" in inputs_dict: + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + new_output = new_model(**inputs_dict) + self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5)) + + # This test is okay without a GPU because we're not running any execution. We're just serializing + # and check if the resultant files are following an expected format. + def test_variant_sharded_ckpt_right_format(self): + for use_safe in [True, False]: + extension = ".safetensors" if use_safe else ".bin" + config, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config).eval() + + model_size = compute_module_persistent_sizes(model)[""] + max_shard_size = int((model_size * 0.75) / (2**10)) # Convert to KB as these test models are small. + variant = "fp16" + with tempfile.TemporaryDirectory() as tmp_dir: + model.cpu().save_pretrained( + tmp_dir, variant=variant, max_shard_size=f"{max_shard_size}KB", safe_serialization=use_safe + ) + index_variant = _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safe else WEIGHTS_INDEX_NAME, variant) + self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_variant))) + + # Now check if the right number of shards exists. First, let's get the number of shards. + # Since this number can be dependent on the model being tested, it's important that we calculate it + # instead of hardcoding it. 
+ expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_variant)) + actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(extension)]) + self.assertTrue(actual_num_shards == expected_num_shards) + + # Check if the variant is present as a substring in the checkpoints. + shard_files = [ + file + for file in os.listdir(tmp_dir) + if file.endswith(extension) or ("index" in file and "json" in file) + ] + assert all(variant in f for f in shard_files) + + # Check if the sharded checkpoints were serialized in the right format. + shard_files = [file for file in os.listdir(tmp_dir) if file.endswith(extension)] + # Example: diffusion_pytorch_model.fp16-00001-of-00002.safetensors + assert all(f.split(".")[1].split("-")[0] == variant for f in shard_files) + + def test_layerwise_casting_training(self): + def test_fn(storage_dtype, compute_dtype): + if torch.device(torch_device).type == "cpu" and compute_dtype == torch.bfloat16: + pytest.skip("Skipping test because CPU doesn't go well with bfloat16.") + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + model = model.to(torch_device, dtype=compute_dtype) + model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) + model.train() + + inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) + with torch.amp.autocast(device_type=torch.device(torch_device).type): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + input_tensor = inputs_dict[self.main_input_name] + noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) + noise = cast_maybe_tensor_dtype(noise, torch.float32, compute_dtype) + loss = torch.nn.functional.mse_loss(output, noise) + + loss.backward() + + test_fn(torch.float16, torch.float32) + test_fn(torch.float8_e4m3fn, torch.float32) + test_fn(torch.float8_e5m2, torch.float32) + test_fn(torch.float8_e4m3fn, torch.bfloat16) + + @torch.no_grad() + def test_layerwise_casting_inference(self): + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN + + torch.manual_seed(0) + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**config) + model.eval() + model.to(torch_device) + base_slice = model(**inputs_dict)[0].detach().flatten().cpu().numpy() + + def check_linear_dtype(module, storage_dtype, compute_dtype): + patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN + if getattr(module, "_skip_layerwise_casting_patterns", None) is not None: + patterns_to_check += tuple(module._skip_layerwise_casting_patterns) + for name, submodule in module.named_modules(): + if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + continue + dtype_to_check = storage_dtype + if any(re.search(pattern, name) for pattern in patterns_to_check): + dtype_to_check = compute_dtype + if getattr(submodule, "weight", None) is not None: + self.assertEqual(submodule.weight.dtype, dtype_to_check) + if getattr(submodule, "bias", None) is not None: + self.assertEqual(submodule.bias.dtype, dtype_to_check) + + def test_layerwise_casting(storage_dtype, compute_dtype): + torch.manual_seed(0) + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) + model = self.model_class(**config).eval() + model = 
model.to(torch_device, dtype=compute_dtype) + model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) + + check_linear_dtype(model, storage_dtype, compute_dtype) + output = model(**inputs_dict)[0].float().flatten().detach().cpu().numpy() + + # The precision test is not very important for fast tests. In most cases, the outputs will not be the same. + # We just want to make sure that the layerwise casting is working as expected. + self.assertTrue(numpy_cosine_similarity_distance(base_slice, output) < 1.0) + + test_layerwise_casting(torch.float16, torch.float32) + test_layerwise_casting(torch.float8_e4m3fn, torch.float32) + test_layerwise_casting(torch.float8_e5m2, torch.float32) + test_layerwise_casting(torch.float8_e4m3fn, torch.bfloat16) + + @require_torch_accelerator + @torch.no_grad() + def test_layerwise_casting_memory(self): + MB_TOLERANCE = 0.2 + LEAST_COMPUTE_CAPABILITY = 8.0 + + def reset_memory_stats(): + gc.collect() + backend_synchronize(torch_device) + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + + def get_memory_usage(storage_dtype, compute_dtype): + torch.manual_seed(0) + config, inputs_dict = self.prepare_init_args_and_inputs_for_common() + inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) + model = self.model_class(**config).eval() + model = model.to(torch_device, dtype=compute_dtype) + model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) + + reset_memory_stats() + model(**inputs_dict) + model_memory_footprint = model.get_memory_footprint() + peak_inference_memory_allocated_mb = backend_max_memory_allocated(torch_device) / 1024**2 + + return model_memory_footprint, peak_inference_memory_allocated_mb + + fp32_memory_footprint, fp32_max_memory = get_memory_usage(torch.float32, torch.float32) + fp8_e4m3_fp32_memory_footprint, fp8_e4m3_fp32_max_memory = get_memory_usage(torch.float8_e4m3fn, torch.float32) + fp8_e4m3_bf16_memory_footprint, fp8_e4m3_bf16_max_memory = get_memory_usage( + torch.float8_e4m3fn, torch.bfloat16 + ) + + compute_capability = get_torch_cuda_device_capability() if torch_device == "cuda" else None + self.assertTrue(fp8_e4m3_bf16_memory_footprint < fp8_e4m3_fp32_memory_footprint < fp32_memory_footprint) + # NOTE: the following assertion would fail on our CI (running Tesla T4) due to bf16 using more memory than fp32. + # On other devices, such as DGX (Ampere) and Audace (Ada), the test passes. So, we conditionally check it. + if compute_capability and compute_capability >= LEAST_COMPUTE_CAPABILITY: + self.assertTrue(fp8_e4m3_bf16_max_memory < fp8_e4m3_fp32_max_memory) + # On this dummy test case with a small model, sometimes fp8_e4m3_fp32 max memory usage is higher than fp32 by a few + # bytes. This only happens for some models, so we allow a small tolerance. + # For any real model being tested, the order would be fp8_e4m3_bf16 < fp8_e4m3_fp32 < fp32. 
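# Aside: a usage sketch of the layerwise-casting feature whose memory behaviour this test
# asserts (the checkpoint id and the CUDA device are assumptions). Weights are stored in
# float8 and upcast to the compute dtype only while a layer runs, which is why the fp8
# footprints measured above come out smaller than the fp32 baseline.
import torch
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", torch_dtype=torch.bfloat16
).to("cuda")
unet.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
print(f"footprint: {unet.get_memory_footprint() / 1024**2:.0f} MiB")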
+ self.assertTrue( + fp8_e4m3_fp32_max_memory < fp32_max_memory + or abs(fp8_e4m3_fp32_max_memory - fp32_max_memory) < MB_TOLERANCE + ) + + @parameterized.expand([False, True]) + @require_torch_accelerator + def test_group_offloading(self, record_stream): + if not self.model_class._supports_group_offloading: + pytest.skip("Model does not support group offloading.") + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + torch.manual_seed(0) + + @torch.no_grad() + def run_forward(model): + self.assertTrue( + all( + module._diffusers_hook.get_hook("group_offloading") is not None + for module in model.modules() + if hasattr(module, "_diffusers_hook") + ) + ) + model.eval() + return model(**inputs_dict)[0] + + model = self.model_class(**init_dict) + + model.to(torch_device) + output_without_group_offloading = run_forward(model) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1) + output_with_group_offloading1 = run_forward(model) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, non_blocking=True) + output_with_group_offloading2 = run_forward(model) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.enable_group_offload(torch_device, offload_type="leaf_level") + output_with_group_offloading3 = run_forward(model) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.enable_group_offload( + torch_device, offload_type="leaf_level", use_stream=True, record_stream=record_stream + ) + output_with_group_offloading4 = run_forward(model) + + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5)) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5)) + + @parameterized.expand([(False, "block_level"), (True, "leaf_level")]) + @require_torch_accelerator + @torch.no_grad() + def test_group_offloading_with_layerwise_casting(self, record_stream, offload_type): + if not self.model_class._supports_group_offloading: + pytest.skip("Model does not support group offloading.") + + torch.manual_seed(0) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.to(torch_device) + model.eval() + _ = model(**inputs_dict)[0] + + torch.manual_seed(0) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + storage_dtype, compute_dtype = torch.float16, torch.float32 + inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype) + model = self.model_class(**init_dict) + model.eval() + additional_kwargs = {} if offload_type == "leaf_level" else {"num_blocks_per_group": 1} + model.enable_group_offload( + torch_device, offload_type=offload_type, use_stream=True, record_stream=record_stream, **additional_kwargs + ) + model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype) + _ = model(**inputs_dict)[0] + + @parameterized.expand([("block_level", False), ("leaf_level", True)]) + @require_torch_accelerator + @torch.no_grad() + @torch.inference_mode() + def test_group_offloading_with_disk(self, 
offload_type, record_stream, atol=1e-5): + if not self.model_class._supports_group_offloading: + pytest.skip("Model does not support group offloading.") + + if self.model_class.__name__ == "QwenImageTransformer2DModel": + pytest.skip( + "QwenImageTransformer2DModel doesn't support group offloading with disk. Needs to be investigated." + ) + + def _has_generator_arg(model): + sig = inspect.signature(model.forward) + params = sig.parameters + return "generator" in params + + def _run_forward(model, inputs_dict): + accepts_generator = _has_generator_arg(model) + if accepts_generator: + inputs_dict["generator"] = torch.manual_seed(0) + torch.manual_seed(0) + return model(**inputs_dict)[0] + + if self.__class__.__name__ == "AutoencoderKLCosmosTests" and offload_type == "leaf_level": + pytest.skip("With `leaf_type` as the offloading type, it fails. Needs investigation.") + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + torch.manual_seed(0) + model = self.model_class(**init_dict) + + model.eval() + model.to(torch_device) + output_without_group_offloading = _run_forward(model, inputs_dict) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.eval() + + num_blocks_per_group = None if offload_type == "leaf_level" else 1 + additional_kwargs = {} if offload_type == "leaf_level" else {"num_blocks_per_group": num_blocks_per_group} + with tempfile.TemporaryDirectory() as tmpdir: + model.enable_group_offload( + torch_device, + offload_type=offload_type, + offload_to_disk_path=tmpdir, + use_stream=True, + record_stream=record_stream, + **additional_kwargs, + ) + has_safetensors = glob.glob(f"{tmpdir}/*.safetensors") + self.assertTrue(has_safetensors, "No safetensors found in the directory.") + + # For "leaf-level", there is a prefetching hook which makes this check a bit non-deterministic + # in nature. So, skip it. 
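# Aside: a usage sketch of the disk-offloading path exercised in this test (the model id
# and the offload directory are assumptions). Parameter groups are serialized as
# safetensors files under offload_to_disk_path and moved back onto the onload device as
# each group is needed, instead of being held in CPU RAM.
import torch
from diffusers import AutoModel

transformer = AutoModel.from_pretrained(
    "black-forest-labs/FLUX.1-dev", subfolder="transformer", torch_dtype=torch.bfloat16
)
transformer.enable_group_offload(
    onload_device=torch.device("cuda"),
    offload_type="leaf_level",
    offload_to_disk_path="/tmp/transformer-offload",
    use_stream=True,
)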
+ if offload_type != "leaf_level": + is_correct, extra_files, missing_files = _check_safetensors_serialization( + module=model, + offload_to_disk_path=tmpdir, + offload_type=offload_type, + num_blocks_per_group=num_blocks_per_group, + ) + if not is_correct: + if extra_files: + raise ValueError(f"Found extra files: {', '.join(extra_files)}") + elif missing_files: + raise ValueError(f"Following files are missing: {', '.join(missing_files)}") + + output_with_group_offloading = _run_forward(model, inputs_dict) + self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading, atol=atol)) + + def test_auto_model(self, expected_max_diff=5e-5): + if self.forward_requires_fresh_args: + model = self.model_class(**self.init_dict) + else: + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model = model.eval() + model = model.to(torch_device) + + if hasattr(model, "set_default_attn_processor"): + model.set_default_attn_processor() + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + model.save_pretrained(tmpdirname, safe_serialization=False) + + auto_model = AutoModel.from_pretrained(tmpdirname) + if hasattr(auto_model, "set_default_attn_processor"): + auto_model.set_default_attn_processor() + + auto_model = auto_model.eval() + auto_model = auto_model.to(torch_device) + + with torch.no_grad(): + if self.forward_requires_fresh_args: + output_original = model(**self.inputs_dict(0)) + output_auto = auto_model(**self.inputs_dict(0)) + else: + output_original = model(**inputs_dict) + output_auto = auto_model(**inputs_dict) + + if isinstance(output_original, dict): + output_original = output_original.to_tuple()[0] + if isinstance(output_auto, dict): + output_auto = output_auto.to_tuple()[0] + + max_diff = (output_original - output_auto).abs().max().item() + self.assertLessEqual( + max_diff, + expected_max_diff, + f"AutoModel forward pass diff: {max_diff} exceeds threshold {expected_max_diff}", + ) + + @parameterized.expand( + [ + (-1, "You can't pass device_map as a negative int"), + ("foo", "When passing device_map as a string, the value needs to be a device name"), + ] + ) + def test_wrong_device_map_raises_error(self, device_map, msg_substring): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + with tempfile.TemporaryDirectory() as tmpdir: + model.save_pretrained(tmpdir) + with self.assertRaises(ValueError) as err_ctx: + _ = self.model_class.from_pretrained(tmpdir, device_map=device_map) + + assert msg_substring in str(err_ctx.exception) + + @parameterized.expand([0, torch_device, torch.device(torch_device)]) + @require_torch_accelerator + def test_passing_non_dict_device_map_works(self, device_map): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).eval() + with tempfile.TemporaryDirectory() as tmpdir: + model.save_pretrained(tmpdir) + loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map) + _ = loaded_model(**inputs_dict) + + @parameterized.expand([("", torch_device), ("", torch.device(torch_device))]) + @require_torch_accelerator + def test_passing_dict_device_map_works(self, name, device): + # There are other valid dict-based `device_map` values too. It's best to refer to + # the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap. 
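# Aside: the shapes of device_map that from_pretrained() accepts, per the device_map
# tests in this mixin (the checkpoint id below is an assumption): a plain device name or
# torch.device for the whole model, the equivalent dict form where "" addresses the root
# module, or "auto" to let accelerate split the model across the available devices.
import torch
from diffusers import UNet2DConditionModel

ckpt = "stabilityai/stable-diffusion-2-1"  # assumed checkpoint, for illustration only
m1 = UNet2DConditionModel.from_pretrained(ckpt, subfolder="unet", device_map="cuda")
m2 = UNet2DConditionModel.from_pretrained(ckpt, subfolder="unet", device_map={"": torch.device("cuda")})
m3 = UNet2DConditionModel.from_pretrained(ckpt, subfolder="unet", device_map="auto")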
+ init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).eval() + device_map = {name: device} + with tempfile.TemporaryDirectory() as tmpdir: + model.save_pretrained(tmpdir) + loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map) + _ = loaded_model(**inputs_dict) + + +@is_staging_test +class ModelPushToHubTester(unittest.TestCase): + identifier = uuid.uuid4() + repo_id = f"test-model-{identifier}" + org_repo_id = f"valid_org/{repo_id}-org" + + def test_push_to_hub(self): + model = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + model.push_to_hub(self.repo_id, token=TOKEN) + + new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.repo_id) + + # Push to hub via save_pretrained + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) + + new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(self.repo_id, token=TOKEN) + + def test_push_to_hub_in_organization(self): + model = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + model.push_to_hub(self.org_repo_id, token=TOKEN) + + new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.org_repo_id) + + # Push to hub via save_pretrained + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) + + new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(self.org_repo_id, token=TOKEN) + + @unittest.skipIf( + not is_jinja_available(), + reason="Model card tests cannot be performed without Jinja installed.", + ) + def test_push_to_hub_library_name(self): + model = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + model.push_to_hub(self.repo_id, token=TOKEN) + + model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data + assert model_card.library_name == "diffusers" + + # Reset repo + delete_repo(self.repo_id, token=TOKEN) + + +@require_torch_accelerator +@require_torch_2 +@is_torch_compile +@slow +@require_torch_version_greater("2.7.1") +class TorchCompileTesterMixin: + different_shapes_for_compilation = None + + def setUp(self): + # clean up the VRAM before each test + super().setUp() + 
torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test in case of CUDA runtime errors + super().tearDown() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def test_torch_compile_recompilation_and_graph_break(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict).to(torch_device) + model = torch.compile(model, fullgraph=True) + + with ( + torch._inductor.utils.fresh_inductor_cache(), + torch._dynamo.config.patch(error_on_recompile=True), + torch.no_grad(), + ): + _ = model(**inputs_dict) + _ = model(**inputs_dict) + + def test_torch_compile_repeated_blocks(self): + if self.model_class._repeated_blocks is None: + pytest.skip("Skipping test as the model class doesn't have `_repeated_blocks` set.") + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict).to(torch_device) + model.compile_repeated_blocks(fullgraph=True) + + recompile_limit = 1 + if self.model_class.__name__ == "UNet2DConditionModel": + recompile_limit = 2 + + with ( + torch._inductor.utils.fresh_inductor_cache(), + torch._dynamo.config.patch(recompile_limit=recompile_limit), + torch.no_grad(), + ): + _ = model(**inputs_dict) + _ = model(**inputs_dict) + + def test_compile_with_group_offloading(self): + if not self.model_class._supports_group_offloading: + pytest.skip("Model does not support group offloading.") + + torch._dynamo.config.cache_size_limit = 10000 + + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.eval() + # TODO: Can test for other group offloading kwargs later if needed. + group_offload_kwargs = { + "onload_device": torch_device, + "offload_device": "cpu", + "offload_type": "block_level", + "num_blocks_per_group": 1, + "use_stream": True, + "non_blocking": True, + } + model.enable_group_offload(**group_offload_kwargs) + model.compile() + with torch.no_grad(): + _ = model(**inputs_dict) + _ = model(**inputs_dict) + + @require_torch_version_greater("2.7.1") + def test_compile_on_different_shapes(self): + if self.different_shapes_for_compilation is None: + pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.") + torch.fx.experimental._config.use_duck_shape = False + + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + model = torch.compile(model, fullgraph=True, dynamic=True) + + for height, width in self.different_shapes_for_compilation: + with torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad(): + inputs_dict = self.prepare_dummy_input(height=height, width=width) + _ = model(**inputs_dict) + + +@slow +@require_torch_2 +@require_torch_accelerator +@require_peft_backend +@require_peft_version_greater("0.14.0") +@require_torch_version_greater("2.7.1") +@is_torch_compile +class LoraHotSwappingForModelTesterMixin: + """Test that hotswapping does not result in recompilation on the model directly. + + We're not extensively testing the hotswapping functionality since it is implemented in PEFT and is extensively + tested there. The goal of this test is specifically to ensure that hotswapping with diffusers does not require + recompilation. 
+ + See + https://github.com/huggingface/peft/blob/eaab05e18d51fb4cce20a73c9acd82a00c013b83/tests/test_gpu_examples.py#L4252 + for the analogous PEFT test. + + """ + + different_shapes_for_compilation = None + + def tearDown(self): + # It is critical that the dynamo cache is reset for each test. Otherwise, if the test re-uses the same model, + # there will be recompilation errors, as torch caches the model when run in the same process. + super().tearDown() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def get_lora_config(self, lora_rank, lora_alpha, target_modules): + # from diffusers test_models_unet_2d_condition.py + from peft import LoraConfig + + lora_config = LoraConfig( + r=lora_rank, + lora_alpha=lora_alpha, + target_modules=target_modules, + init_lora_weights=False, + use_dora=False, + ) + return lora_config + + def get_linear_module_name_other_than_attn(self, model): + linear_names = [ + name for name, module in model.named_modules() if isinstance(module, nn.Linear) and "to_" not in name + ] + return linear_names[0] + + def check_model_hotswap(self, do_compile, rank0, rank1, target_modules0, target_modules1=None): + """ + Check that hotswapping works on a small unet. + + Steps: + - create 2 LoRA adapters and save them + - load the first adapter + - hotswap the second adapter + - check that the outputs are correct + - optionally compile the model + - optionally check if recompilations happen on different shapes + + Note: We set rank == alpha here because save_lora_adapter does not save the alpha scalings, thus the test would + fail if the values are different. Since rank != alpha does not matter for the purpose of this test, this is + fine. + """ + different_shapes = self.different_shapes_for_compilation + # create 2 adapters with different ranks and alphas + torch.manual_seed(0) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + alpha0, alpha1 = rank0, rank1 + max_rank = max([rank0, rank1]) + if target_modules1 is None: + target_modules1 = target_modules0[:] + lora_config0 = self.get_lora_config(rank0, alpha0, target_modules0) + lora_config1 = self.get_lora_config(rank1, alpha1, target_modules1) + + model.add_adapter(lora_config0, adapter_name="adapter0") + with torch.inference_mode(): + torch.manual_seed(0) + output0_before = model(**inputs_dict)["sample"] + + model.add_adapter(lora_config1, adapter_name="adapter1") + model.set_adapter("adapter1") + with torch.inference_mode(): + torch.manual_seed(0) + output1_before = model(**inputs_dict)["sample"] + + # sanity checks: + tol = 5e-3 + assert not torch.allclose(output0_before, output1_before, atol=tol, rtol=tol) + assert not (output0_before == 0).all() + assert not (output1_before == 0).all() + + with tempfile.TemporaryDirectory() as tmp_dirname: + # save the adapter checkpoints + model.save_lora_adapter(os.path.join(tmp_dirname, "0"), safe_serialization=True, adapter_name="adapter0") + model.save_lora_adapter(os.path.join(tmp_dirname, "1"), safe_serialization=True, adapter_name="adapter1") + del model + + # load the first adapter + torch.manual_seed(0) + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + if do_compile or (rank0 != rank1): + # no need to prepare if the model is not compiled or if the ranks are identical + model.enable_lora_hotswap(target_rank=max_rank) + + file_name0 = os.path.join(os.path.join(tmp_dirname, "0"), 
"pytorch_lora_weights.safetensors") + file_name1 = os.path.join(os.path.join(tmp_dirname, "1"), "pytorch_lora_weights.safetensors") + model.load_lora_adapter(file_name0, safe_serialization=True, adapter_name="adapter0", prefix=None) + + if do_compile: + model = torch.compile(model, mode="reduce-overhead", dynamic=different_shapes is not None) + + with torch.inference_mode(): + # additionally check if dynamic compilation works. + if different_shapes is not None: + for height, width in different_shapes: + new_inputs_dict = self.prepare_dummy_input(height=height, width=width) + _ = model(**new_inputs_dict) + else: + output0_after = model(**inputs_dict)["sample"] + assert torch.allclose(output0_before, output0_after, atol=tol, rtol=tol) + + # hotswap the 2nd adapter + model.load_lora_adapter(file_name1, adapter_name="adapter0", hotswap=True, prefix=None) + + # we need to call forward to potentially trigger recompilation + with torch.inference_mode(): + if different_shapes is not None: + for height, width in different_shapes: + new_inputs_dict = self.prepare_dummy_input(height=height, width=width) + _ = model(**new_inputs_dict) + else: + output1_after = model(**inputs_dict)["sample"] + assert torch.allclose(output1_before, output1_after, atol=tol, rtol=tol) + + # check error when not passing valid adapter name + name = "does-not-exist" + msg = f"Trying to hotswap LoRA adapter '{name}' but there is no existing adapter by that name" + with self.assertRaisesRegex(ValueError, msg): + model.load_lora_adapter(file_name1, adapter_name=name, hotswap=True, prefix=None) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_model(self, rank0, rank1): + self.check_model_hotswap( + do_compile=False, rank0=rank0, rank1=rank1, target_modules0=["to_q", "to_k", "to_v", "to_out.0"] + ) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_model_linear(self, rank0, rank1): + # It's important to add this context to raise an error on recompilation + target_modules = ["to_q", "to_k", "to_v", "to_out.0"] + with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): + self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_model_conv2d(self, rank0, rank1): + if "unet" not in self.model_class.__name__.lower(): + pytest.skip("Test only applies to UNet.") + + # It's important to add this context to raise an error on recompilation + target_modules = ["conv", "conv1", "conv2"] + with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): + self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_model_both_linear_and_conv2d(self, rank0, rank1): + if "unet" not in self.model_class.__name__.lower(): + pytest.skip("Test only applies to UNet.") + + # It's important to add this context to raise an error on recompilation + target_modules = ["to_q", "conv"] + with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): + self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, 
target_modules0=target_modules) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_model_both_linear_and_other(self, rank0, rank1): + # In `test_hotswapping_compiled_model_both_linear_and_conv2d()`, we check if we can do hotswapping + # with `torch.compile()` for models that have both linear and conv layers. In this test, we check + # if we can target a linear layer from the transformer blocks and another linear layer from non-attention + # block. + target_modules = ["to_q"] + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + target_modules.append(self.get_linear_module_name_other_than_attn(model)) + del model + + # It's important to add this context to raise an error on recompilation + with torch._dynamo.config.patch(error_on_recompile=True): + self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) + + def test_enable_lora_hotswap_called_after_adapter_added_raises(self): + # ensure that enable_lora_hotswap is called before loading the first adapter + lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + model.add_adapter(lora_config) + + msg = re.escape("Call `enable_lora_hotswap` before loading the first adapter.") + with self.assertRaisesRegex(RuntimeError, msg): + model.enable_lora_hotswap(target_rank=32) + + def test_enable_lora_hotswap_called_after_adapter_added_warning(self): + # ensure that enable_lora_hotswap is called before loading the first adapter + from diffusers.loaders.peft import logger + + lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + model.add_adapter(lora_config) + msg = ( + "It is recommended to call `enable_lora_hotswap` before loading the first adapter to avoid recompilation." 
+ ) + with self.assertLogs(logger=logger, level="WARNING") as cm: + model.enable_lora_hotswap(target_rank=32, check_compiled="warn") + assert any(msg in log for log in cm.output) + + def test_enable_lora_hotswap_called_after_adapter_added_ignore(self): + # check possibility to ignore the error/warning + lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + model.add_adapter(lora_config) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") # Capture all warnings + model.enable_lora_hotswap(target_rank=32, check_compiled="warn") + self.assertEqual(len(w), 0, f"Expected no warnings, but got: {[str(warn.message) for warn in w]}") + + def test_enable_lora_hotswap_wrong_check_compiled_argument_raises(self): + # check that wrong argument value raises an error + lora_config = self.get_lora_config(8, 8, target_modules=["to_q"]) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + model.add_adapter(lora_config) + msg = re.escape("check_compiles should be one of 'error', 'warn', or 'ignore', got 'wrong-argument' instead.") + with self.assertRaisesRegex(ValueError, msg): + model.enable_lora_hotswap(target_rank=32, check_compiled="wrong-argument") + + def test_hotswap_second_adapter_targets_more_layers_raises(self): + # check the error and log + from diffusers.loaders.peft import logger + + # at the moment, PEFT requires the 2nd adapter to target the same or a subset of layers + target_modules0 = ["to_q"] + target_modules1 = ["to_q", "to_k"] + with self.assertRaises(RuntimeError): # peft raises RuntimeError + with self.assertLogs(logger=logger, level="ERROR") as cm: + self.check_model_hotswap( + do_compile=True, rank0=8, rank1=8, target_modules0=target_modules0, target_modules1=target_modules1 + ) + assert any("Hotswapping adapter0 was unsuccessful" in log for log in cm.output) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) + @require_torch_version_greater("2.7.1") + def test_hotswapping_compile_on_different_shapes(self, rank0, rank1): + different_shapes_for_compilation = self.different_shapes_for_compilation + if different_shapes_for_compilation is None: + pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.") + # Specifying `use_duck_shape=False` instructs the compiler if it should use the same symbolic + # variable to represent input sizes that are the same. For more details, + # check out this [comment](https://github.com/huggingface/diffusers/pull/11327#discussion_r2047659790). 
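# Aside: a self-contained sketch of the duck-shaping interaction referenced above. With
# use_duck_shape enabled (the default), dimensions that happen to be equal at the first
# compile can be bound to the same symbolic size, which may force a recompile once they
# diverge; disabling it lets height and width vary independently under dynamic=True
# compilation.
import torch

torch.fx.experimental._config.use_duck_shape = False

@torch.compile(fullgraph=True, dynamic=True)
def scale(x: torch.Tensor) -> torch.Tensor:
    return x * 2.0

with torch._dynamo.config.patch(error_on_recompile=True):
    scale(torch.randn(1, 4, 32, 32))
    scale(torch.randn(1, 4, 48, 64))  # different spatial sizes; no recompile expected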
+ torch.fx.experimental._config.use_duck_shape = False + + target_modules = ["to_q", "to_k", "to_v", "to_out.0"] + with torch._dynamo.config.patch(error_on_recompile=True): + self.check_model_hotswap( + do_compile=True, + rank0=rank0, + rank1=rank1, + target_modules0=target_modules, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_models_auto.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_models_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..a70754343f308c3c57e67aaca6781ac9d8a319d1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/test_models_auto.py @@ -0,0 +1,32 @@ +import unittest +from unittest.mock import patch + +from transformers import CLIPTextModel, LongformerModel + +from diffusers.models import AutoModel, UNet2DConditionModel + + +class TestAutoModel(unittest.TestCase): + @patch( + "diffusers.models.AutoModel.load_config", + side_effect=[EnvironmentError("File not found"), {"_class_name": "UNet2DConditionModel"}], + ) + def test_load_from_config_diffusers_with_subfolder(self, mock_load_config): + model = AutoModel.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet") + assert isinstance(model, UNet2DConditionModel) + + @patch( + "diffusers.models.AutoModel.load_config", + side_effect=[EnvironmentError("File not found"), {"model_type": "clip_text_model"}], + ) + def test_load_from_config_transformers_with_subfolder(self, mock_load_config): + model = AutoModel.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="text_encoder") + assert isinstance(model, CLIPTextModel) + + def test_load_from_config_without_subfolder(self): + model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-longformer") + assert isinstance(model, LongformerModel) + + def test_load_from_model_index(self): + model = AutoModel.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="text_encoder") + assert isinstance(model, CLIPTextModel) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_dit_transformer2d.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_dit_transformer2d.py new file mode 100644 index 0000000000000000000000000000000000000000..473a87637578e40699378e9c94e989118e0db30d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_dit_transformer2d.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
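# Aside: a sketch of the resolution order covered by the AutoModel tests in
# tests/models/test_models_auto.py above. AutoModel first tries the diffusers config
# (dispatching on "_class_name") and, when the subfolder only holds a transformers
# config, falls back to the transformers "model_type" mapping. The repo ids below are
# the same tiny test checkpoints those tests use.
from diffusers import AutoModel

unet = AutoModel.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet")
text_encoder = AutoModel.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="text_encoder")
print(type(unet).__name__, type(text_encoder).__name__)  # UNet2DConditionModel, CLIPTextModel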
+ +import unittest + +import torch + +from diffusers import DiTTransformer2DModel, Transformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + slow, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class DiTTransformer2DModelTests(ModelTesterMixin, unittest.TestCase): + model_class = DiTTransformer2DModel + main_input_name = "hidden_states" + + @property + def dummy_input(self): + batch_size = 4 + in_channels = 4 + sample_size = 8 + scheduler_num_train_steps = 1000 + num_class_labels = 4 + + hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device) + timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device) + class_label_ids = torch.randint(0, num_class_labels, size=(batch_size,)).to(torch_device) + + return {"hidden_states": hidden_states, "timestep": timesteps, "class_labels": class_label_ids} + + @property + def input_shape(self): + return (4, 8, 8) + + @property + def output_shape(self): + return (8, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 4, + "out_channels": 8, + "activation_fn": "gelu-approximate", + "num_attention_heads": 2, + "attention_head_dim": 4, + "attention_bias": True, + "num_layers": 1, + "norm_type": "ada_norm_zero", + "num_embeds_ada_norm": 8, + "patch_size": 2, + "sample_size": 8, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output( + expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape + ) + + def test_correct_class_remapping_from_dict_config(self): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = Transformer2DModel.from_config(init_dict) + assert isinstance(model, DiTTransformer2DModel) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"DiTTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_effective_gradient_checkpointing(self): + super().test_effective_gradient_checkpointing(loss_tolerance=1e-4) + + def test_correct_class_remapping_from_pretrained_config(self): + config = DiTTransformer2DModel.load_config("facebook/DiT-XL-2-256", subfolder="transformer") + model = Transformer2DModel.from_config(config) + assert isinstance(model, DiTTransformer2DModel) + + @slow + def test_correct_class_remapping(self): + model = Transformer2DModel.from_pretrained("facebook/DiT-XL-2-256", subfolder="transformer") + assert isinstance(model, DiTTransformer2DModel) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_pixart_transformer2d.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_pixart_transformer2d.py new file mode 100644 index 0000000000000000000000000000000000000000..17c400cf1911a929a8ec291d303395816bee86da --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_pixart_transformer2d.py @@ -0,0 +1,112 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import PixArtTransformer2DModel, Transformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + slow, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class PixArtTransformer2DModelTests(ModelTesterMixin, unittest.TestCase): + model_class = PixArtTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. + model_split_percents = [0.7, 0.6, 0.6] + + @property + def dummy_input(self): + batch_size = 4 + in_channels = 4 + sample_size = 8 + scheduler_num_train_steps = 1000 + cross_attention_dim = 8 + seq_len = 8 + + hidden_states = floats_tensor((batch_size, in_channels, sample_size, sample_size)).to(torch_device) + timesteps = torch.randint(0, scheduler_num_train_steps, size=(batch_size,)).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size, seq_len, cross_attention_dim)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timesteps, + "encoder_hidden_states": encoder_hidden_states, + "added_cond_kwargs": {"aspect_ratio": None, "resolution": None}, + } + + @property + def input_shape(self): + return (4, 8, 8) + + @property + def output_shape(self): + return (8, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 8, + "num_layers": 1, + "patch_size": 2, + "attention_head_dim": 2, + "num_attention_heads": 2, + "in_channels": 4, + "cross_attention_dim": 8, + "out_channels": 8, + "attention_bias": True, + "activation_fn": "gelu-approximate", + "num_embeds_ada_norm": 8, + "norm_type": "ada_norm_single", + "norm_elementwise_affine": False, + "norm_eps": 1e-6, + "use_additional_conditions": False, + "caption_channels": None, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output( + expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"PixArtTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_correct_class_remapping_from_dict_config(self): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = Transformer2DModel.from_config(init_dict) + assert isinstance(model, PixArtTransformer2DModel) + + def test_correct_class_remapping_from_pretrained_config(self): + config = PixArtTransformer2DModel.load_config("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer") + model = Transformer2DModel.from_config(config) + assert isinstance(model, PixArtTransformer2DModel) + + @slow + def test_correct_class_remapping(self): + model = Transformer2DModel.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", subfolder="transformer") + assert isinstance(model, PixArtTransformer2DModel) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_prior.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..af5ac4bbbd76ad8652e5ac0f6f4d1ad133392f50 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_prior.py @@ -0,0 +1,186 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import torch +from parameterized import parameterized + +from diffusers import PriorTransformer + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class PriorTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = PriorTransformer + main_input_name = "hidden_states" + + @property + def dummy_input(self): + batch_size = 4 + embedding_dim = 8 + num_embeddings = 7 + + hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device) + + proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": 2, + "proj_embedding": proj_embedding, + "encoder_hidden_states": encoder_hidden_states, + } + + def get_dummy_seed_input(self, seed=0): + torch.manual_seed(seed) + batch_size = 4 + embedding_dim = 8 + num_embeddings = 7 + + hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) + + proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": 2, + "proj_embedding": proj_embedding, + "encoder_hidden_states": encoder_hidden_states, + } + + @property + def input_shape(self): + return (4, 8) + + @property + def output_shape(self): + return (4, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "num_attention_heads": 2, + "attention_head_dim": 4, + "num_layers": 2, + "embedding_dim": 8, + "num_embeddings": 7, + "additional_embeddings": 4, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = PriorTransformer.from_pretrained( + "hf-internal-testing/prior-dummy", output_loading_info=True + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + hidden_states = model(**self.dummy_input)[0] + + assert hidden_states is not None, "Make sure output is not None" + + def test_forward_signature(self): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**init_dict) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is 
deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["hidden_states", "timestep"] + self.assertListEqual(arg_names[:2], expected_arg_names) + + def test_output_pretrained(self): + model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") + model = model.to(torch_device) + + if hasattr(model, "set_default_attn_processor"): + model.set_default_attn_processor() + + input = self.get_dummy_seed_input() + + with torch.no_grad(): + output = model(**input)[0] + + output_slice = output[0, :5].flatten().cpu() + + # Since the VAE Gaussian prior's generator is seeded on the appropriate device, + # the expected output slices are not the same for CPU and GPU. + expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239]) + self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) + + +@slow +class PriorTransformerIntegrationTests(unittest.TestCase): + def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0): + torch.manual_seed(seed) + + hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) + + proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": 2, + "proj_embedding": proj_embedding, + "encoder_hidden_states": encoder_hidden_states, + } + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @parameterized.expand( + [ + # fmt: off + [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]], + [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]], + # fmt: on + ] + ) + def test_kandinsky_prior(self, seed, expected_slice): + model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior") + model.to(torch_device) + input = self.get_dummy_seed_input(seed=seed) + + with torch.no_grad(): + sample = model(**input)[0] + + assert list(sample.shape) == [1, 768] + + output_slice = sample[0, :8].flatten().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_allegro.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_allegro.py new file mode 100644 index 0000000000000000000000000000000000000000..7c002f87819e502a612e4c7d82e799f085d299a6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_allegro.py @@ -0,0 +1,83 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import AllegroTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class AllegroTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = AllegroTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 2 + height = 8 + width = 8 + embedding_dim = 16 + sequence_length = 16 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim // 2)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 2, 8, 8) + + @property + def output_shape(self): + return (4, 2, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings. + "num_attention_heads": 2, + "attention_head_dim": 8, + "in_channels": 4, + "out_channels": 4, + "num_layers": 1, + "cross_attention_dim": 16, + "sample_width": 8, + "sample_height": 8, + "sample_frames": 8, + "caption_channels": 8, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"AllegroTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_aura_flow.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_aura_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..ae8c3b7234a3d8c221258350abadd23841026606 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_aura_flow.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import AuraFlowTransformer2DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class AuraFlowTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = AuraFlowTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. 
+ model_split_percents = [0.7, 0.6, 0.6] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = width = embedding_dim = 32 + sequence_length = 256 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 32, + "patch_size": 2, + "in_channels": 4, + "num_mmdit_layers": 1, + "num_single_dit_layers": 1, + "attention_head_dim": 8, + "num_attention_heads": 4, + "caption_projection_dim": 32, + "joint_attention_dim": 32, + "out_channels": 4, + "pos_embed_max_size": 256, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"AuraFlowTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @unittest.skip("AuraFlowTransformer2DModel uses its own dedicated attention processor. This test does not apply") + def test_set_attn_processor_for_determinism(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_bria.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_bria.py new file mode 100644 index 0000000000000000000000000000000000000000..9056590edffe6d1f57062d04257a648c357663e4 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_bria.py @@ -0,0 +1,181 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
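As a rough illustration of what the shared tester exercises for this model, here is a standalone sketch that builds the same tiny AuraFlow config as `prepare_init_args_and_inputs_for_common()` and runs one forward pass with the dummy shapes defined above:

```python
import torch
from diffusers import AuraFlowTransformer2DModel

# Tiny config mirroring the test's prepare_init_args_and_inputs_for_common().
model = AuraFlowTransformer2DModel(
    sample_size=32,
    patch_size=2,
    in_channels=4,
    num_mmdit_layers=1,
    num_single_dit_layers=1,
    attention_head_dim=8,
    num_attention_heads=4,
    caption_projection_dim=32,
    joint_attention_dim=32,
    out_channels=4,
    pos_embed_max_size=256,
).eval()

batch_size = 2
inputs = {
    "hidden_states": torch.randn(batch_size, 4, 32, 32),
    "encoder_hidden_states": torch.randn(batch_size, 256, 32),
    "timestep": torch.randint(0, 1000, (batch_size,)),
}

with torch.no_grad():
    out = model(**inputs)
# The forward may return a wrapped output or a bare tensor depending on config.
sample = out if torch.is_tensor(out) else out[0]
print(sample.shape)  # per the test's output_shape, each sample is (4, 32, 32)
```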
+ +import unittest + +import torch + +from diffusers import BriaTransformer2DModel +from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0 +from diffusers.models.embeddings import ImageProjection + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +def create_bria_ip_adapter_state_dict(model): + # "ip_adapter" (cross-attention weights) + ip_cross_attn_state_dict = {} + key_id = 0 + + for name in model.attn_processors.keys(): + if name.startswith("single_transformer_blocks"): + continue + + joint_attention_dim = model.config["joint_attention_dim"] + hidden_size = model.config["num_attention_heads"] * model.config["attention_head_dim"] + sd = FluxIPAdapterJointAttnProcessor2_0( + hidden_size=hidden_size, cross_attention_dim=joint_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + f"{key_id}.to_k_ip.bias": sd["to_k_ip.0.bias"], + f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"], + } + ) + + key_id += 1 + + # "image_proj" (ImageProjection layer weights) + + image_projection = ImageProjection( + cross_attention_dim=model.config["joint_attention_dim"], + image_embed_dim=model.config["pooled_projection_dim"], + num_image_text_embeds=4, + ) + + ip_image_projection_state_dict = {} + sd = image_projection.state_dict() + ip_image_projection_state_dict.update( + { + "proj.weight": sd["image_embeds.weight"], + "proj.bias": sd["image_embeds.bias"], + "norm.weight": sd["norm.weight"], + "norm.bias": sd["norm.bias"], + } + ) + + del sd + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +class BriaTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = BriaTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. 
+ model_split_percents = [0.8, 0.7, 0.7] + + # Skip setting testing with default: AttnProcessor + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device) + image_ids = torch.randn((height * width, num_image_channels)).to(torch_device) + timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "img_ids": image_ids, + "txt_ids": text_ids, + "timestep": timestep, + } + + @property + def input_shape(self): + return (16, 4) + + @property + def output_shape(self): + return (16, 4) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 1, + "in_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 8, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "pooled_projection_dim": None, + "axes_dims_rope": [0, 4, 4], + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_deprecated_inputs_img_txt_ids_3d(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output_1 = model(**inputs_dict).to_tuple()[0] + + # update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated) + text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0) + image_ids_3d = inputs_dict["img_ids"].unsqueeze(0) + + assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor" + assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor" + + inputs_dict["txt_ids"] = text_ids_3d + inputs_dict["img_ids"] = image_ids_3d + + with torch.no_grad(): + output_2 = model(**inputs_dict).to_tuple()[0] + + self.assertEqual(output_1.shape, output_2.shape) + self.assertTrue( + torch.allclose(output_1, output_2, atol=1e-5), + msg="output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) are not equal as them as 2d inputs", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"BriaTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class BriaTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = BriaTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return BriaTransformerTests().prepare_init_args_and_inputs_for_common() + + +class BriaTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): + model_class = BriaTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return BriaTransformerTests().prepare_init_args_and_inputs_for_common() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_chroma.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_chroma.py new file mode 100644 index 0000000000000000000000000000000000000000..92ac8198ed06273a00292861a52ecdb846a6ca8d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_chroma.py @@ -0,0 +1,183 @@ +# 
coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import ChromaTransformer2DModel +from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0 +from diffusers.models.embeddings import ImageProjection + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +def create_chroma_ip_adapter_state_dict(model): + # "ip_adapter" (cross-attention weights) + ip_cross_attn_state_dict = {} + key_id = 0 + + for name in model.attn_processors.keys(): + if name.startswith("single_transformer_blocks"): + continue + + joint_attention_dim = model.config["joint_attention_dim"] + hidden_size = model.config["num_attention_heads"] * model.config["attention_head_dim"] + sd = FluxIPAdapterJointAttnProcessor2_0( + hidden_size=hidden_size, cross_attention_dim=joint_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + f"{key_id}.to_k_ip.bias": sd["to_k_ip.0.bias"], + f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"], + } + ) + + key_id += 1 + + # "image_proj" (ImageProjection layer weights) + + image_projection = ImageProjection( + cross_attention_dim=model.config["joint_attention_dim"], + image_embed_dim=model.config["pooled_projection_dim"], + num_image_text_embeds=4, + ) + + ip_image_projection_state_dict = {} + sd = image_projection.state_dict() + ip_image_projection_state_dict.update( + { + "proj.weight": sd["image_embeds.weight"], + "proj.bias": sd["image_embeds.bias"], + "norm.weight": sd["norm.weight"], + "norm.bias": sd["norm.bias"], + } + ) + + del sd + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +class ChromaTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = ChromaTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. 
+ model_split_percents = [0.8, 0.7, 0.7] + + # Skip setting testing with default: AttnProcessor + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device) + image_ids = torch.randn((height * width, num_image_channels)).to(torch_device) + timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "img_ids": image_ids, + "txt_ids": text_ids, + "timestep": timestep, + } + + @property + def input_shape(self): + return (16, 4) + + @property + def output_shape(self): + return (16, 4) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 1, + "in_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 16, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "axes_dims_rope": [4, 4, 8], + "approximator_num_channels": 8, + "approximator_hidden_dim": 16, + "approximator_layers": 1, + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_deprecated_inputs_img_txt_ids_3d(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output_1 = model(**inputs_dict).to_tuple()[0] + + # update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated) + text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0) + image_ids_3d = inputs_dict["img_ids"].unsqueeze(0) + + assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor" + assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor" + + inputs_dict["txt_ids"] = text_ids_3d + inputs_dict["img_ids"] = image_ids_3d + + with torch.no_grad(): + output_2 = model(**inputs_dict).to_tuple()[0] + + self.assertEqual(output_1.shape, output_2.shape) + self.assertTrue( + torch.allclose(output_1, output_2, atol=1e-5), + msg="output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) are not equal as them as 2d inputs", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"ChromaTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class ChromaTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = ChromaTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return ChromaTransformerTests().prepare_init_args_and_inputs_for_common() + + +class ChromaTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): + model_class = ChromaTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return ChromaTransformerTests().prepare_init_args_and_inputs_for_common() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogvideox.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogvideox.py new file mode 100644 index 0000000000000000000000000000000000000000..f632add7e5a7c4b5bc4259354266bea19c309888 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogvideox.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import CogVideoXTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class CogVideoXTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = CogVideoXTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + model_split_percents = [0.7, 0.7, 0.8] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 1 + height = 8 + width = 8 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (1, 4, 8, 8) + + @property + def output_shape(self): + return (1, 4, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings. 
+ "num_attention_heads": 2, + "attention_head_dim": 8, + "in_channels": 4, + "out_channels": 4, + "time_embed_dim": 2, + "text_embed_dim": 8, + "num_layers": 2, + "sample_width": 8, + "sample_height": 8, + "sample_frames": 8, + "patch_size": 2, + "patch_size_t": None, + "temporal_compression_ratio": 4, + "max_text_seq_length": 8, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"CogVideoXTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class CogVideoX1_5TransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = CogVideoXTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 2 + height = 8 + width = 8 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (1, 4, 8, 8) + + @property + def output_shape(self): + return (1, 4, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings. + "num_attention_heads": 2, + "attention_head_dim": 8, + "in_channels": 4, + "out_channels": 4, + "time_embed_dim": 2, + "text_embed_dim": 8, + "num_layers": 2, + "sample_width": 8, + "sample_height": 8, + "sample_frames": 8, + "patch_size": 2, + "patch_size_t": 2, + "temporal_compression_ratio": 4, + "max_text_seq_length": 8, + "use_rotary_positional_embeddings": True, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"CogVideoXTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogview3plus.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogview3plus.py new file mode 100644 index 0000000000000000000000000000000000000000..d38d77531d4c20eca23c0b8b91409152da5592d1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogview3plus.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import CogView3PlusTransformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = CogView3PlusTransformer2DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + model_split_percents = [0.7, 0.6, 0.6] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = 8 + width = 8 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device) + target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device) + crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "original_size": original_size, + "target_size": target_size, + "crop_coords": crop_coords, + "timestep": timestep, + } + + @property + def input_shape(self): + return (1, 4, 8, 8) + + @property + def output_shape(self): + return (1, 4, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 2, + "in_channels": 4, + "num_layers": 2, + "attention_head_dim": 4, + "num_attention_heads": 2, + "out_channels": 4, + "text_embed_dim": 8, + "time_embed_dim": 8, + "condition_dim": 2, + "pos_embed_max_size": 8, + "sample_size": 8, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"CogView3PlusTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogview4.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogview4.py new file mode 100644 index 0000000000000000000000000000000000000000..084c3b7cea41a4ec03d6424d68f0a0fbb6069a22 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cogview4.py @@ -0,0 +1,83 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
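The gradient-checkpointing test above only verifies that checkpointing is wired into the expected module; a rough sketch of actually running the tiny CogView3Plus config with checkpointing enabled (assuming the usual `enable_gradient_checkpointing()` helper from `ModelMixin`) might look like this:

```python
import torch
from diffusers import CogView3PlusTransformer2DModel

model = CogView3PlusTransformer2DModel(
    patch_size=2,
    in_channels=4,
    num_layers=2,
    attention_head_dim=4,
    num_attention_heads=2,
    out_channels=4,
    text_embed_dim=8,
    time_embed_dim=8,
    condition_dim=2,
    pos_embed_max_size=8,
    sample_size=8,
)
model.enable_gradient_checkpointing()  # path checked by test_gradient_checkpointing_is_applied
model.train()

batch_size, height, width = 2, 8, 8
inputs = {
    "hidden_states": torch.randn(batch_size, 4, height, width),
    "encoder_hidden_states": torch.randn(batch_size, 8, 8),
    # Resolution conditioning mirrors the dummy_input property above.
    "original_size": torch.tensor([[height * 8, width * 8]] * batch_size),
    "target_size": torch.tensor([[height * 8, width * 8]] * batch_size),
    "crop_coords": torch.tensor([[0, 0]] * batch_size),
    "timestep": torch.randint(0, 1000, (batch_size,)),
}

out = model(**inputs)
sample = out if torch.is_tensor(out) else out[0]
sample.mean().backward()  # gradients flow back through the checkpointed blocks
print(sample.shape)
```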
+ +import unittest + +import torch + +from diffusers import CogView4Transformer2DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class CogView3PlusTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = CogView4Transformer2DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = 8 + width = 8 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + original_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device) + target_size = torch.tensor([height * 8, width * 8]).unsqueeze(0).repeat(batch_size, 1).to(torch_device) + crop_coords = torch.tensor([0, 0]).unsqueeze(0).repeat(batch_size, 1).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "original_size": original_size, + "target_size": target_size, + "crop_coords": crop_coords, + } + + @property + def input_shape(self): + return (4, 8, 8) + + @property + def output_shape(self): + return (4, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 2, + "in_channels": 4, + "num_layers": 2, + "attention_head_dim": 4, + "num_attention_heads": 4, + "out_channels": 4, + "text_embed_dim": 8, + "time_embed_dim": 8, + "condition_dim": 4, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"CogView4Transformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_consisid.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_consisid.py new file mode 100644 index 0000000000000000000000000000000000000000..77fc172d078a2da7c6003a1c100bc4d59770124f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_consisid.py @@ -0,0 +1,105 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
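Beyond the forward pass, the shared mixin also round-trips models through `save_pretrained`/`from_pretrained`; a minimal sketch of that path with the tiny CogView4 config above (illustrative only, not part of the test suite):

```python
import tempfile

import torch
from diffusers import CogView4Transformer2DModel

init_kwargs = dict(
    patch_size=2,
    in_channels=4,
    num_layers=2,
    attention_head_dim=4,
    num_attention_heads=4,
    out_channels=4,
    text_embed_dim=8,
    time_embed_dim=8,
    condition_dim=4,
)
model = CogView4Transformer2DModel(**init_kwargs)

# Round-trip the tiny model through the standard serialization path.
with tempfile.TemporaryDirectory() as tmpdir:
    model.save_pretrained(tmpdir)
    reloaded = CogView4Transformer2DModel.from_pretrained(tmpdir)

assert sum(p.numel() for p in model.parameters()) == sum(p.numel() for p in reloaded.parameters())
print(reloaded.config.condition_dim)  # 4, matching the tiny test config
```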
+ +import unittest + +import torch + +from diffusers import ConsisIDTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class ConsisIDTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = ConsisIDTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 1 + height = 8 + width = 8 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_frames, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + id_vit_hidden = [torch.ones([batch_size, 2, 2]).to(torch_device)] * 1 + id_cond = torch.ones(batch_size, 2).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "id_vit_hidden": id_vit_hidden, + "id_cond": id_cond, + } + + @property + def input_shape(self): + return (1, 4, 8, 8) + + @property + def output_shape(self): + return (1, 4, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "num_attention_heads": 2, + "attention_head_dim": 8, + "in_channels": 4, + "out_channels": 4, + "time_embed_dim": 2, + "text_embed_dim": 8, + "num_layers": 1, + "sample_width": 8, + "sample_height": 8, + "sample_frames": 8, + "patch_size": 2, + "temporal_compression_ratio": 4, + "max_text_seq_length": 8, + "cross_attn_interval": 1, + "is_kps": False, + "is_train_face": True, + "cross_attn_dim_head": 1, + "cross_attn_num_heads": 1, + "LFE_id_dim": 2, + "LFE_vit_dim": 2, + "LFE_depth": 5, + "LFE_dim_head": 8, + "LFE_num_heads": 2, + "LFE_num_id_token": 1, + "LFE_num_querie": 1, + "LFE_output_dim": 10, + "LFE_ff_mult": 1, + "LFE_num_scale": 1, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"ConsisIDTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cosmos.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cosmos.py new file mode 100644 index 0000000000000000000000000000000000000000..d7390e105c451af63f11d160fdcde94b447c59a9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_cosmos.py @@ -0,0 +1,153 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
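The ConsisID tester above adds identity conditioning (`id_vit_hidden`, `id_cond`) on top of the usual video latents; a standalone sketch mirroring its tiny config and dummy shapes:

```python
import torch
from diffusers import ConsisIDTransformer3DModel

# Tiny config copied from prepare_init_args_and_inputs_for_common() above.
init_kwargs = {
    "num_attention_heads": 2, "attention_head_dim": 8, "in_channels": 4, "out_channels": 4,
    "time_embed_dim": 2, "text_embed_dim": 8, "num_layers": 1,
    "sample_width": 8, "sample_height": 8, "sample_frames": 8,
    "patch_size": 2, "temporal_compression_ratio": 4, "max_text_seq_length": 8,
    "cross_attn_interval": 1, "is_kps": False, "is_train_face": True,
    "cross_attn_dim_head": 1, "cross_attn_num_heads": 1,
    "LFE_id_dim": 2, "LFE_vit_dim": 2, "LFE_depth": 5, "LFE_dim_head": 8,
    "LFE_num_heads": 2, "LFE_num_id_token": 1, "LFE_num_querie": 1,
    "LFE_output_dim": 10, "LFE_ff_mult": 1, "LFE_num_scale": 1,
}
model = ConsisIDTransformer3DModel(**init_kwargs).eval()

batch_size = 2
inputs = {
    "hidden_states": torch.randn(batch_size, 1, 4, 8, 8),  # (batch, frames, channels, h, w)
    "encoder_hidden_states": torch.randn(batch_size, 8, 8),
    "timestep": torch.randint(0, 1000, (batch_size,)),
    # Identity conditioning: a list of ViT hidden states plus a pooled id embedding.
    "id_vit_hidden": [torch.ones(batch_size, 2, 2)],
    "id_cond": torch.ones(batch_size, 2),
}

with torch.no_grad():
    out = model(**inputs)
sample = out if torch.is_tensor(out) else out[0]
print(sample.shape)  # expected (2, 1, 4, 8, 8), matching the test's per-sample output_shape
```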
+ +import unittest + +import torch + +from diffusers import CosmosTransformer3DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class CosmosTransformer3DModelTests(ModelTesterMixin, unittest.TestCase): + model_class = CosmosTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 4 + num_frames = 1 + height = 16 + width = 16 + text_embed_dim = 16 + sequence_length = 12 + fps = 30 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_embed_dim)).to(torch_device) + attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + padding_mask = torch.zeros(batch_size, 1, height, width).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "attention_mask": attention_mask, + "fps": fps, + "padding_mask": padding_mask, + } + + @property + def input_shape(self): + return (4, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 4, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 12, + "num_layers": 2, + "mlp_ratio": 2, + "text_embed_dim": 16, + "adaln_lora_dim": 4, + "max_size": (4, 32, 32), + "patch_size": (1, 2, 2), + "rope_scale": (2.0, 1.0, 1.0), + "concat_padding_mask": True, + "extra_pos_embed_type": "learnable", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"CosmosTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class CosmosTransformer3DModelVideoToWorldTests(ModelTesterMixin, unittest.TestCase): + model_class = CosmosTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 4 + num_frames = 1 + height = 16 + width = 16 + text_embed_dim = 16 + sequence_length = 12 + fps = 30 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_embed_dim)).to(torch_device) + attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + condition_mask = torch.ones(batch_size, 1, num_frames, height, width).to(torch_device) + padding_mask = torch.zeros(batch_size, 1, height, width).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "attention_mask": attention_mask, + "fps": fps, + "condition_mask": condition_mask, + "padding_mask": padding_mask, + } + + @property + def input_shape(self): + return (4, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 4 + 1, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 12, + "num_layers": 2, + "mlp_ratio": 2, + "text_embed_dim": 16, + "adaln_lora_dim": 4, 
+ "max_size": (4, 32, 32), + "patch_size": (1, 2, 2), + "rope_scale": (2.0, 1.0, 1.0), + "concat_padding_mask": True, + "extra_pos_embed_type": "learnable", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"CosmosTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_easyanimate.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_easyanimate.py new file mode 100644 index 0000000000000000000000000000000000000000..d7b90a47d9749830fb2da2ca75251b80ffafb344 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_easyanimate.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import EasyAnimateTransformer3DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class EasyAnimateTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = EasyAnimateTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 2 + height = 16 + width = 16 + embedding_dim = 16 + sequence_length = 16 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "timestep_cond": None, + "encoder_hidden_states": encoder_hidden_states, + "encoder_hidden_states_t5": None, + "inpaint_latents": None, + "control_latents": None, + } + + @property + def input_shape(self): + return (4, 2, 16, 16) + + @property + def output_shape(self): + return (4, 2, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "attention_head_dim": 16, + "num_attention_heads": 2, + "in_channels": 4, + "mmdit_layers": 2, + "num_layers": 2, + "out_channels": 4, + "patch_size": 2, + "sample_height": 60, + "sample_width": 90, + "text_embed_dim": 16, + "time_embed_dim": 8, + "time_position_encoding_type": "3d_rope", + "timestep_activation_fn": "silu", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"EasyAnimateTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_flux.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_flux.py new file mode 100644 index 0000000000000000000000000000000000000000..3ab02f797b5b962ecd8e070086e944a1b3df3ff0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_flux.py @@ -0,0 +1,224 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import FluxTransformer2DModel +from diffusers.models.attention_processor import FluxIPAdapterJointAttnProcessor2_0 +from diffusers.models.embeddings import ImageProjection + +from ...testing_utils import enable_full_determinism, is_peft_available, torch_device +from ..test_modeling_common import LoraHotSwappingForModelTesterMixin, ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +def create_flux_ip_adapter_state_dict(model): + # "ip_adapter" (cross-attention weights) + ip_cross_attn_state_dict = {} + key_id = 0 + + for name in model.attn_processors.keys(): + if name.startswith("single_transformer_blocks"): + continue + + joint_attention_dim = model.config["joint_attention_dim"] + hidden_size = model.config["num_attention_heads"] * model.config["attention_head_dim"] + sd = FluxIPAdapterJointAttnProcessor2_0( + hidden_size=hidden_size, cross_attention_dim=joint_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + f"{key_id}.to_k_ip.bias": sd["to_k_ip.0.bias"], + f"{key_id}.to_v_ip.bias": sd["to_v_ip.0.bias"], + } + ) + + key_id += 1 + + # "image_proj" (ImageProjection layer weights) + + image_projection = ImageProjection( + cross_attention_dim=model.config["joint_attention_dim"], + image_embed_dim=( + model.config["pooled_projection_dim"] if "pooled_projection_dim" in model.config.keys() else 768 + ), + num_image_text_embeds=4, + ) + + ip_image_projection_state_dict = {} + sd = image_projection.state_dict() + ip_image_projection_state_dict.update( + { + "proj.weight": sd["image_embeds.weight"], + "proj.bias": sd["image_embeds.bias"], + "norm.weight": sd["norm.weight"], + "norm.bias": sd["norm.bias"], + } + ) + + del sd + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +class FluxTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = FluxTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. 
+ model_split_percents = [0.7, 0.6, 0.6] + + # Skip setting testing with default: AttnProcessor + uses_custom_attn_processor = True + + @property + def dummy_input(self): + return self.prepare_dummy_input() + + @property + def input_shape(self): + return (16, 4) + + @property + def output_shape(self): + return (16, 4) + + def prepare_dummy_input(self, height=4, width=4): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + sequence_length = 48 + embedding_dim = 32 + + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(torch_device) + text_ids = torch.randn((sequence_length, num_image_channels)).to(torch_device) + image_ids = torch.randn((height * width, num_image_channels)).to(torch_device) + timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "img_ids": image_ids, + "txt_ids": text_ids, + "pooled_projections": pooled_prompt_embeds, + "timestep": timestep, + } + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 1, + "in_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 16, + "num_attention_heads": 2, + "joint_attention_dim": 32, + "pooled_projection_dim": 32, + "axes_dims_rope": [4, 4, 8], + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_deprecated_inputs_img_txt_ids_3d(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output_1 = model(**inputs_dict).to_tuple()[0] + + # update inputs_dict with txt_ids and img_ids as 3d tensors (deprecated) + text_ids_3d = inputs_dict["txt_ids"].unsqueeze(0) + image_ids_3d = inputs_dict["img_ids"].unsqueeze(0) + + assert text_ids_3d.ndim == 3, "text_ids_3d should be a 3d tensor" + assert image_ids_3d.ndim == 3, "img_ids_3d should be a 3d tensor" + + inputs_dict["txt_ids"] = text_ids_3d + inputs_dict["img_ids"] = image_ids_3d + + with torch.no_grad(): + output_2 = model(**inputs_dict).to_tuple()[0] + + self.assertEqual(output_1.shape, output_2.shape) + self.assertTrue( + torch.allclose(output_1, output_2, atol=1e-5), + msg="output with deprecated inputs (img_ids and txt_ids as 3d torch tensors) are not equal as them as 2d inputs", + ) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"FluxTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + # The test exists for cases like + # https://github.com/huggingface/diffusers/issues/11874 + @unittest.skipIf(not is_peft_available(), "Only with PEFT") + def test_lora_exclude_modules(self): + from peft import LoraConfig, get_peft_model_state_dict, inject_adapter_in_model, set_peft_model_state_dict + + lora_rank = 4 + target_module = "single_transformer_blocks.0.proj_out" + adapter_name = "foo" + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + state_dict = model.state_dict() + target_mod_shape = state_dict[f"{target_module}.weight"].shape + lora_state_dict = { + f"{target_module}.lora_A.weight": torch.ones(lora_rank, target_mod_shape[1]) * 22, + f"{target_module}.lora_B.weight": 
torch.ones(target_mod_shape[0], lora_rank) * 33, + } + # Passing exclude_modules should no longer be necessary (or even passing target_modules, for that matter). + config = LoraConfig( + r=lora_rank, target_modules=["single_transformer_blocks.0.proj_out"], exclude_modules=["proj_out"] + ) + inject_adapter_in_model(config, model, adapter_name=adapter_name, state_dict=lora_state_dict) + set_peft_model_state_dict(model, lora_state_dict, adapter_name) + retrieved_lora_state_dict = get_peft_model_state_dict(model, adapter_name=adapter_name) + assert len(retrieved_lora_state_dict) == len(lora_state_dict) + assert (retrieved_lora_state_dict["single_transformer_blocks.0.proj_out.lora_A.weight"] == 22).all() + assert (retrieved_lora_state_dict["single_transformer_blocks.0.proj_out.lora_B.weight"] == 33).all() + + +class FluxTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = FluxTransformer2DModel + different_shapes_for_compilation = [(4, 4), (4, 8), (8, 8)] + + def prepare_init_args_and_inputs_for_common(self): + return FluxTransformerTests().prepare_init_args_and_inputs_for_common() + + def prepare_dummy_input(self, height, width): + return FluxTransformerTests().prepare_dummy_input(height=height, width=width) + + +class FluxTransformerLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): + model_class = FluxTransformer2DModel + different_shapes_for_compilation = [(4, 4), (4, 8), (8, 8)] + + def prepare_init_args_and_inputs_for_common(self): + return FluxTransformerTests().prepare_init_args_and_inputs_for_common() + + def prepare_dummy_input(self, height, width): + return FluxTransformerTests().prepare_dummy_input(height=height, width=width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hidream.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hidream.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd5f8c7fd07a4b98debe3d08a7b2fc83be6a6c0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hidream.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
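Putting the Flux pieces above together, a standalone sketch that builds the tiny config from `prepare_init_args_and_inputs_for_common()`, feeds packed latents with 2D `txt_ids`/`img_ids`, and leaves a commented hook for the compile path covered by `TorchCompileTesterMixin`:

```python
import torch
from diffusers import FluxTransformer2DModel

model = FluxTransformer2DModel(
    patch_size=1,
    in_channels=4,
    num_layers=1,
    num_single_layers=1,
    attention_head_dim=16,
    num_attention_heads=2,
    joint_attention_dim=32,
    pooled_projection_dim=32,
    axes_dims_rope=[4, 4, 8],
).eval()

batch_size, height, width, seq_len, embed_dim = 1, 4, 4, 48, 32
inputs = {
    # Flux consumes pre-packed latents of shape (batch, height * width, channels).
    "hidden_states": torch.randn(batch_size, height * width, 4),
    "encoder_hidden_states": torch.randn(batch_size, seq_len, embed_dim),
    "pooled_projections": torch.randn(batch_size, embed_dim),
    "timestep": torch.tensor([1.0]),
    # Positional ids are 2D (sequence, 3); the 3D variant is deprecated, as
    # test_deprecated_inputs_img_txt_ids_3d above checks.
    "txt_ids": torch.randn(seq_len, 3),
    "img_ids": torch.randn(height * width, 3),
}

# model = torch.compile(model)  # optional; the TorchCompileTesterMixin covers this path

with torch.no_grad():
    sample = model(**inputs).to_tuple()[0]
print(sample.shape)  # (1, 16, 4), matching the test's per-sample output_shape
```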
+ +import unittest + +import torch + +from diffusers import HiDreamImageTransformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class HiDreamTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = HiDreamImageTransformer2DModel + main_input_name = "hidden_states" + model_split_percents = [0.8, 0.8, 0.9] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = width = 32 + embedding_dim_t5, embedding_dim_llama, embedding_dim_pooled = 8, 4, 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states_t5 = torch.randn((batch_size, sequence_length, embedding_dim_t5)).to(torch_device) + encoder_hidden_states_llama3 = torch.randn((batch_size, batch_size, sequence_length, embedding_dim_llama)).to( + torch_device + ) + pooled_embeds = torch.randn((batch_size, embedding_dim_pooled)).to(torch_device) + timesteps = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states_t5": encoder_hidden_states_t5, + "encoder_hidden_states_llama3": encoder_hidden_states_llama3, + "pooled_embeds": pooled_embeds, + "timesteps": timesteps, + } + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 2, + "in_channels": 4, + "out_channels": 4, + "num_layers": 1, + "num_single_layers": 1, + "attention_head_dim": 8, + "num_attention_heads": 4, + "caption_channels": [8, 4], + "text_emb_dim": 8, + "num_routed_experts": 2, + "num_activated_experts": 2, + "axes_dims_rope": (4, 2, 2), + "max_resolution": (32, 32), + "llama_layers": (0, 1), + "force_inference_output": True, # TODO: as we don't implement MoE loss in training tests. + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skip("HiDreamImageTransformer2DModel uses a dedicated attention processor. This test doesn't apply") + def test_set_attn_processor_for_determinism(self): + pass + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"HiDreamImageTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_dit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_dit.py new file mode 100644 index 0000000000000000000000000000000000000000..d82a62d58ec3e1f5a0109ba959cc51d6b8fd5ce0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_dit.py @@ -0,0 +1,113 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
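A standalone sketch of the tiny HiDream config above, mainly to show the two text streams (`encoder_hidden_states_t5` and the stacked `encoder_hidden_states_llama3`) and the `timesteps` keyword that the dummy inputs use:

```python
import torch
from diffusers import HiDreamImageTransformer2DModel

model = HiDreamImageTransformer2DModel(
    patch_size=2,
    in_channels=4,
    out_channels=4,
    num_layers=1,
    num_single_layers=1,
    attention_head_dim=8,
    num_attention_heads=4,
    caption_channels=[8, 4],
    text_emb_dim=8,
    num_routed_experts=2,
    num_activated_experts=2,
    axes_dims_rope=(4, 2, 2),
    max_resolution=(32, 32),
    llama_layers=(0, 1),
    force_inference_output=True,  # no MoE loss, as in the test config
).eval()

batch_size, seq_len = 2, 8
inputs = {
    "hidden_states": torch.randn(batch_size, 4, 32, 32),
    "encoder_hidden_states_t5": torch.randn(batch_size, seq_len, 8),
    # Shaped to mirror dummy_input above: (2, batch, sequence, llama_dim).
    "encoder_hidden_states_llama3": torch.randn(2, batch_size, seq_len, 4),
    "pooled_embeds": torch.randn(batch_size, 8),
    "timesteps": torch.randint(0, 1000, (batch_size,)),
}

with torch.no_grad():
    out = model(**inputs)
sample = out if torch.is_tensor(out) else out[0]
print(sample.shape)  # each sample is expected to be (4, 32, 32)
```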
+ +import unittest + +import torch + +from diffusers import HunyuanDiT2DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class HunyuanDiTTests(ModelTesterMixin, unittest.TestCase): + model_class = HunyuanDiT2DModel + main_input_name = "hidden_states" + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = width = 8 + embedding_dim = 8 + sequence_length = 4 + sequence_length_t5 = 4 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + text_embedding_mask = torch.ones(size=(batch_size, sequence_length)).to(torch_device) + encoder_hidden_states_t5 = torch.randn((batch_size, sequence_length_t5, embedding_dim)).to(torch_device) + text_embedding_mask_t5 = torch.ones(size=(batch_size, sequence_length_t5)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,), dtype=encoder_hidden_states.dtype).to(torch_device) + + original_size = [1024, 1024] + target_size = [16, 16] + crops_coords_top_left = [0, 0] + add_time_ids = list(original_size + target_size + crops_coords_top_left) + add_time_ids = torch.tensor([add_time_ids, add_time_ids], dtype=encoder_hidden_states.dtype).to(torch_device) + style = torch.zeros(size=(batch_size,), dtype=int).to(torch_device) + image_rotary_emb = [ + torch.ones(size=(1, 8), dtype=encoder_hidden_states.dtype), + torch.zeros(size=(1, 8), dtype=encoder_hidden_states.dtype), + ] + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "text_embedding_mask": text_embedding_mask, + "encoder_hidden_states_t5": encoder_hidden_states_t5, + "text_embedding_mask_t5": text_embedding_mask_t5, + "timestep": timestep, + "image_meta_size": add_time_ids, + "style": style, + "image_rotary_emb": image_rotary_emb, + } + + @property + def input_shape(self): + return (4, 8, 8) + + @property + def output_shape(self): + return (8, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 8, + "patch_size": 2, + "in_channels": 4, + "num_layers": 1, + "attention_head_dim": 8, + "num_attention_heads": 2, + "cross_attention_dim": 8, + "cross_attention_dim_t5": 8, + "pooled_projection_dim": 4, + "hidden_size": 16, + "text_len": 4, + "text_len_t5": 4, + "activation_fn": "gelu-approximate", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output( + expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape + ) + + @unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0") + def test_set_xformers_attn_processor_for_determinism(self): + pass + + @unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0") + def test_set_attn_processor_for_determinism(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_video.py new file mode 100644 index 0000000000000000000000000000000000000000..385a5eefd58b840556f86cd1d285161d26c41ab5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_video.py @@ -0,0 +1,323 @@ +# Copyright 2025 HuggingFace Inc. 
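HunyuanDiT takes the richest conditioning set in this group; a standalone sketch mirroring the tiny config and dummy inputs above, including the size/crop metadata and the precomputed rotary embeddings:

```python
import torch
from diffusers import HunyuanDiT2DModel

model = HunyuanDiT2DModel(
    sample_size=8,
    patch_size=2,
    in_channels=4,
    num_layers=1,
    attention_head_dim=8,
    num_attention_heads=2,
    cross_attention_dim=8,
    cross_attention_dim_t5=8,
    pooled_projection_dim=4,
    hidden_size=16,
    text_len=4,
    text_len_t5=4,
    activation_fn="gelu-approximate",
).eval()

batch_size, seq_len = 2, 4
# original size + target size + crop coords, as built in dummy_input above.
add_time_ids = torch.tensor([[1024, 1024, 16, 16, 0, 0]] * batch_size, dtype=torch.float32)
inputs = {
    "hidden_states": torch.randn(batch_size, 4, 8, 8),
    "encoder_hidden_states": torch.randn(batch_size, seq_len, 8),
    "text_embedding_mask": torch.ones(batch_size, seq_len),
    "encoder_hidden_states_t5": torch.randn(batch_size, seq_len, 8),
    "text_embedding_mask_t5": torch.ones(batch_size, seq_len),
    "timestep": torch.randint(0, 1000, (batch_size,)).float(),
    "image_meta_size": add_time_ids,
    "style": torch.zeros(batch_size, dtype=torch.int64),
    "image_rotary_emb": [torch.ones(1, 8), torch.zeros(1, 8)],
}

with torch.no_grad():
    out = model(**inputs)
sample = out if torch.is_tensor(out) else out[0]
# The test's output_shape is (8, 8, 8): twice the latent channels, since the
# default config predicts a learned-sigma channel set alongside the sample.
print(sample.shape)
```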
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import HunyuanVideoTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +class HunyuanVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 4 + num_frames = 1 + height = 16 + width = 16 + text_encoder_embedding_dim = 16 + pooled_projection_dim = 8 + sequence_length = 12 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device, dtype=torch.float32) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_projections, + "encoder_attention_mask": encoder_attention_mask, + "guidance": guidance, + } + + @property + def input_shape(self): + return (4, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 4, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 10, + "num_layers": 1, + "num_single_layers": 1, + "num_refiner_layers": 1, + "patch_size": 1, + "patch_size_t": 1, + "guidance_embeds": True, + "text_embed_dim": 16, + "pooled_projection_dim": 8, + "rope_axes_dim": (2, 4, 4), + "image_condition_type": None, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"HunyuanVideoTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class HunyuanTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + + def prepare_init_args_and_inputs_for_common(self): + return HunyuanVideoTransformer3DTests().prepare_init_args_and_inputs_for_common() + + +class HunyuanSkyreelsImageToVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 8 + num_frames = 1 + height = 16 + width = 16 + text_encoder_embedding_dim = 16 + pooled_projection_dim = 8 + sequence_length = 12 + + hidden_states = 
torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device, dtype=torch.float32) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_projections, + "encoder_attention_mask": encoder_attention_mask, + "guidance": guidance, + } + + @property + def input_shape(self): + return (8, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 8, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 10, + "num_layers": 1, + "num_single_layers": 1, + "num_refiner_layers": 1, + "patch_size": 1, + "patch_size_t": 1, + "guidance_embeds": True, + "text_embed_dim": 16, + "pooled_projection_dim": 8, + "rope_axes_dim": (2, 4, 4), + "image_condition_type": None, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output(expected_output_shape=(1, *self.output_shape)) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"HunyuanVideoTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class HunyuanSkyreelsImageToVideoCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + + def prepare_init_args_and_inputs_for_common(self): + return HunyuanSkyreelsImageToVideoTransformer3DTests().prepare_init_args_and_inputs_for_common() + + +class HunyuanVideoImageToVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 2 * 4 + 1 + num_frames = 1 + height = 16 + width = 16 + text_encoder_embedding_dim = 16 + pooled_projection_dim = 8 + sequence_length = 12 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_projections, + "encoder_attention_mask": encoder_attention_mask, + } + + @property + def input_shape(self): + return (8, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 2 * 4 + 1, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 10, + "num_layers": 1, + "num_single_layers": 1, + "num_refiner_layers": 1, + "patch_size": 1, + "patch_size_t": 1, + "guidance_embeds": False, + "text_embed_dim": 
16, + "pooled_projection_dim": 8, + "rope_axes_dim": (2, 4, 4), + "image_condition_type": "latent_concat", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output(expected_output_shape=(1, *self.output_shape)) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"HunyuanVideoTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class HunyuanImageToVideoCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + + def prepare_init_args_and_inputs_for_common(self): + return HunyuanVideoImageToVideoTransformer3DTests().prepare_init_args_and_inputs_for_common() + + +class HunyuanVideoTokenReplaceImageToVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 2 + num_frames = 1 + height = 16 + width = 16 + text_encoder_embedding_dim = 16 + pooled_projection_dim = 8 + sequence_length = 12 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device, dtype=torch.float32) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_projections, + "encoder_attention_mask": encoder_attention_mask, + "guidance": guidance, + } + + @property + def input_shape(self): + return (8, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 2, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 10, + "num_layers": 1, + "num_single_layers": 1, + "num_refiner_layers": 1, + "patch_size": 1, + "patch_size_t": 1, + "guidance_embeds": True, + "text_embed_dim": 16, + "pooled_projection_dim": 8, + "rope_axes_dim": (2, 4, 4), + "image_condition_type": "token_replace", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output(expected_output_shape=(1, *self.output_shape)) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"HunyuanVideoTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class HunyuanVideoTokenReplaceCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = HunyuanVideoTransformer3DModel + + def prepare_init_args_and_inputs_for_common(self): + return HunyuanVideoTokenReplaceImageToVideoTransformer3DTests().prepare_init_args_and_inputs_for_common() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py new file mode 100644 index 0000000000000000000000000000000000000000..00a2b27e02b66d195d8f1663f1e52ff3cbb5db5a --- /dev/null 
+++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_hunyuan_video_framepack.py @@ -0,0 +1,116 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import HunyuanVideoFramepackTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class HunyuanVideoTransformer3DTests(ModelTesterMixin, unittest.TestCase): + model_class = HunyuanVideoFramepackTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + model_split_percents = [0.5, 0.7, 0.9] + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 4 + num_frames = 3 + height = 4 + width = 4 + text_encoder_embedding_dim = 16 + image_encoder_embedding_dim = 16 + pooled_projection_dim = 8 + sequence_length = 12 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + pooled_projections = torch.randn((batch_size, pooled_projection_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).to(torch_device) + image_embeds = torch.randn((batch_size, sequence_length, image_encoder_embedding_dim)).to(torch_device) + indices_latents = torch.ones((3,)).to(torch_device) + latents_clean = torch.randn((batch_size, num_channels, num_frames - 1, height, width)).to(torch_device) + indices_latents_clean = torch.ones((num_frames - 1,)).to(torch_device) + latents_history_2x = torch.randn((batch_size, num_channels, num_frames - 1, height, width)).to(torch_device) + indices_latents_history_2x = torch.ones((num_frames - 1,)).to(torch_device) + latents_history_4x = torch.randn((batch_size, num_channels, (num_frames - 1) * 4, height, width)).to( + torch_device + ) + indices_latents_history_4x = torch.ones(((num_frames - 1) * 4,)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + guidance = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_projections, + "encoder_attention_mask": encoder_attention_mask, + "guidance": guidance, + "image_embeds": image_embeds, + "indices_latents": indices_latents, + "latents_clean": latents_clean, + "indices_latents_clean": indices_latents_clean, + "latents_history_2x": latents_history_2x, + "indices_latents_history_2x": indices_latents_history_2x, + "latents_history_4x": latents_history_4x, + "indices_latents_history_4x": indices_latents_history_4x, + } + + @property + def input_shape(self): + return (4, 3, 4, 4) + + @property + def output_shape(self): + return (4, 3, 4, 4) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + 
"in_channels": 4, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 10, + "num_layers": 1, + "num_single_layers": 1, + "num_refiner_layers": 1, + "patch_size": 2, + "patch_size_t": 1, + "guidance_embeds": True, + "text_embed_dim": 16, + "pooled_projection_dim": 8, + "rope_axes_dim": (2, 4, 4), + "image_condition_type": None, + "has_image_proj": True, + "image_proj_dim": 16, + "has_clean_x_embedder": True, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"HunyuanVideoFramepackTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_latte.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_latte.py new file mode 100644 index 0000000000000000000000000000000000000000..7bf2c52e626950b04d5f538a56f3b68a8b9131db --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_latte.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import LatteTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class LatteTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = LatteTransformer3DModel + main_input_name = "hidden_states" + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 1 + height = width = 8 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "enable_temporal_attentions": True, + } + + @property + def input_shape(self): + return (4, 1, 8, 8) + + @property + def output_shape(self): + return (8, 1, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 8, + "num_layers": 1, + "patch_size": 2, + "attention_head_dim": 4, + "num_attention_heads": 2, + "caption_channels": 8, + "in_channels": 4, + "cross_attention_dim": 8, + "out_channels": 8, + "attention_bias": True, + "activation_fn": "gelu-approximate", + "num_embeds_ada_norm": 1000, + "norm_type": "ada_norm_single", + "norm_elementwise_affine": False, + "norm_eps": 1e-6, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + super().test_output( + expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape + ) + + def 
test_gradient_checkpointing_is_applied(self): + expected_set = {"LatteTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_ltx.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_ltx.py new file mode 100644 index 0000000000000000000000000000000000000000..e912463bbf6ad6645a8c1476544792333d750e21 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_ltx.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import LTXVideoTransformer3DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +class LTXTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = LTXVideoTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 2 + height = 16 + width = 16 + embedding_dim = 16 + sequence_length = 16 + + hidden_states = torch.randn((batch_size, num_frames * height * width, num_channels)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "encoder_attention_mask": encoder_attention_mask, + "num_frames": num_frames, + "height": height, + "width": width, + } + + @property + def input_shape(self): + return (512, 4) + + @property + def output_shape(self): + return (512, 4) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 4, + "out_channels": 4, + "num_attention_heads": 2, + "attention_head_dim": 8, + "cross_attention_dim": 16, + "num_layers": 1, + "qk_norm": "rms_norm_across_heads", + "caption_channels": 16, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"LTXVideoTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class LTXTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = LTXVideoTransformer3DModel + + def prepare_init_args_and_inputs_for_common(self): + return LTXTransformerTests().prepare_init_args_and_inputs_for_common() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_lumina.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_lumina.py 
new file mode 100644 index 0000000000000000000000000000000000000000..0024aa106c6d64698283b2e729ccfa53eae71c15 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_lumina.py @@ -0,0 +1,112 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import LuminaNextDiT2DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class LuminaNextDiT2DModelTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = LuminaNextDiT2DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + """ + Args: + None + Returns: + Dict: Dictionary of dummy input tensors + """ + batch_size = 2 # N + num_channels = 4 # C + height = width = 16 # H, W + embedding_dim = 32 # D + sequence_length = 16 # L + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.rand(size=(batch_size,)).to(torch_device) + encoder_mask = torch.randn(size=(batch_size, sequence_length)).to(torch_device) + image_rotary_emb = torch.randn((384, 384, 4)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "encoder_mask": encoder_mask, + "image_rotary_emb": image_rotary_emb, + "cross_attention_kwargs": {}, + } + + @property + def input_shape(self): + """ + Args: + None + Returns: + Tuple: (int, int, int) + """ + return (4, 16, 16) + + @property + def output_shape(self): + """ + Args: + None + Returns: + Tuple: (int, int, int) + """ + return (4, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + """ + Args: + None + + Returns: + Tuple: (Dict, Dict) + """ + init_dict = { + "sample_size": 16, + "patch_size": 2, + "in_channels": 4, + "hidden_size": 24, + "num_layers": 2, + "num_attention_heads": 3, + "num_kv_heads": 1, + "multiple_of": 16, + "ffn_dim_multiplier": None, + "norm_eps": 1e-5, + "learn_sigma": False, + "qk_norm": True, + "cross_attention_dim": 32, + "scaling_factor": 1.0, + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_lumina2.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_lumina2.py new file mode 100644 index 0000000000000000000000000000000000000000..4efae3d4b71391c9140a13e6e3fed8aa064f0bbe --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_lumina2.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import Lumina2Transformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class Lumina2Transformer2DModelTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = Lumina2Transformer2DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 2 # N + num_channels = 4 # C + height = width = 16 # H, W + embedding_dim = 32 # D + sequence_length = 16 # L + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.rand(size=(batch_size,)).to(torch_device) + attention_mask = torch.ones(size=(batch_size, sequence_length), dtype=torch.bool).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "encoder_attention_mask": attention_mask, + } + + @property + def input_shape(self): + return (4, 16, 16) + + @property + def output_shape(self): + return (4, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 16, + "patch_size": 2, + "in_channels": 4, + "hidden_size": 24, + "num_layers": 2, + "num_refiner_layers": 1, + "num_attention_heads": 3, + "num_kv_heads": 1, + "multiple_of": 2, + "ffn_dim_multiplier": None, + "norm_eps": 1e-5, + "scaling_factor": 1.0, + "axes_dim_rope": (4, 2, 2), + "axes_lens": (128, 128, 128), + "cap_feat_dim": 32, + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"Lumina2Transformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_mochi.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_mochi.py new file mode 100644 index 0000000000000000000000000000000000000000..931b5874ee785c3e3c7f326f6f3371622fa84a76 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_mochi.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import MochiTransformer3DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class MochiTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = MochiTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + # Overriding it because of the transformer size. + model_split_percents = [0.7, 0.6, 0.6] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + num_frames = 2 + height = 16 + width = 16 + embedding_dim = 16 + sequence_length = 16 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + encoder_attention_mask = torch.ones((batch_size, sequence_length)).bool().to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + "encoder_attention_mask": encoder_attention_mask, + } + + @property + def input_shape(self): + return (4, 2, 16, 16) + + @property + def output_shape(self): + return (4, 2, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 2, + "num_attention_heads": 2, + "attention_head_dim": 8, + "num_layers": 2, + "pooled_projection_dim": 16, + "in_channels": 4, + "out_channels": None, + "qk_norm": "rms_norm", + "text_embed_dim": 16, + "time_embed_dim": 4, + "activation_fn": "swiglu", + "max_sequence_length": 16, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"MochiTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_omnigen.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_omnigen.py new file mode 100644 index 0000000000000000000000000000000000000000..f1963ddb77094180e2b18b9de6902d2581609a25 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_omnigen.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import OmniGenTransformer2DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class OmniGenTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = OmniGenTransformer2DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + model_split_percents = [0.1, 0.1, 0.1] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = 8 + width = 8 + sequence_length = 24 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + timestep = torch.rand(size=(batch_size,), dtype=hidden_states.dtype).to(torch_device) + input_ids = torch.randint(0, 10, (batch_size, sequence_length)).to(torch_device) + input_img_latents = [torch.randn((1, num_channels, height, width)).to(torch_device)] + input_image_sizes = {0: [[0, 0 + height * width // 2 // 2]]} + + attn_seq_length = sequence_length + 1 + height * width // 2 // 2 + attention_mask = torch.ones((batch_size, attn_seq_length, attn_seq_length)).to(torch_device) + position_ids = torch.LongTensor([list(range(attn_seq_length))] * batch_size).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + "input_ids": input_ids, + "input_img_latents": input_img_latents, + "input_image_sizes": input_image_sizes, + "attention_mask": attention_mask, + "position_ids": position_ids, + } + + @property + def input_shape(self): + return (4, 8, 8) + + @property + def output_shape(self): + return (4, 8, 8) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "hidden_size": 16, + "num_attention_heads": 4, + "num_key_value_heads": 4, + "intermediate_size": 32, + "num_layers": 20, + "pad_token_id": 0, + "vocab_size": 1000, + "in_channels": 4, + "time_step_dim": 4, + "rope_scaling": {"long_factor": list(range(1, 3)), "short_factor": list(range(1, 3))}, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"OmniGenTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_qwenimage.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_qwenimage.py new file mode 100644 index 0000000000000000000000000000000000000000..b24fa90503efb5b69b72e27671ae9a22d72e2c5a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_qwenimage.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import pytest +import torch + +from diffusers import QwenImageTransformer2DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +class QwenImageTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = QwenImageTransformer2DModel + main_input_name = "hidden_states" + # We override the items here because the transformer under consideration is small. + model_split_percents = [0.7, 0.6, 0.6] + + # Skip setting testing with default: AttnProcessor + uses_custom_attn_processor = True + + @property + def dummy_input(self): + return self.prepare_dummy_input() + + @property + def input_shape(self): + return (16, 16) + + @property + def output_shape(self): + return (16, 16) + + def prepare_dummy_input(self, height=4, width=4): + batch_size = 1 + num_latent_channels = embedding_dim = 16 + sequence_length = 7 + vae_scale_factor = 4 + + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + encoder_hidden_states_mask = torch.ones((batch_size, sequence_length)).to(torch_device, torch.long) + timestep = torch.tensor([1.0]).to(torch_device).expand(batch_size) + orig_height = height * 2 * vae_scale_factor + orig_width = width * 2 * vae_scale_factor + img_shapes = [(1, orig_height // vae_scale_factor // 2, orig_width // vae_scale_factor // 2)] * batch_size + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "encoder_hidden_states_mask": encoder_hidden_states_mask, + "timestep": timestep, + "img_shapes": img_shapes, + "txt_seq_lens": encoder_hidden_states_mask.sum(dim=1).tolist(), + } + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 2, + "in_channels": 16, + "out_channels": 4, + "num_layers": 2, + "attention_head_dim": 16, + "num_attention_heads": 3, + "joint_attention_dim": 16, + "guidance_embeds": False, + "axes_dims_rope": (8, 4, 4), + } + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"QwenImageTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class QwenImageTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = QwenImageTransformer2DModel + + def prepare_init_args_and_inputs_for_common(self): + return QwenImageTransformerTests().prepare_init_args_and_inputs_for_common() + + def prepare_dummy_input(self, height, width): + return QwenImageTransformerTests().prepare_dummy_input(height=height, width=width) + + @pytest.mark.xfail(condition=True, reason="RoPE needs to be revisited.", strict=True) + def test_torch_compile_recompilation_and_graph_break(self): + super().test_torch_compile_recompilation_and_graph_break() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_sana.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_sana.py new file mode 100644 index 0000000000000000000000000000000000000000..2e316c3aedc1dcf13dc07bb23857aee7de1c093e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_sana.py @@ -0,0 +1,83 @@ +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import SanaTransformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class SanaTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = SanaTransformer2DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + model_split_percents = [0.7, 0.7, 0.9] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = 32 + width = 32 + embedding_dim = 8 + sequence_length = 8 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": 1, + "in_channels": 4, + "out_channels": 4, + "num_layers": 1, + "attention_head_dim": 4, + "num_attention_heads": 2, + "num_cross_attention_heads": 2, + "cross_attention_head_dim": 4, + "cross_attention_dim": 8, + "caption_channels": 8, + "sample_size": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"SanaTransformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_sd3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_sd3.py new file mode 100644 index 0000000000000000000000000000000000000000..c4ee7017a380e2eecd3c4a97f81bd67283c1f886 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_sd3.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import SD3Transformer2DModel +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class SD3TransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = SD3Transformer2DModel + main_input_name = "hidden_states" + model_split_percents = [0.8, 0.8, 0.9] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = width = embedding_dim = 32 + pooled_embedding_dim = embedding_dim * 2 + sequence_length = 154 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 32, + "patch_size": 1, + "in_channels": 4, + "num_layers": 4, + "attention_head_dim": 8, + "num_attention_heads": 4, + "caption_projection_dim": 32, + "joint_attention_dim": 32, + "pooled_projection_dim": 64, + "out_channels": 4, + "pos_embed_max_size": 96, + "dual_attention_layers": (), + "qk_norm": None, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_enable_works(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.enable_xformers_memory_efficient_attention() + + assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", ( + "xformers is not enabled" + ) + + @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. 
This test doesn't apply") + def test_set_attn_processor_for_determinism(self): + pass + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"SD3Transformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class SD35TransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = SD3Transformer2DModel + main_input_name = "hidden_states" + model_split_percents = [0.8, 0.8, 0.9] + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = width = embedding_dim = 32 + pooled_embedding_dim = embedding_dim * 2 + sequence_length = 154 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device) + pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 32, + "patch_size": 1, + "in_channels": 4, + "num_layers": 4, + "attention_head_dim": 8, + "num_attention_heads": 4, + "caption_projection_dim": 32, + "joint_attention_dim": 32, + "pooled_projection_dim": 64, + "out_channels": 4, + "pos_embed_max_size": 96, + "dual_attention_layers": (0,), + "qk_norm": "rms_norm", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_enable_works(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.enable_xformers_memory_efficient_attention() + + assert model.transformer_blocks[0].attn.processor.__class__.__name__ == "XFormersJointAttnProcessor", ( + "xformers is not enabled" + ) + + @unittest.skip("SD3Transformer2DModel uses a dedicated attention processor. 
This test doesn't apply") + def test_set_attn_processor_for_determinism(self): + pass + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"SD3Transformer2DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_skip_layers(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict).to(torch_device) + + # Forward pass without skipping layers + output_full = model(**inputs_dict).sample + + # Forward pass with skipping layers 0 (since there's only one layer in this test setup) + inputs_dict_with_skip = inputs_dict.copy() + inputs_dict_with_skip["skip_layers"] = [0] + output_skip = model(**inputs_dict_with_skip).sample + + # Check that the outputs are different + self.assertFalse( + torch.allclose(output_full, output_skip, atol=1e-5), "Outputs should differ when layers are skipped" + ) + + # Check that the outputs have the same shape + self.assertEqual(output_full.shape, output_skip.shape, "Outputs should have the same shape") diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_skyreels_v2.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_skyreels_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..8c36d8256ee90ff3d81096ced299a2b2b41920eb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_skyreels_v2.py @@ -0,0 +1,84 @@ +# Copyright 2024 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import SkyReelsV2Transformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +class SkyReelsV2Transformer3DTests(ModelTesterMixin, TorchCompileTesterMixin, unittest.TestCase): + model_class = SkyReelsV2Transformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 4 + num_frames = 2 + height = 16 + width = 16 + text_encoder_embedding_dim = 16 + sequence_length = 12 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": (1, 2, 2), + "num_attention_heads": 2, + "attention_head_dim": 12, + "in_channels": 4, + "out_channels": 4, + "text_dim": 16, + "freq_dim": 256, + "ffn_dim": 32, + "num_layers": 2, + "cross_attn_norm": True, + "qk_norm": "rms_norm_across_heads", + "rope_max_seq_len": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"SkyReelsV2Transformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_temporal.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_temporal.py new file mode 100644 index 0000000000000000000000000000000000000000..aff83be511247e3272ca37ac6ade2359f357dd60 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_temporal.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers.models.transformers import TransformerTemporalModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin + + +enable_full_determinism() + + +class TemporalTransformerTests(ModelTesterMixin, unittest.TestCase): + model_class = TransformerTemporalModel + main_input_name = "hidden_states" + + @property + def dummy_input(self): + batch_size = 2 + num_channels = 4 + height = width = 32 + + hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + + return { + "hidden_states": hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "num_attention_heads": 8, + "attention_head_dim": 4, + "in_channels": 4, + "num_layers": 1, + "norm_num_groups": 1, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_wan.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_wan.py new file mode 100644 index 0000000000000000000000000000000000000000..9f248f990c8aaeebed46291b9aa7cc2e433a6d72 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/transformers/test_models_transformer_wan.py @@ -0,0 +1,91 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from diffusers import WanTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, TorchCompileTesterMixin + + +enable_full_determinism() + + +class WanTransformer3DTests(ModelTesterMixin, unittest.TestCase): + model_class = WanTransformer3DModel + main_input_name = "hidden_states" + uses_custom_attn_processor = True + + @property + def dummy_input(self): + batch_size = 1 + num_channels = 4 + num_frames = 2 + height = 16 + width = 16 + text_encoder_embedding_dim = 16 + sequence_length = 12 + + hidden_states = torch.randn((batch_size, num_channels, num_frames, height, width)).to(torch_device) + timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device) + encoder_hidden_states = torch.randn((batch_size, sequence_length, text_encoder_embedding_dim)).to(torch_device) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "timestep": timestep, + } + + @property + def input_shape(self): + return (4, 1, 16, 16) + + @property + def output_shape(self): + return (4, 1, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "patch_size": (1, 2, 2), + "num_attention_heads": 2, + "attention_head_dim": 12, + "in_channels": 4, + "out_channels": 4, + "text_dim": 16, + "freq_dim": 256, + "ffn_dim": 32, + "num_layers": 2, + "cross_attn_norm": True, + "qk_norm": "rms_norm_across_heads", + "rope_max_seq_len": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"WanTransformer3DModel"} + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + +class WanTransformerCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = WanTransformer3DModel + + def prepare_init_args_and_inputs_for_common(self): + return WanTransformer3DTests().prepare_init_args_and_inputs_for_common() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_1d.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_1d.py new file mode 100644 index 0000000000000000000000000000000000000000..bac017e7e7d32abd5eccd5583cdcc2e1ded36b31 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_1d.py @@ -0,0 +1,329 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import pytest +import torch + +from diffusers import UNet1DModel + +from ...testing_utils import ( + backend_manual_seed, + floats_tensor, + slow, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +class UNet1DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet1DModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 16) + + @unittest.skip("Test not supported.") + def test_ema_training(self): + pass + + @unittest.skip("Test not supported.") + def test_training(self): + pass + + @unittest.skip("Test not supported.") + def test_layerwise_casting_training(self): + pass + + def test_determinism(self): + super().test_determinism() + + def test_outputs_equivalence(self): + super().test_outputs_equivalence() + + def test_from_save_pretrained(self): + super().test_from_save_pretrained() + + def test_from_save_pretrained_variant(self): + super().test_from_save_pretrained_variant() + + def test_model_from_pretrained(self): + super().test_model_from_pretrained() + + def test_output(self): + super().test_output() + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (8, 8, 16, 16), + "in_channels": 14, + "out_channels": 14, + "time_embedding_type": "positional", + "use_timestep_embedding": True, + "flip_sin_to_cos": False, + "freq_shift": 1.0, + "out_block_type": "OutConv1DBlock", + "mid_block_type": "MidResTemporalBlock1D", + "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), + "up_block_types": ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D"), + "act_fn": "swish", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" + ) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + num_features = model.config.in_channels + seq_len = 16 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = model(noise, time_step).sample.permute(0, 2, 1) + + output_slice = output[0, -3:, -3:].flatten() + # fmt: off + expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348]) + # fmt: on + self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) + + @unittest.skip("Test not supported.") + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass + + @slow + def test_unet_1d_maestro(self): + model_id = 
"harmonai/maestro-150k" + model = UNet1DModel.from_pretrained(model_id, subfolder="unet") + model.to(torch_device) + + sample_size = 65536 + noise = torch.sin(torch.arange(sample_size)[None, None, :].repeat(1, 2, 1)).to(torch_device) + timestep = torch.tensor([1]).to(torch_device) + + with torch.no_grad(): + output = model(noise, timestep).sample + + output_sum = output.abs().sum() + output_max = output.abs().max() + + assert (output_sum - 224.0896).abs() < 0.5 + assert (output_max - 0.0607).abs() < 4e-4 + + @pytest.mark.xfail( + reason=( + "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations " + "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n" + "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n" + "2. Unskip this test." + ), + ) + def test_layerwise_casting_inference(self): + super().test_layerwise_casting_inference() + + @pytest.mark.xfail( + reason=( + "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations " + "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n" + "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n" + "2. Unskip this test." + ), + ) + def test_layerwise_casting_memory(self): + pass + + +class UNetRLModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet1DModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_features = 14 + seq_len = 16 + + noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) + time_step = torch.tensor([10] * batch_size).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 14, 16) + + @property + def output_shape(self): + return (4, 14, 1) + + def test_determinism(self): + super().test_determinism() + + def test_outputs_equivalence(self): + super().test_outputs_equivalence() + + def test_from_save_pretrained(self): + super().test_from_save_pretrained() + + def test_from_save_pretrained_variant(self): + super().test_from_save_pretrained_variant() + + def test_model_from_pretrained(self): + super().test_model_from_pretrained() + + def test_output(self): + # UNetRL is a value-function is different output shape + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = torch.Size((inputs_dict["sample"].shape[0], 1)) + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + @unittest.skip("Test not supported.") + def test_ema_training(self): + pass + + @unittest.skip("Test not supported.") + def test_training(self): + pass + + @unittest.skip("Test not supported.") + def test_layerwise_casting_training(self): + pass + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 14, + "out_channels": 14, + "down_block_types": ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], + "up_block_types": [], + "out_block_type": "ValueFunction", + "mid_block_type": "ValueFunctionMidBlock1D", + 
"block_out_channels": [32, 64, 128, 256], + "layers_per_block": 1, + "downsample_each_block": True, + "use_timestep_embedding": True, + "freq_shift": 1.0, + "flip_sin_to_cos": False, + "time_embedding_type": "positional", + "act_fn": "mish", + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + value_function, vf_loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" + ) + self.assertIsNotNone(value_function) + self.assertEqual(len(vf_loading_info["missing_keys"]), 0) + + value_function.to(torch_device) + image = value_function(**self.dummy_input) + + assert image is not None, "Make sure output is not None" + + def test_output_pretrained(self): + value_function, vf_loading_info = UNet1DModel.from_pretrained( + "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" + ) + torch.manual_seed(0) + backend_manual_seed(torch_device, 0) + + num_features = value_function.config.in_channels + seq_len = 14 + noise = torch.randn((1, seq_len, num_features)).permute( + 0, 2, 1 + ) # match original, we can update values and remove + time_step = torch.full((num_features,), 0) + + with torch.no_grad(): + output = value_function(noise, time_step).sample + + # fmt: off + expected_output_slice = torch.tensor([165.25] * seq_len) + # fmt: on + self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) + + @unittest.skip("Test not supported.") + def test_forward_with_norm_groups(self): + # Not implemented yet for this UNet + pass + + @pytest.mark.xfail( + reason=( + "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations " + "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n" + "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n" + "2. Unskip this test." + ), + ) + def test_layerwise_casting_inference(self): + pass + + @pytest.mark.xfail( + reason=( + "RuntimeError: 'fill_out' not implemented for 'Float8_e4m3fn'. The error is caused due to certain torch.float8_e4m3fn and torch.float8_e5m2 operations " + "not being supported when using deterministic algorithms (which is what the tests run with). To fix:\n" + "1. Wait for next PyTorch release: https://github.com/pytorch/pytorch/issues/137160.\n" + "2. Unskip this test." + ), + ) + def test_layerwise_casting_memory(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_2d.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_2d.py new file mode 100644 index 0000000000000000000000000000000000000000..e289f44303f23dadb11f0522a3218396ebacbd1d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_2d.py @@ -0,0 +1,416 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import math +import unittest + +import torch + +from diffusers import UNet2DModel +from diffusers.utils import logging + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + require_torch_accelerator, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +logger = logging.get_logger(__name__) + +enable_full_determinism() + + +class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet2DModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (4, 8), + "norm_num_groups": 2, + "down_block_types": ("DownBlock2D", "AttnDownBlock2D"), + "up_block_types": ("AttnUpBlock2D", "UpBlock2D"), + "attention_head_dim": 3, + "out_channels": 3, + "in_channels": 3, + "layers_per_block": 2, + "sample_size": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_mid_block_attn_groups(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["add_attention"] = True + init_dict["attn_norm_num_groups"] = 4 + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + self.assertIsNotNone( + model.mid_block.attentions[0].group_norm, "Mid block Attention group norm should exist but does not." 
+ ) + self.assertEqual( + model.mid_block.attentions[0].group_norm.num_groups, + init_dict["attn_norm_num_groups"], + "Mid block Attention group norm does not have the expected number of groups.", + ) + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_mid_block_none(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + mid_none_init_dict, mid_none_inputs_dict = self.prepare_init_args_and_inputs_for_common() + mid_none_init_dict["mid_block_type"] = None + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + mid_none_model = self.model_class(**mid_none_init_dict) + mid_none_model.to(torch_device) + mid_none_model.eval() + + self.assertIsNone(mid_none_model.mid_block, "Mid block should not exist.") + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + with torch.no_grad(): + mid_none_output = mid_none_model(**mid_none_inputs_dict) + + if isinstance(mid_none_output, dict): + mid_none_output = mid_none_output.to_tuple()[0] + + self.assertFalse(torch.allclose(output, mid_none_output, rtol=1e-3), "outputs should be different.") + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "AttnUpBlock2D", + "AttnDownBlock2D", + "UNetMidBlock2D", + "UpBlock2D", + "DownBlock2D", + } + + # NOTE: unlike UNet2DConditionModel, UNet2DModel does not currently support tuples for `attention_head_dim` + attention_head_dim = 8 + block_out_channels = (16, 32) + + super().test_gradient_checkpointing_is_applied( + expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels + ) + + +class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet2DModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 32, + "in_channels": 4, + "out_channels": 4, + "layers_per_block": 2, + "block_out_channels": (32, 64), + "attention_head_dim": 32, + "down_block_types": ("DownBlock2D", "DownBlock2D"), + "up_block_types": ("UpBlock2D", "UpBlock2D"), + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_pretrained_hub(self): + model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) + + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + image = model(**self.dummy_input).sample + + assert image is not None, "Make sure output is not None" + + @require_torch_accelerator + def test_from_pretrained_accelerate(self): + model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) + model.to(torch_device) + image = model(**self.dummy_input).sample + + assert image is not None, "Make sure output is not None" + + 
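+ # NOTE: a rough sketch of what the next test verifies, using the same dummy checkpoint;
+ # `low_cpu_mem_usage=True` (the accelerate-backed default) and `low_cpu_mem_usage=False`
+ # should yield numerically matching models:
+ #   fast = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
+ #   slow = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", low_cpu_mem_usage=False)
+ #   the same noise/timestep inputs should then produce outputs that agree within rtol=1e-3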
@require_torch_accelerator + def test_from_pretrained_accelerate_wont_change_results(self): + # by default model loading will use accelerate as `low_cpu_mem_usage=True` + model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) + model_accelerate.to(torch_device) + model_accelerate.eval() + + noise = torch.randn( + 1, + model_accelerate.config.in_channels, + model_accelerate.config.sample_size, + model_accelerate.config.sample_size, + generator=torch.manual_seed(0), + ) + noise = noise.to(torch_device) + time_step = torch.tensor([10] * noise.shape[0]).to(torch_device) + + arr_accelerate = model_accelerate(noise, time_step)["sample"] + + # two models don't need to stay in the device at the same time + del model_accelerate + backend_empty_cache(torch_device) + gc.collect() + + model_normal_load, _ = UNet2DModel.from_pretrained( + "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False + ) + model_normal_load.to(torch_device) + model_normal_load.eval() + arr_normal_load = model_normal_load(noise, time_step)["sample"] + + assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3) + + def test_output_pretrained(self): + model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update") + model.eval() + model.to(torch_device) + + noise = torch.randn( + 1, + model.config.in_channels, + model.config.sample_size, + model.config.sample_size, + generator=torch.manual_seed(0), + ) + noise = noise.to(torch_device) + time_step = torch.tensor([10] * noise.shape[0]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step).sample + + output_slice = output[0, -1, -3:, -3:].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) + # fmt: on + + self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3)) + + def test_gradient_checkpointing_is_applied(self): + expected_set = {"DownBlock2D", "UNetMidBlock2D", "UpBlock2D"} + + # NOTE: unlike UNet2DConditionModel, UNet2DModel does not currently support tuples for `attention_head_dim` + attention_head_dim = 32 + block_out_channels = (32, 64) + + super().test_gradient_checkpointing_is_applied( + expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels + ) + + +class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet2DModel + main_input_name = "sample" + + @property + def dummy_input(self, sizes=(32, 32)): + batch_size = 4 + num_channels = 3 + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device) + + return {"sample": noise, "timestep": time_step} + + @property + def input_shape(self): + return (3, 32, 32) + + @property + def output_shape(self): + return (3, 32, 32) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": [32, 64, 64, 64], + "in_channels": 3, + "layers_per_block": 1, + "out_channels": 3, + "time_embedding_type": "fourier", + "norm_eps": 1e-6, + "mid_block_scale_factor": math.sqrt(2.0), + "norm_num_groups": None, + "down_block_types": [ + "SkipDownBlock2D", + "AttnSkipDownBlock2D", + "SkipDownBlock2D", + "SkipDownBlock2D", + ], + "up_block_types": [ + "SkipUpBlock2D", + "SkipUpBlock2D", + "AttnSkipUpBlock2D", + "SkipUpBlock2D", + ], + } + inputs_dict = self.dummy_input + return init_dict, 
inputs_dict + + @slow + def test_from_pretrained_hub(self): + model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True) + self.assertIsNotNone(model) + self.assertEqual(len(loading_info["missing_keys"]), 0) + + model.to(torch_device) + inputs = self.dummy_input + noise = floats_tensor((4, 3) + (256, 256)).to(torch_device) + inputs["sample"] = noise + image = model(**inputs) + + assert image is not None, "Make sure output is not None" + + @slow + def test_output_pretrained_ve_mid(self): + model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256") + model.to(torch_device) + + batch_size = 4 + num_channels = 3 + sizes = (256, 256) + + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step).sample + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-4836.2178, -6487.1470, -3816.8196, -7964.9302, -10966.3037, -20043.5957, 8137.0513, 2340.3328, 544.6056]) + # fmt: on + + self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) + + def test_output_pretrained_ve_large(self): + model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update") + model.to(torch_device) + + batch_size = 4 + num_channels = 3 + sizes = (32, 32) + + noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) + + with torch.no_grad(): + output = model(noise, time_step).sample + + output_slice = output[0, -3:, -3:, -1].flatten().cpu() + # fmt: off + expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) + # fmt: on + + self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) + + @unittest.skip("Test not supported.") + def test_forward_with_norm_groups(self): + # not required for this model + pass + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "UNetMidBlock2D", + } + + block_out_channels = (32, 64, 64, 64) + + super().test_gradient_checkpointing_is_applied( + expected_set=expected_set, block_out_channels=block_out_channels + ) + + def test_effective_gradient_checkpointing(self): + super().test_effective_gradient_checkpointing(skip={"time_proj.weight"}) + + @unittest.skip( + "To make layerwise casting work with this model, we will have to update the implementation. Due to potentially low usage, we don't support it here." + ) + def test_layerwise_casting_inference(self): + pass + + @unittest.skip( + "To make layerwise casting work with this model, we will have to update the implementation. Due to potentially low usage, we don't support it here." + ) + def test_layerwise_casting_memory(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_2d_condition.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_2d_condition.py new file mode 100644 index 0000000000000000000000000000000000000000..4dbb8ca7c0751c86cb1f204417050d3b03e004b0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_2d_condition.py @@ -0,0 +1,1471 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import gc +import os +import tempfile +import unittest +from collections import OrderedDict + +import torch +from huggingface_hub import snapshot_download +from parameterized import parameterized +from pytest import mark + +from diffusers import UNet2DConditionModel +from diffusers.models.attention_processor import ( + CustomDiffusionAttnProcessor, + IPAdapterAttnProcessor, + IPAdapterAttnProcessor2_0, +) +from diffusers.models.embeddings import ImageProjection, IPAdapterFaceIDImageProjection, IPAdapterPlusImageProjection +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + is_peft_available, + load_hf_numpy, + require_peft_backend, + require_torch_accelerator, + require_torch_accelerator_with_fp16, + skip_mps, + slow, + torch_all_close, + torch_device, +) +from ..test_modeling_common import ( + LoraHotSwappingForModelTesterMixin, + ModelTesterMixin, + TorchCompileTesterMixin, + UNetTesterMixin, +) + + +if is_peft_available(): + from peft import LoraConfig + from peft.tuners.tuners_utils import BaseTunerLayer + + +logger = logging.get_logger(__name__) + +enable_full_determinism() + + +def get_unet_lora_config(): + rank = 4 + unet_lora_config = LoraConfig( + r=rank, + lora_alpha=rank, + target_modules=["to_q", "to_k", "to_v", "to_out.0"], + init_lora_weights=False, + use_dora=False, + ) + return unet_lora_config + + +def check_if_lora_correctly_set(model) -> bool: + """ + Checks if the LoRA layers are correctly set with peft + """ + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return True + return False + + +def create_ip_adapter_state_dict(model): + # "ip_adapter" (cross-attention weights) + ip_cross_attn_state_dict = {} + key_id = 1 + + for name in model.attn_processors.keys(): + cross_attention_dim = ( + None if name.endswith("attn1.processor") or "motion_module" in name else model.config.cross_attention_dim + ) + + if name.startswith("mid_block"): + hidden_size = model.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(model.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = model.config.block_out_channels[block_id] + + if cross_attention_dim is not None: + sd = IPAdapterAttnProcessor( + hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + } + ) + + key_id += 2 + + # "image_proj" (ImageProjection layer weights) + cross_attention_dim = model.config["cross_attention_dim"] + image_projection = ImageProjection( + cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, num_image_text_embeds=4 + ) + + 
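+ # NOTE: the freshly initialised ImageProjection weights are remapped below into the "image_proj"
+ # key layout of an IP-Adapter checkpoint (proj.* / norm.*), which is the format consumed by the
+ # `_load_ip_adapter_weights` calls in the tests further down.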
ip_image_projection_state_dict = {} + sd = image_projection.state_dict() + ip_image_projection_state_dict.update( + { + "proj.weight": sd["image_embeds.weight"], + "proj.bias": sd["image_embeds.bias"], + "norm.weight": sd["norm.weight"], + "norm.bias": sd["norm.bias"], + } + ) + + del sd + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +def create_ip_adapter_plus_state_dict(model): + # "ip_adapter" (cross-attention weights) + ip_cross_attn_state_dict = {} + key_id = 1 + + for name in model.attn_processors.keys(): + cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = model.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(model.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = model.config.block_out_channels[block_id] + if cross_attention_dim is not None: + sd = IPAdapterAttnProcessor( + hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + } + ) + + key_id += 2 + + # "image_proj" (ImageProjection layer weights) + cross_attention_dim = model.config["cross_attention_dim"] + image_projection = IPAdapterPlusImageProjection( + embed_dims=cross_attention_dim, output_dims=cross_attention_dim, dim_head=32, heads=2, num_queries=4 + ) + + ip_image_projection_state_dict = OrderedDict() + + for k, v in image_projection.state_dict().items(): + if "2.to" in k: + k = k.replace("2.to", "0.to") + elif "layers.0.ln0" in k: + k = k.replace("layers.0.ln0", "layers.0.0.norm1") + elif "layers.0.ln1" in k: + k = k.replace("layers.0.ln1", "layers.0.0.norm2") + elif "layers.1.ln0" in k: + k = k.replace("layers.1.ln0", "layers.1.0.norm1") + elif "layers.1.ln1" in k: + k = k.replace("layers.1.ln1", "layers.1.0.norm2") + elif "layers.2.ln0" in k: + k = k.replace("layers.2.ln0", "layers.2.0.norm1") + elif "layers.2.ln1" in k: + k = k.replace("layers.2.ln1", "layers.2.0.norm2") + elif "layers.3.ln0" in k: + k = k.replace("layers.3.ln0", "layers.3.0.norm1") + elif "layers.3.ln1" in k: + k = k.replace("layers.3.ln1", "layers.3.0.norm2") + elif "to_q" in k: + parts = k.split(".") + parts[2] = "attn" + k = ".".join(parts) + elif "to_out.0" in k: + parts = k.split(".") + parts[2] = "attn" + k = ".".join(parts) + k = k.replace("to_out.0", "to_out") + else: + k = k.replace("0.ff.0", "0.1.0") + k = k.replace("0.ff.1.net.0.proj", "0.1.1") + k = k.replace("0.ff.1.net.2", "0.1.3") + + k = k.replace("1.ff.0", "1.1.0") + k = k.replace("1.ff.1.net.0.proj", "1.1.1") + k = k.replace("1.ff.1.net.2", "1.1.3") + + k = k.replace("2.ff.0", "2.1.0") + k = k.replace("2.ff.1.net.0.proj", "2.1.1") + k = k.replace("2.ff.1.net.2", "2.1.3") + + k = k.replace("3.ff.0", "3.1.0") + k = k.replace("3.ff.1.net.0.proj", "3.1.1") + k = k.replace("3.ff.1.net.2", "3.1.3") + + # if "norm_cross" in k: + # ip_image_projection_state_dict[k.replace("norm_cross", "norm1")] = v + # elif "layer_norm" in k: + # ip_image_projection_state_dict[k.replace("layer_norm", "norm2")] = v + if "to_k" in k: + parts = k.split(".") + parts[2] = "attn" + k = ".".join(parts) + 
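+ # NOTE: the checkpoint layout mimicked here fuses key/value into a single `to_kv` projection,
+ # so the `to_k` tensor is duplicated along dim 0 to obtain the fused shape, and the separate
+ # `to_v` entry is dropped (see the `continue` branch just below).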
ip_image_projection_state_dict[k.replace("to_k", "to_kv")] = torch.cat([v, v], dim=0) + elif "to_v" in k: + continue + else: + ip_image_projection_state_dict[k] = v + + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +def create_ip_adapter_faceid_state_dict(model): + # "ip_adapter" (cross-attention weights) + # no LoRA weights + ip_cross_attn_state_dict = {} + key_id = 1 + + for name in model.attn_processors.keys(): + cross_attention_dim = ( + None if name.endswith("attn1.processor") or "motion_module" in name else model.config.cross_attention_dim + ) + + if name.startswith("mid_block"): + hidden_size = model.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(model.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = model.config.block_out_channels[block_id] + + if cross_attention_dim is not None: + sd = IPAdapterAttnProcessor( + hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 + ).state_dict() + ip_cross_attn_state_dict.update( + { + f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"], + f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"], + } + ) + + key_id += 2 + + # "image_proj" (ImageProjection layer weights) + cross_attention_dim = model.config["cross_attention_dim"] + image_projection = IPAdapterFaceIDImageProjection( + cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, mult=2, num_tokens=4 + ) + + ip_image_projection_state_dict = {} + sd = image_projection.state_dict() + ip_image_projection_state_dict.update( + { + "proj.0.weight": sd["ff.net.0.proj.weight"], + "proj.0.bias": sd["ff.net.0.proj.bias"], + "proj.2.weight": sd["ff.net.2.weight"], + "proj.2.bias": sd["ff.net.2.bias"], + "norm.weight": sd["norm.weight"], + "norm.bias": sd["norm.bias"], + } + ) + + del sd + ip_state_dict = {} + ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) + return ip_state_dict + + +def create_custom_diffusion_layers(model, mock_weights: bool = True): + train_kv = True + train_q_out = True + custom_diffusion_attn_procs = {} + + st = model.state_dict() + for name, _ in model.attn_processors.items(): + cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim + if name.startswith("mid_block"): + hidden_size = model.config.block_out_channels[-1] + elif name.startswith("up_blocks"): + block_id = int(name[len("up_blocks.")]) + hidden_size = list(reversed(model.config.block_out_channels))[block_id] + elif name.startswith("down_blocks"): + block_id = int(name[len("down_blocks.")]) + hidden_size = model.config.block_out_channels[block_id] + layer_name = name.split(".processor")[0] + weights = { + "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], + "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], + } + if train_q_out: + weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] + weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] + weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] + if cross_attention_dim is not None: + custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( + train_kv=train_kv, + train_q_out=train_q_out, + hidden_size=hidden_size, + 
cross_attention_dim=cross_attention_dim, + ).to(model.device) + custom_diffusion_attn_procs[name].load_state_dict(weights) + if mock_weights: + # add 1 to weights to mock trained weights + with torch.no_grad(): + custom_diffusion_attn_procs[name].to_k_custom_diffusion.weight += 1 + custom_diffusion_attn_procs[name].to_v_custom_diffusion.weight += 1 + else: + custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( + train_kv=False, + train_q_out=False, + hidden_size=hidden_size, + cross_attention_dim=cross_attention_dim, + ) + del st + return custom_diffusion_attn_procs + + +class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet2DConditionModel + main_input_name = "sample" + # We override the items here because the unet under consideration is small. + model_split_percents = [0.5, 0.34, 0.4] + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + sizes = (16, 16) + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device) + + return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} + + @property + def input_shape(self): + return (4, 16, 16) + + @property + def output_shape(self): + return (4, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (4, 8), + "norm_num_groups": 4, + "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"), + "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"), + "cross_attention_dim": 8, + "attention_head_dim": 2, + "out_channels": 4, + "in_channels": 4, + "layers_per_block": 1, + "sample_size": 16, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_enable_works(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.enable_xformers_memory_efficient_attention() + + assert ( + model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ + == "XFormersAttnProcessor" + ), "xformers is not enabled" + + def test_model_with_attention_head_dim_tuple(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_model_with_use_linear_projection(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["use_linear_projection"] = True + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def 
test_model_with_cross_attention_dim_tuple(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["cross_attention_dim"] = (8, 8) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_model_with_simple_projection(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + batch_size, _, _, sample_size = inputs_dict["sample"].shape + + init_dict["class_embed_type"] = "simple_projection" + init_dict["projection_class_embeddings_input_dim"] = sample_size + + inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_model_with_class_embeddings_concat(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + batch_size, _, _, sample_size = inputs_dict["sample"].shape + + init_dict["class_embed_type"] = "simple_projection" + init_dict["projection_class_embeddings_input_dim"] = sample_size + init_dict["class_embeddings_concat"] = True + + inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_model_attention_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + model.set_attention_slice("auto") + with torch.no_grad(): + output = model(**inputs_dict) + assert output is not None + + model.set_attention_slice("max") + with torch.no_grad(): + output = model(**inputs_dict) + assert output is not None + + model.set_attention_slice(2) + with torch.no_grad(): + output = model(**inputs_dict) + assert output is not None + + def test_model_sliceable_head_dim(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + + def check_sliceable_dim_attr(module: torch.nn.Module): + if hasattr(module, "set_attention_slice"): + assert isinstance(module.sliceable_head_dim, int) + + for child in module.children(): + check_sliceable_dim_attr(child) + + # retrieve number of attention layers + for module in model.children(): + check_sliceable_dim_attr(module) + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "CrossAttnUpBlock2D", + "CrossAttnDownBlock2D", + "UNetMidBlock2DCrossAttn", + "UpBlock2D", + 
"Transformer2DModel", + "DownBlock2D", + } + attention_head_dim = (8, 16) + block_out_channels = (16, 32) + super().test_gradient_checkpointing_is_applied( + expected_set=expected_set, attention_head_dim=attention_head_dim, block_out_channels=block_out_channels + ) + + def test_special_attn_proc(self): + class AttnEasyProc(torch.nn.Module): + def __init__(self, num): + super().__init__() + self.weight = torch.nn.Parameter(torch.tensor(num)) + self.is_run = False + self.number = 0 + self.counter = 0 + + def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, number=None): + batch_size, sequence_length, _ = hidden_states.shape + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + + query = attn.to_q(hidden_states) + + encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + hidden_states += self.weight + + self.is_run = True + self.counter += 1 + self.number = number + + return hidden_states + + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + + processor = AttnEasyProc(5.0) + + model.set_attn_processor(processor) + model(**inputs_dict, cross_attention_kwargs={"number": 123}).sample + + assert processor.counter == 8 + assert processor.is_run + assert processor.number == 123 + + @parameterized.expand( + [ + # fmt: off + [torch.bool], + [torch.long], + [torch.float], + # fmt: on + ] + ) + def test_model_xattn_mask(self, mask_dtype): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16), "block_out_channels": (16, 32)}) + model.to(torch_device) + model.eval() + + cond = inputs_dict["encoder_hidden_states"] + with torch.no_grad(): + full_cond_out = model(**inputs_dict).sample + assert full_cond_out is not None + + keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype) + full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample + assert full_cond_keepallmask_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( + "a 'keep all' mask should give the same result as no mask" + ) + + trunc_cond = cond[:, :-1, :] + trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample + assert not trunc_cond_out.allclose(full_cond_out, rtol=1e-05, atol=1e-05), ( + "discarding the last token from our cond should change the result" + ) + + batch, tokens, _ = cond.shape + mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype) + masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample + assert masked_cond_out.allclose(trunc_cond_out, rtol=1e-05, atol=1e-05), ( + "masking the last token from our cond 
should be equivalent to truncating that token out of the condition" + ) + + # see diffusers.models.attention_processor::Attention#prepare_attention_mask + # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks. + # since the use-case (somebody passes in a too-short cross-attn mask) is pretty esoteric. + # maybe it's fine that this only works for the unclip use-case. + @mark.skip( + reason="we currently pad mask by target_length tokens (what unclip needs), whereas stable-diffusion's cross-attn needs to instead pad by remaining_length." + ) + def test_model_xattn_padding(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)}) + model.to(torch_device) + model.eval() + + cond = inputs_dict["encoder_hidden_states"] + with torch.no_grad(): + full_cond_out = model(**inputs_dict).sample + assert full_cond_out is not None + + batch, tokens, _ = cond.shape + keeplast_mask = (torch.arange(tokens) == tokens - 1).expand(batch, -1).to(cond.device, torch.bool) + keeplast_out = model(**{**inputs_dict, "encoder_attention_mask": keeplast_mask}).sample + assert not keeplast_out.allclose(full_cond_out), "a 'keep last token' mask should change the result" + + trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool) + trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample + assert trunc_mask_out.allclose(keeplast_out), ( + "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." + ) + + def test_custom_diffusion_processors(self): + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + + with torch.no_grad(): + sample1 = model(**inputs_dict).sample + + custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) + + # make sure we can set a list of attention processors + model.set_attn_processor(custom_diffusion_attn_procs) + model.to(torch_device) + + # test that attn processors can be set to itself + model.set_attn_processor(model.attn_processors) + + with torch.no_grad(): + sample2 = model(**inputs_dict).sample + + assert (sample1 - sample2).abs().max() < 3e-3 + + def test_custom_diffusion_save_load(self): + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + + with torch.no_grad(): + old_sample = model(**inputs_dict).sample + + custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) + model.set_attn_processor(custom_diffusion_attn_procs) + + with torch.no_grad(): + sample = model(**inputs_dict).sample + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_attn_procs(tmpdirname, safe_serialization=False) + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_custom_diffusion_weights.bin"))) + torch.manual_seed(0) + new_model = self.model_class(**init_dict) + new_model.load_attn_procs(tmpdirname, 
weight_name="pytorch_custom_diffusion_weights.bin") + new_model.to(torch_device) + + with torch.no_grad(): + new_sample = new_model(**inputs_dict).sample + + assert (sample - new_sample).abs().max() < 1e-4 + + # custom diffusion and no custom diffusion should be the same + assert (sample - old_sample).abs().max() < 3e-3 + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_custom_diffusion_xformers_on_off(self): + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) + model.set_attn_processor(custom_diffusion_attn_procs) + + # default + with torch.no_grad(): + sample = model(**inputs_dict).sample + + model.enable_xformers_memory_efficient_attention() + on_sample = model(**inputs_dict).sample + + model.disable_xformers_memory_efficient_attention() + off_sample = model(**inputs_dict).sample + + assert (sample - on_sample).abs().max() < 1e-4 + assert (sample - off_sample).abs().max() < 1e-4 + + def test_pickle(self): + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + + with torch.no_grad(): + sample = model(**inputs_dict).sample + + sample_copy = copy.copy(sample) + + assert (sample - sample_copy).abs().max() < 1e-4 + + def test_asymmetrical_unet(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + # Add asymmetry to configs + init_dict["transformer_layers_per_block"] = [[3, 2], 1] + init_dict["reverse_transformer_layers_per_block"] = [[3, 4], 1] + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + + output = model(**inputs_dict).sample + expected_shape = inputs_dict["sample"].shape + + # Check if input and output shapes are the same + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_ip_adapter(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + + # forward pass without ip-adapter + with torch.no_grad(): + sample1 = model(**inputs_dict).sample + + # update inputs_dict for ip-adapter + batch_size = inputs_dict["encoder_hidden_states"].shape[0] + # for ip-adapter image_embeds has shape [batch_size, num_image, embed_dim] + image_embeds = floats_tensor((batch_size, 1, model.config.cross_attention_dim)).to(torch_device) + inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds]} + + # make ip_adapter_1 and ip_adapter_2 + ip_adapter_1 = create_ip_adapter_state_dict(model) + + image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()} + cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()} + ip_adapter_2 = {} + ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2}) + + # forward 
pass ip_adapter_1 + model._load_ip_adapter_weights([ip_adapter_1]) + assert model.config.encoder_hid_dim_type == "ip_image_proj" + assert model.encoder_hid_proj is not None + assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in ( + "IPAdapterAttnProcessor", + "IPAdapterAttnProcessor2_0", + ) + with torch.no_grad(): + sample2 = model(**inputs_dict).sample + + # forward pass with ip_adapter_2 + model._load_ip_adapter_weights([ip_adapter_2]) + with torch.no_grad(): + sample3 = model(**inputs_dict).sample + + # forward pass with ip_adapter_1 again + model._load_ip_adapter_weights([ip_adapter_1]) + with torch.no_grad(): + sample4 = model(**inputs_dict).sample + + # forward pass with multiple ip-adapters and multiple images + model._load_ip_adapter_weights([ip_adapter_1, ip_adapter_2]) + # set the scale for ip_adapter_2 to 0 so that result should be same as only load ip_adapter_1 + for attn_processor in model.attn_processors.values(): + if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): + attn_processor.scale = [1, 0] + image_embeds_multi = image_embeds.repeat(1, 2, 1) + inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds_multi, image_embeds_multi]} + with torch.no_grad(): + sample5 = model(**inputs_dict).sample + + # forward pass with single ip-adapter & single image when image_embeds is not a list and a 2-d tensor + image_embeds = image_embeds.squeeze(1) + inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds} + + model._load_ip_adapter_weights(ip_adapter_1) + with torch.no_grad(): + sample6 = model(**inputs_dict).sample + + assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4) + assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4) + assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4) + assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4) + assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4) + + def test_ip_adapter_plus(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + + # forward pass without ip-adapter + with torch.no_grad(): + sample1 = model(**inputs_dict).sample + + # update inputs_dict for ip-adapter + batch_size = inputs_dict["encoder_hidden_states"].shape[0] + # for ip-adapter-plus image_embeds has shape [batch_size, num_image, sequence_length, embed_dim] + image_embeds = floats_tensor((batch_size, 1, 1, model.config.cross_attention_dim)).to(torch_device) + inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds]} + + # make ip_adapter_1 and ip_adapter_2 + ip_adapter_1 = create_ip_adapter_plus_state_dict(model) + + image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()} + cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()} + ip_adapter_2 = {} + ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2}) + + # forward pass ip_adapter_1 + model._load_ip_adapter_weights([ip_adapter_1]) + assert model.config.encoder_hid_dim_type == "ip_image_proj" + assert model.encoder_hid_proj is not None + assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in ( + "IPAdapterAttnProcessor", + "IPAdapterAttnProcessor2_0", + ) + with torch.no_grad(): + sample2 = model(**inputs_dict).sample + + # forward pass with ip_adapter_2 + 
model._load_ip_adapter_weights([ip_adapter_2]) + with torch.no_grad(): + sample3 = model(**inputs_dict).sample + + # forward pass with ip_adapter_1 again + model._load_ip_adapter_weights([ip_adapter_1]) + with torch.no_grad(): + sample4 = model(**inputs_dict).sample + + # forward pass with multiple ip-adapters and multiple images + model._load_ip_adapter_weights([ip_adapter_1, ip_adapter_2]) + # set the scale for ip_adapter_2 to 0 so that result should be same as only load ip_adapter_1 + for attn_processor in model.attn_processors.values(): + if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)): + attn_processor.scale = [1, 0] + image_embeds_multi = image_embeds.repeat(1, 2, 1, 1) + inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds_multi, image_embeds_multi]} + with torch.no_grad(): + sample5 = model(**inputs_dict).sample + + # forward pass with single ip-adapter & single image when image_embeds is a 3-d tensor + image_embeds = image_embeds[:,].squeeze(1) + inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds} + + model._load_ip_adapter_weights(ip_adapter_1) + with torch.no_grad(): + sample6 = model(**inputs_dict).sample + + assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4) + assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4) + assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4) + assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4) + assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4) + + @parameterized.expand( + [ + ("hf-internal-testing/unet2d-sharded-dummy", None), + ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", "fp16"), + ] + ) + @require_torch_accelerator + def test_load_sharded_checkpoint_from_hub(self, repo_id, variant): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + loaded_model = self.model_class.from_pretrained(repo_id, variant=variant) + loaded_model = loaded_model.to(torch_device) + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @parameterized.expand( + [ + ("hf-internal-testing/unet2d-sharded-dummy-subfolder", None), + ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "fp16"), + ] + ) + @require_torch_accelerator + def test_load_sharded_checkpoint_from_hub_subfolder(self, repo_id, variant): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + loaded_model = self.model_class.from_pretrained(repo_id, subfolder="unet", variant=variant) + loaded_model = loaded_model.to(torch_device) + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_torch_accelerator + def test_load_sharded_checkpoint_from_hub_local(self): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy") + loaded_model = self.model_class.from_pretrained(ckpt_path, local_files_only=True) + loaded_model = loaded_model.to(torch_device) + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_torch_accelerator + def test_load_sharded_checkpoint_from_hub_local_subfolder(self): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy-subfolder") + loaded_model = self.model_class.from_pretrained(ckpt_path, subfolder="unet", local_files_only=True) + loaded_model = loaded_model.to(torch_device) 
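+ # NOTE: the forward pass below on dummy inputs checks that the sharded weights were reassembled
+ # into a working model, not merely that `from_pretrained` returned without raising.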
+ new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_torch_accelerator + @parameterized.expand( + [ + ("hf-internal-testing/unet2d-sharded-dummy", None), + ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", "fp16"), + ] + ) + def test_load_sharded_checkpoint_device_map_from_hub(self, repo_id, variant): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + loaded_model = self.model_class.from_pretrained(repo_id, variant=variant, device_map="auto") + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_torch_accelerator + @parameterized.expand( + [ + ("hf-internal-testing/unet2d-sharded-dummy-subfolder", None), + ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "fp16"), + ] + ) + def test_load_sharded_checkpoint_device_map_from_hub_subfolder(self, repo_id, variant): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + loaded_model = self.model_class.from_pretrained(repo_id, variant=variant, subfolder="unet", device_map="auto") + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_torch_accelerator + def test_load_sharded_checkpoint_device_map_from_hub_local(self): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy") + loaded_model = self.model_class.from_pretrained(ckpt_path, local_files_only=True, device_map="auto") + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_torch_accelerator + def test_load_sharded_checkpoint_device_map_from_hub_local_subfolder(self): + _, inputs_dict = self.prepare_init_args_and_inputs_for_common() + ckpt_path = snapshot_download("hf-internal-testing/unet2d-sharded-dummy-subfolder") + loaded_model = self.model_class.from_pretrained( + ckpt_path, local_files_only=True, subfolder="unet", device_map="auto" + ) + new_output = loaded_model(**inputs_dict) + + assert loaded_model + assert new_output.sample.shape == (4, 4, 16, 16) + + @require_peft_backend + def test_load_attn_procs_raise_warning(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + # forward pass without LoRA + with torch.no_grad(): + non_lora_sample = model(**inputs_dict).sample + + unet_lora_config = get_unet_lora_config() + model.add_adapter(unet_lora_config) + + assert check_if_lora_correctly_set(model), "Lora not correctly set in UNet." + + # forward pass with LoRA + with torch.no_grad(): + lora_sample_1 = model(**inputs_dict).sample + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_attn_procs(tmpdirname) + model.unload_lora() + + with self.assertWarns(FutureWarning) as warning: + model.load_attn_procs(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")) + + warning_message = str(warning.warnings[0].message) + assert "Using the `load_attn_procs()` method has been deprecated" in warning_message + + # import to still check for the rest of the stuff. + assert check_if_lora_correctly_set(model), "Lora not correctly set in UNet." 
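+ # NOTE: reloading the processors from the saved file should restore the same LoRA weights, so the
+ # forward pass below is expected to match `lora_sample_1` within the tolerances asserted afterwards.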
+ + with torch.no_grad(): + lora_sample_2 = model(**inputs_dict).sample + + assert not torch.allclose(non_lora_sample, lora_sample_1, atol=1e-4, rtol=1e-4), ( + "LoRA injected UNet should produce different results." + ) + assert torch.allclose(lora_sample_1, lora_sample_2, atol=1e-4, rtol=1e-4), ( + "Loading from a saved checkpoint should produce identical results." + ) + + @require_peft_backend + def test_save_attn_procs_raise_warning(self): + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + unet_lora_config = get_unet_lora_config() + model.add_adapter(unet_lora_config) + + assert check_if_lora_correctly_set(model), "Lora not correctly set in UNet." + + with tempfile.TemporaryDirectory() as tmpdirname: + with self.assertWarns(FutureWarning) as warning: + model.save_attn_procs(tmpdirname) + + warning_message = str(warning.warnings[0].message) + assert "Using the `save_attn_procs()` method has been deprecated" in warning_message + + +class UNet2DConditionModelCompileTests(TorchCompileTesterMixin, unittest.TestCase): + model_class = UNet2DConditionModel + + def prepare_init_args_and_inputs_for_common(self): + return UNet2DConditionModelTests().prepare_init_args_and_inputs_for_common() + + +class UNet2DConditionModelLoRAHotSwapTests(LoraHotSwappingForModelTesterMixin, unittest.TestCase): + model_class = UNet2DConditionModel + + def prepare_init_args_and_inputs_for_common(self): + return UNet2DConditionModelTests().prepare_init_args_and_inputs_for_common() + + +@slow +class UNet2DConditionModelIntegrationTests(unittest.TestCase): + def get_file_format(self, seed, shape): + return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return image + + def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): + variant = "fp16" if fp16 else None + torch_dtype = torch.float16 if fp16 else torch.float32 + + model = UNet2DConditionModel.from_pretrained( + model_id, subfolder="unet", torch_dtype=torch_dtype, variant=variant + ) + model.to(torch_device).eval() + + return model + + @require_torch_accelerator + def test_set_attention_slice_auto(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + unet = self.get_unet_model() + unet.set_attention_slice("auto") + + latents = self.get_latents(33) + encoder_hidden_states = self.get_encoder_hidden_states(33) + timestep = 1 + + with torch.no_grad(): + _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + mem_bytes = backend_max_memory_allocated(torch_device) + + assert mem_bytes < 5 * 10**9 + + @require_torch_accelerator + def test_set_attention_slice_max(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + unet = self.get_unet_model() + unet.set_attention_slice("max") + + latents = self.get_latents(33) + encoder_hidden_states = self.get_encoder_hidden_states(33) + timestep = 1 + + with torch.no_grad(): + _ = unet(latents, timestep=timestep, 
encoder_hidden_states=encoder_hidden_states).sample + + mem_bytes = backend_max_memory_allocated(torch_device) + + assert mem_bytes < 5 * 10**9 + + @require_torch_accelerator + def test_set_attention_slice_int(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + unet = self.get_unet_model() + unet.set_attention_slice(2) + + latents = self.get_latents(33) + encoder_hidden_states = self.get_encoder_hidden_states(33) + timestep = 1 + + with torch.no_grad(): + _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + mem_bytes = backend_max_memory_allocated(torch_device) + + assert mem_bytes < 5 * 10**9 + + @require_torch_accelerator + def test_set_attention_slice_list(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + # there are 32 sliceable layers + slice_list = 16 * [2, 3] + unet = self.get_unet_model() + unet.set_attention_slice(slice_list) + + latents = self.get_latents(33) + encoder_hidden_states = self.get_encoder_hidden_states(33) + timestep = 1 + + with torch.no_grad(): + _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + mem_bytes = backend_max_memory_allocated(torch_device) + + assert mem_bytes < 5 * 10**9 + + def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + hidden_states = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return hidden_states + + @parameterized.expand( + [ + # fmt: off + [33, 4, [-0.4424, 0.1510, -0.1937, 0.2118, 0.3746, -0.3957, 0.0160, -0.0435]], + [47, 0.55, [-0.1508, 0.0379, -0.3075, 0.2540, 0.3633, -0.0821, 0.1719, -0.0207]], + [21, 0.89, [-0.6479, 0.6364, -0.3464, 0.8697, 0.4443, -0.6289, -0.0091, 0.1778]], + [9, 1000, [0.8888, -0.5659, 0.5834, -0.7469, 1.1912, -0.3923, 1.1241, -0.4424]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_compvis_sd_v1_4(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4") + latents = self.get_latents(seed) + encoder_hidden_states = self.get_encoder_hidden_states(seed) + + timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == latents.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) + + @parameterized.expand( + [ + # fmt: off + [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], + [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], + [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], + [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) + latents = self.get_latents(seed, fp16=True) + encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) + + timestep = torch.tensor([timestep], dtype=torch.long, 
device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == latents.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) + + @parameterized.expand( + [ + # fmt: off + [33, 4, [-0.4430, 0.1570, -0.1867, 0.2376, 0.3205, -0.3681, 0.0525, -0.0722]], + [47, 0.55, [-0.1415, 0.0129, -0.3136, 0.2257, 0.3430, -0.0536, 0.2114, -0.0436]], + [21, 0.89, [-0.7091, 0.6664, -0.3643, 0.9032, 0.4499, -0.6541, 0.0139, 0.1750]], + [9, 1000, [0.8878, -0.5659, 0.5844, -0.7442, 1.1883, -0.3927, 1.1192, -0.4423]], + # fmt: on + ] + ) + @require_torch_accelerator + @skip_mps + def test_compvis_sd_v1_5(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-v1-5") + latents = self.get_latents(seed) + encoder_hidden_states = self.get_encoder_hidden_states(seed) + + timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == latents.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) + + @parameterized.expand( + [ + # fmt: off + [83, 4, [-0.2695, -0.1669, 0.0073, -0.3181, -0.1187, -0.1676, -0.1395, -0.5972]], + [17, 0.55, [-0.1290, -0.2588, 0.0551, -0.0916, 0.3286, 0.0238, -0.3669, 0.0322]], + [8, 0.89, [-0.5283, 0.1198, 0.0870, -0.1141, 0.9189, -0.0150, 0.5474, 0.4319]], + [3, 1000, [-0.5601, 0.2411, -0.5435, 0.1268, 1.1338, -0.2427, -0.0280, -1.0020]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-v1-5", fp16=True) + latents = self.get_latents(seed, fp16=True) + encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) + + timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == latents.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) + + @parameterized.expand( + [ + # fmt: off + [33, 4, [-0.7639, 0.0106, -0.1615, -0.3487, -0.0423, -0.7972, 0.0085, -0.4858]], + [47, 0.55, [-0.6564, 0.0795, -1.9026, -0.6258, 1.8235, 1.2056, 1.2169, 0.9073]], + [21, 0.89, [0.0327, 0.4399, -0.6358, 0.3417, 0.4120, -0.5621, -0.0397, -1.0430]], + [9, 1000, [0.1600, 0.7303, -1.0556, -0.3515, -0.7440, -1.2037, -1.8149, -1.8931]], + # fmt: on + ] + ) + @require_torch_accelerator + @skip_mps + def test_compvis_sd_inpaint(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-inpainting") + latents = self.get_latents(seed, shape=(4, 9, 64, 64)) + encoder_hidden_states = self.get_encoder_hidden_states(seed) + + timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, 
encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == (4, 4, 64, 64) + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) + + @parameterized.expand( + [ + # fmt: off + [83, 4, [-0.1047, -1.7227, 0.1067, 0.0164, -0.5698, -0.4172, -0.1388, 1.1387]], + [17, 0.55, [0.0975, -0.2856, -0.3508, -0.4600, 0.3376, 0.2930, -0.2747, -0.7026]], + [8, 0.89, [-0.0952, 0.0183, -0.5825, -0.1981, 0.1131, 0.4668, -0.0395, -0.3486]], + [3, 1000, [0.4790, 0.4949, -1.0732, -0.7158, 0.7959, -0.9478, 0.1105, -0.9741]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="stable-diffusion-v1-5/stable-diffusion-inpainting", fp16=True) + latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True) + encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) + + timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == (4, 4, 64, 64) + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) + + @parameterized.expand( + [ + # fmt: off + [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], + [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], + [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], + [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], + # fmt: on + ] + ) + @require_torch_accelerator_with_fp16 + def test_stabilityai_sd_v2_fp16(self, seed, timestep, expected_slice): + model = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) + latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) + encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) + + timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) + + with torch.no_grad(): + sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample + + assert sample.shape == latents.shape + + output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() + expected_output_slice = torch.tensor(expected_slice) + + assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_3d_condition.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_3d_condition.py new file mode 100644 index 0000000000000000000000000000000000000000..f73e3461c38ea9606a5a0af1cc7a3081a94e17bb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_3d_condition.py @@ -0,0 +1,183 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch + +from diffusers.models import ModelMixin, UNet3DConditionModel +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +enable_full_determinism() + +logger = logging.get_logger(__name__) + + +@skip_mps +class UNet3DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNet3DConditionModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + num_frames = 4 + sizes = (16, 16) + + noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device) + + return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} + + @property + def input_shape(self): + return (4, 4, 16, 16) + + @property + def output_shape(self): + return (4, 4, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (4, 8), + "norm_num_groups": 4, + "down_block_types": ( + "CrossAttnDownBlock3D", + "DownBlock3D", + ), + "up_block_types": ("UpBlock3D", "CrossAttnUpBlock3D"), + "cross_attention_dim": 8, + "attention_head_dim": 2, + "out_channels": 4, + "in_channels": 4, + "layers_per_block": 1, + "sample_size": 16, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_enable_works(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.enable_xformers_memory_efficient_attention() + + assert ( + model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ + == "XFormersAttnProcessor" + ), "xformers is not enabled" + + # Overriding to set `norm_num_groups` needs to be different for this model. + def test_forward_with_norm_groups(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + init_dict["block_out_channels"] = (32, 64) + init_dict["norm_num_groups"] = 32 + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + # Overriding since the UNet3D outputs a different structure. 
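+    # The override below runs two forward passes with identical inputs under torch.no_grad()
+    # (plus a warmup pass on mps), filters NaNs out of both outputs, and asserts that the
+    # maximum absolute difference stays within 1e-5.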
+ def test_determinism(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + # Warmup pass when using mps (see #372) + if torch_device == "mps" and isinstance(model, ModelMixin): + model(**self.dummy_input) + + first = model(**inputs_dict) + if isinstance(first, dict): + first = first.sample + + second = model(**inputs_dict) + if isinstance(second, dict): + second = second.sample + + out_1 = first.cpu().numpy() + out_2 = second.cpu().numpy() + out_1 = out_1[~np.isnan(out_1)] + out_2 = out_2[~np.isnan(out_2)] + max_diff = np.amax(np.abs(out_1 - out_2)) + self.assertLessEqual(max_diff, 1e-5) + + def test_model_attention_slicing(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["block_out_channels"] = (16, 32) + init_dict["attention_head_dim"] = 8 + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + model.set_attention_slice("auto") + with torch.no_grad(): + output = model(**inputs_dict) + assert output is not None + + model.set_attention_slice("max") + with torch.no_grad(): + output = model(**inputs_dict) + assert output is not None + + model.set_attention_slice(2) + with torch.no_grad(): + output = model(**inputs_dict) + assert output is not None + + def test_feed_forward_chunking(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + init_dict["block_out_channels"] = (32, 64) + init_dict["norm_num_groups"] = 32 + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict)[0] + + model.enable_forward_chunking() + with torch.no_grad(): + output_2 = model(**inputs_dict)[0] + + self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") + assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_controlnetxs.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_controlnetxs.py new file mode 100644 index 0000000000000000000000000000000000000000..40773536df7017382b695c39c4793964e72cf5fc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_controlnetxs.py @@ -0,0 +1,326 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
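+
+# The tests below exercise UNetControlNetXSModel, which pairs a Stable Diffusion UNet with a
+# ControlNet-XS adapter. The construction pattern used throughout the file is roughly the
+# following (a minimal sketch mirroring the `get_dummy_unet` / `get_dummy_controlnet_from_unet`
+# helpers defined further down, not a canonical recipe):
+#
+#     unet = UNet2DConditionModel(...)  # small dummy config
+#     adapter = ControlNetXSAdapter.from_unet(unet, size_ratio=1)
+#     model = UNetControlNetXSModel.from_unet(unet, adapter)
+#     out = model(sample=noise, timestep=timestep, encoder_hidden_states=text_emb,
+#                 controlnet_cond=cond_image, conditioning_scale=1.0).sample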
+ +import unittest + +import numpy as np +import torch +from torch import nn + +from diffusers import ControlNetXSAdapter, UNet2DConditionModel, UNetControlNetXSModel +from diffusers.utils import logging + +from ...testing_utils import enable_full_determinism, floats_tensor, is_flaky, torch_device +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +logger = logging.get_logger(__name__) + +enable_full_determinism() + + +class UNetControlNetXSModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNetControlNetXSModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + sizes = (16, 16) + conditioning_image_size = (3, 32, 32) # size of additional, unprocessed image for control-conditioning + + noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device) + controlnet_cond = floats_tensor((batch_size, *conditioning_image_size)).to(torch_device) + conditioning_scale = 1 + + return { + "sample": noise, + "timestep": time_step, + "encoder_hidden_states": encoder_hidden_states, + "controlnet_cond": controlnet_cond, + "conditioning_scale": conditioning_scale, + } + + @property + def input_shape(self): + return (4, 16, 16) + + @property + def output_shape(self): + return (4, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "sample_size": 16, + "down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"), + "up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"), + "block_out_channels": (4, 8), + "cross_attention_dim": 8, + "transformer_layers_per_block": 1, + "num_attention_heads": 2, + "norm_num_groups": 4, + "upcast_attention": False, + "ctrl_block_out_channels": [2, 4], + "ctrl_num_attention_heads": 4, + "ctrl_max_norm_num_groups": 2, + "ctrl_conditioning_embedding_out_channels": (2, 2), + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def get_dummy_unet(self): + """For some tests we also need the underlying UNet. For these, we'll build the UNetControlNetXSModel from the UNet and ControlNetXS-Adapter""" + return UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=16, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=8, + norm_num_groups=4, + use_linear_projection=True, + ) + + def get_dummy_controlnet_from_unet(self, unet, **kwargs): + """For some tests we also need the underlying ControlNetXS-Adapter. For these, we'll build the UNetControlNetXSModel from the UNet and ControlNetXS-Adapter""" + # size_ratio and conditioning_embedding_out_channels chosen to keep model small + return ControlNetXSAdapter.from_unet(unet, size_ratio=1, conditioning_embedding_out_channels=(2, 2), **kwargs) + + def test_from_unet(self): + unet = self.get_dummy_unet() + controlnet = self.get_dummy_controlnet_from_unet(unet) + + model = UNetControlNetXSModel.from_unet(unet, controlnet) + model_state_dict = model.state_dict() + + def assert_equal_weights(module, weight_dict_prefix): + for param_name, param_value in module.named_parameters(): + assert torch.equal(model_state_dict[weight_dict_prefix + "." 
+ param_name], param_value) + + # # check unet + # everything expect down,mid,up blocks + modules_from_unet = [ + "time_embedding", + "conv_in", + "conv_norm_out", + "conv_out", + ] + for p in modules_from_unet: + assert_equal_weights(getattr(unet, p), "base_" + p) + optional_modules_from_unet = [ + "class_embedding", + "add_time_proj", + "add_embedding", + ] + for p in optional_modules_from_unet: + if hasattr(unet, p) and getattr(unet, p) is not None: + assert_equal_weights(getattr(unet, p), "base_" + p) + # down blocks + assert len(unet.down_blocks) == len(model.down_blocks) + for i, d in enumerate(unet.down_blocks): + assert_equal_weights(d.resnets, f"down_blocks.{i}.base_resnets") + if hasattr(d, "attentions"): + assert_equal_weights(d.attentions, f"down_blocks.{i}.base_attentions") + if hasattr(d, "downsamplers") and getattr(d, "downsamplers") is not None: + assert_equal_weights(d.downsamplers[0], f"down_blocks.{i}.base_downsamplers") + # mid block + assert_equal_weights(unet.mid_block, "mid_block.base_midblock") + # up blocks + assert len(unet.up_blocks) == len(model.up_blocks) + for i, u in enumerate(unet.up_blocks): + assert_equal_weights(u.resnets, f"up_blocks.{i}.resnets") + if hasattr(u, "attentions"): + assert_equal_weights(u.attentions, f"up_blocks.{i}.attentions") + if hasattr(u, "upsamplers") and getattr(u, "upsamplers") is not None: + assert_equal_weights(u.upsamplers[0], f"up_blocks.{i}.upsamplers") + + # # check controlnet + # everything expect down,mid,up blocks + modules_from_controlnet = { + "controlnet_cond_embedding": "controlnet_cond_embedding", + "conv_in": "ctrl_conv_in", + "control_to_base_for_conv_in": "control_to_base_for_conv_in", + } + optional_modules_from_controlnet = {"time_embedding": "ctrl_time_embedding"} + for name_in_controlnet, name_in_unetcnxs in modules_from_controlnet.items(): + assert_equal_weights(getattr(controlnet, name_in_controlnet), name_in_unetcnxs) + + for name_in_controlnet, name_in_unetcnxs in optional_modules_from_controlnet.items(): + if hasattr(controlnet, name_in_controlnet) and getattr(controlnet, name_in_controlnet) is not None: + assert_equal_weights(getattr(controlnet, name_in_controlnet), name_in_unetcnxs) + # down blocks + assert len(controlnet.down_blocks) == len(model.down_blocks) + for i, d in enumerate(controlnet.down_blocks): + assert_equal_weights(d.resnets, f"down_blocks.{i}.ctrl_resnets") + assert_equal_weights(d.base_to_ctrl, f"down_blocks.{i}.base_to_ctrl") + assert_equal_weights(d.ctrl_to_base, f"down_blocks.{i}.ctrl_to_base") + if d.attentions is not None: + assert_equal_weights(d.attentions, f"down_blocks.{i}.ctrl_attentions") + if d.downsamplers is not None: + assert_equal_weights(d.downsamplers, f"down_blocks.{i}.ctrl_downsamplers") + # mid block + assert_equal_weights(controlnet.mid_block.base_to_ctrl, "mid_block.base_to_ctrl") + assert_equal_weights(controlnet.mid_block.midblock, "mid_block.ctrl_midblock") + assert_equal_weights(controlnet.mid_block.ctrl_to_base, "mid_block.ctrl_to_base") + # up blocks + assert len(controlnet.up_connections) == len(model.up_blocks) + for i, u in enumerate(controlnet.up_connections): + assert_equal_weights(u.ctrl_to_base, f"up_blocks.{i}.ctrl_to_base") + + def test_freeze_unet(self): + def assert_frozen(module): + for p in module.parameters(): + assert not p.requires_grad + + def assert_unfrozen(module): + for p in module.parameters(): + assert p.requires_grad + + init_dict, _ = self.prepare_init_args_and_inputs_for_common() + model = UNetControlNetXSModel(**init_dict) + 
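+        # freeze_unet_params() is expected to freeze only the weights copied from the base UNet;
+        # the ControlNet-XS specific modules checked further down must remain trainable.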
model.freeze_unet_params() + + # # check unet + # everything expect down,mid,up blocks + modules_from_unet = [ + model.base_time_embedding, + model.base_conv_in, + model.base_conv_norm_out, + model.base_conv_out, + ] + for m in modules_from_unet: + assert_frozen(m) + + optional_modules_from_unet = [ + model.base_add_time_proj, + model.base_add_embedding, + ] + for m in optional_modules_from_unet: + if m is not None: + assert_frozen(m) + + # down blocks + for i, d in enumerate(model.down_blocks): + assert_frozen(d.base_resnets) + if isinstance(d.base_attentions, nn.ModuleList): # attentions can be list of Nones + assert_frozen(d.base_attentions) + if d.base_downsamplers is not None: + assert_frozen(d.base_downsamplers) + + # mid block + assert_frozen(model.mid_block.base_midblock) + + # up blocks + for i, u in enumerate(model.up_blocks): + assert_frozen(u.resnets) + if isinstance(u.attentions, nn.ModuleList): # attentions can be list of Nones + assert_frozen(u.attentions) + if u.upsamplers is not None: + assert_frozen(u.upsamplers) + + # # check controlnet + # everything expect down,mid,up blocks + modules_from_controlnet = [ + model.controlnet_cond_embedding, + model.ctrl_conv_in, + model.control_to_base_for_conv_in, + ] + optional_modules_from_controlnet = [model.ctrl_time_embedding] + + for m in modules_from_controlnet: + assert_unfrozen(m) + for m in optional_modules_from_controlnet: + if m is not None: + assert_unfrozen(m) + + # down blocks + for d in model.down_blocks: + assert_unfrozen(d.ctrl_resnets) + assert_unfrozen(d.base_to_ctrl) + assert_unfrozen(d.ctrl_to_base) + if isinstance(d.ctrl_attentions, nn.ModuleList): # attentions can be list of Nones + assert_unfrozen(d.ctrl_attentions) + if d.ctrl_downsamplers is not None: + assert_unfrozen(d.ctrl_downsamplers) + # mid block + assert_unfrozen(model.mid_block.base_to_ctrl) + assert_unfrozen(model.mid_block.ctrl_midblock) + assert_unfrozen(model.mid_block.ctrl_to_base) + # up blocks + for u in model.up_blocks: + assert_unfrozen(u.ctrl_to_base) + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "Transformer2DModel", + "UNetMidBlock2DCrossAttn", + "ControlNetXSCrossAttnDownBlock2D", + "ControlNetXSCrossAttnMidBlock2D", + "ControlNetXSCrossAttnUpBlock2D", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + @is_flaky + def test_forward_no_control(self): + unet = self.get_dummy_unet() + controlnet = self.get_dummy_controlnet_from_unet(unet) + + model = UNetControlNetXSModel.from_unet(unet, controlnet) + + unet = unet.to(torch_device) + model = model.to(torch_device) + + input_ = self.dummy_input + + control_specific_input = ["controlnet_cond", "conditioning_scale"] + input_for_unet = {k: v for k, v in input_.items() if k not in control_specific_input} + + with torch.no_grad(): + unet_output = unet(**input_for_unet).sample.cpu() + unet_controlnet_output = model(**input_, apply_control=False).sample.cpu() + + assert np.abs(unet_output.flatten() - unet_controlnet_output.flatten()).max() < 3e-4 + + def test_time_embedding_mixing(self): + unet = self.get_dummy_unet() + controlnet = self.get_dummy_controlnet_from_unet(unet) + controlnet_mix_time = self.get_dummy_controlnet_from_unet( + unet, time_embedding_mix=0.5, learn_time_embedding=True + ) + + model = UNetControlNetXSModel.from_unet(unet, controlnet) + model_mix_time = UNetControlNetXSModel.from_unet(unet, controlnet_mix_time) + + unet = unet.to(torch_device) + model = model.to(torch_device) + model_mix_time = 
model_mix_time.to(torch_device) + + input_ = self.dummy_input + + with torch.no_grad(): + output = model(**input_).sample + output_mix_time = model_mix_time(**input_).sample + + assert output.shape == output_mix_time.shape + + @unittest.skip("Test not supported.") + def test_forward_with_norm_groups(self): + # UNetControlNetXSModel currently only supports StableDiffusion and StableDiffusion-XL, both of which have norm_num_groups fixed at 32. So we don't need to test different values for norm_num_groups. + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_motion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_motion.py new file mode 100644 index 0000000000000000000000000000000000000000..d931b345fd09fac52a4c3cf8f48bea88df19e182 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_motion.py @@ -0,0 +1,319 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import os +import tempfile +import unittest + +import numpy as np +import torch + +from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +logger = logging.get_logger(__name__) + +enable_full_determinism() + + +class UNetMotionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNetMotionModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 4 + num_channels = 4 + num_frames = 4 + sizes = (16, 16) + + noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size * num_frames, 4, 16)).to(torch_device) + + return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} + + @property + def input_shape(self): + return (4, 4, 16, 16) + + @property + def output_shape(self): + return (4, 4, 16, 16) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (16, 32), + "norm_num_groups": 16, + "down_block_types": ("CrossAttnDownBlockMotion", "DownBlockMotion"), + "up_block_types": ("UpBlockMotion", "CrossAttnUpBlockMotion"), + "cross_attention_dim": 16, + "num_attention_heads": 2, + "out_channels": 4, + "in_channels": 4, + "layers_per_block": 1, + "sample_size": 16, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_from_unet2d(self): + torch.manual_seed(0) + unet2d = UNet2DConditionModel() + + torch.manual_seed(1) + model = self.model_class.from_unet2d(unet2d) + model_state_dict = model.state_dict() + + for param_name, param_value in unet2d.named_parameters(): + 
self.assertTrue(torch.equal(model_state_dict[param_name], param_value)) + + def test_freeze_unet2d(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.freeze_unet2d_params() + + for param_name, param_value in model.named_parameters(): + if "motion_modules" not in param_name: + self.assertFalse(param_value.requires_grad) + + else: + self.assertTrue(param_value.requires_grad) + + def test_loading_motion_adapter(self): + model = self.model_class() + adapter = MotionAdapter() + model.load_motion_modules(adapter) + + for idx, down_block in enumerate(model.down_blocks): + adapter_state_dict = adapter.down_blocks[idx].motion_modules.state_dict() + for param_name, param_value in down_block.motion_modules.named_parameters(): + self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) + + for idx, up_block in enumerate(model.up_blocks): + adapter_state_dict = adapter.up_blocks[idx].motion_modules.state_dict() + for param_name, param_value in up_block.motion_modules.named_parameters(): + self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) + + mid_block_adapter_state_dict = adapter.mid_block.motion_modules.state_dict() + for param_name, param_value in model.mid_block.motion_modules.named_parameters(): + self.assertTrue(torch.equal(mid_block_adapter_state_dict[param_name], param_value)) + + def test_saving_motion_modules(self): + torch.manual_seed(0) + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_motion_modules(tmpdirname) + self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors"))) + + adapter_loaded = MotionAdapter.from_pretrained(tmpdirname) + + torch.manual_seed(0) + model_loaded = self.model_class(**init_dict) + model_loaded.load_motion_modules(adapter_loaded) + model_loaded.to(torch_device) + + with torch.no_grad(): + output = model(**inputs_dict)[0] + output_loaded = model_loaded(**inputs_dict)[0] + + max_diff = (output - output_loaded).abs().max().item() + self.assertLessEqual(max_diff, 1e-4, "Models give different forward passes") + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_enable_works(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.enable_xformers_memory_efficient_attention() + + assert ( + model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ + == "XFormersAttnProcessor" + ), "xformers is not enabled" + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "CrossAttnUpBlockMotion", + "CrossAttnDownBlockMotion", + "UNetMidBlockCrossAttnMotion", + "UpBlockMotion", + "Transformer2DModel", + "DownBlockMotion", + } + super().test_gradient_checkpointing_is_applied(expected_set=expected_set) + + def test_feed_forward_chunking(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + init_dict["block_out_channels"] = (32, 64) + init_dict["norm_num_groups"] = 32 + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict)[0] + + model.enable_forward_chunking() + with torch.no_grad(): + output_2 = 
model(**inputs_dict)[0] + + self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") + assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2 + + def test_pickle(self): + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + model.to(torch_device) + + with torch.no_grad(): + sample = model(**inputs_dict).sample + + sample_copy = copy.copy(sample) + + assert (sample - sample_copy).abs().max() < 1e-4 + + def test_from_save_pretrained(self, expected_max_diff=5e-5): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, safe_serialization=False) + torch.manual_seed(0) + new_model = self.model_class.from_pretrained(tmpdirname) + new_model.to(torch_device) + + with torch.no_grad(): + image = model(**inputs_dict) + if isinstance(image, dict): + image = image.to_tuple()[0] + + new_image = new_model(**inputs_dict) + + if isinstance(new_image, dict): + new_image = new_image.to_tuple()[0] + + max_diff = (image - new_image).abs().max().item() + self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") + + def test_from_save_pretrained_variant(self, expected_max_diff=5e-5): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + torch.manual_seed(0) + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) + + torch.manual_seed(0) + new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") + # non-variant cannot be loaded + with self.assertRaises(OSError) as error_context: + self.model_class.from_pretrained(tmpdirname) + + # make sure that error message states what keys are missing + assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) + + new_model.to(torch_device) + + with torch.no_grad(): + image = model(**inputs_dict) + if isinstance(image, dict): + image = image.to_tuple()[0] + + new_image = new_model(**inputs_dict) + + if isinstance(new_image, dict): + new_image = new_image.to_tuple()[0] + + max_diff = (image - new_image).abs().max().item() + self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") + + def test_forward_with_norm_groups(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["norm_num_groups"] = 16 + init_dict["block_out_channels"] = (16, 32) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_asymmetric_motion_model(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["layers_per_block"] = (2, 3) + init_dict["transformer_layers_per_block"] = ((1, 2), (3, 4, 5)) + init_dict["reverse_transformer_layers_per_block"] = ((7, 6, 7, 4), (4, 2, 2)) + + init_dict["temporal_transformer_layers_per_block"] = ((2, 5), (2, 3, 5)) + 
init_dict["reverse_temporal_transformer_layers_per_block"] = ((5, 4, 3, 4), (3, 2, 2)) + + init_dict["num_attention_heads"] = (2, 4) + init_dict["motion_num_attention_heads"] = (4, 4) + init_dict["reverse_motion_num_attention_heads"] = (2, 2) + + init_dict["use_motion_mid_block"] = True + init_dict["mid_block_layers"] = 2 + init_dict["transformer_layers_per_mid_block"] = (1, 5) + init_dict["temporal_transformer_layers_per_mid_block"] = (2, 4) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.to_tuple()[0] + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_spatiotemporal.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_spatiotemporal.py new file mode 100644 index 0000000000000000000000000000000000000000..7df868c9e95bc0ae5d066523a9f30becb0a87984 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_models_unet_spatiotemporal.py @@ -0,0 +1,227 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import unittest + +import torch + +from diffusers import UNetSpatioTemporalConditionModel +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + skip_mps, + torch_device, +) +from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin + + +logger = logging.get_logger(__name__) + +enable_full_determinism() + + +@skip_mps +class UNetSpatioTemporalConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): + model_class = UNetSpatioTemporalConditionModel + main_input_name = "sample" + + @property + def dummy_input(self): + batch_size = 2 + num_frames = 2 + num_channels = 4 + sizes = (32, 32) + + noise = floats_tensor((batch_size, num_frames, num_channels) + sizes).to(torch_device) + time_step = torch.tensor([10]).to(torch_device) + encoder_hidden_states = floats_tensor((batch_size, 1, 32)).to(torch_device) + + return { + "sample": noise, + "timestep": time_step, + "encoder_hidden_states": encoder_hidden_states, + "added_time_ids": self._get_add_time_ids(), + } + + @property + def input_shape(self): + return (2, 2, 4, 32, 32) + + @property + def output_shape(self): + return (4, 32, 32) + + @property + def fps(self): + return 6 + + @property + def motion_bucket_id(self): + return 127 + + @property + def noise_aug_strength(self): + return 0.02 + + @property + def addition_time_embed_dim(self): + return 32 + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "block_out_channels": (32, 64), + "down_block_types": ( + "CrossAttnDownBlockSpatioTemporal", + "DownBlockSpatioTemporal", + ), + "up_block_types": ( + "UpBlockSpatioTemporal", + "CrossAttnUpBlockSpatioTemporal", + ), + "cross_attention_dim": 32, + "num_attention_heads": 8, + "out_channels": 4, + "in_channels": 4, + "layers_per_block": 2, + "sample_size": 32, + "projection_class_embeddings_input_dim": self.addition_time_embed_dim * 3, + "addition_time_embed_dim": self.addition_time_embed_dim, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def _get_add_time_ids(self, do_classifier_free_guidance=True): + add_time_ids = [self.fps, self.motion_bucket_id, self.noise_aug_strength] + + passed_add_embed_dim = self.addition_time_embed_dim * len(add_time_ids) + expected_add_embed_dim = self.addition_time_embed_dim * 3 + + if expected_add_embed_dim != passed_add_embed_dim: + raise ValueError( + f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
+ ) + + add_time_ids = torch.tensor([add_time_ids], device=torch_device) + add_time_ids = add_time_ids.repeat(1, 1) + if do_classifier_free_guidance: + add_time_ids = torch.cat([add_time_ids, add_time_ids]) + + return add_time_ids + + @unittest.skip("Number of Norm Groups is not configurable") + def test_forward_with_norm_groups(self): + pass + + @unittest.skip("Deprecated functionality") + def test_model_attention_slicing(self): + pass + + @unittest.skip("Not supported") + def test_model_with_use_linear_projection(self): + pass + + @unittest.skip("Not supported") + def test_model_with_simple_projection(self): + pass + + @unittest.skip("Not supported") + def test_model_with_class_embeddings_concat(self): + pass + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_enable_works(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.model_class(**init_dict) + + model.enable_xformers_memory_efficient_attention() + + assert ( + model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ + == "XFormersAttnProcessor" + ), "xformers is not enabled" + + def test_model_with_num_attention_heads_tuple(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["num_attention_heads"] = (8, 16) + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_model_with_cross_attention_dim_tuple(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["cross_attention_dim"] = (32, 32) + + model = self.model_class(**init_dict) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + output = model(**inputs_dict) + + if isinstance(output, dict): + output = output.sample + + self.assertIsNotNone(output) + expected_shape = inputs_dict["sample"].shape + self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") + + def test_gradient_checkpointing_is_applied(self): + expected_set = { + "TransformerSpatioTemporalModel", + "CrossAttnDownBlockSpatioTemporal", + "DownBlockSpatioTemporal", + "UpBlockSpatioTemporal", + "CrossAttnUpBlockSpatioTemporal", + "UNetMidBlockSpatioTemporal", + } + num_attention_heads = (8, 16) + super().test_gradient_checkpointing_is_applied( + expected_set=expected_set, num_attention_heads=num_attention_heads + ) + + def test_pickle(self): + # enable deterministic behavior for gradient checkpointing + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + + init_dict["num_attention_heads"] = (8, 16) + + model = self.model_class(**init_dict) + model.to(torch_device) + + with torch.no_grad(): + sample = model(**inputs_dict).sample + + sample_copy = copy.copy(sample) + + assert (sample - sample_copy).abs().max() < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_unet_2d_blocks.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_unet_2d_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..5c006963e30c9300cdd8944837ef6b394f518053 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_unet_2d_blocks.py @@ -0,0 +1,337 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest + +from diffusers.models.unets.unet_2d_blocks import * # noqa F403 + +from ...testing_utils import torch_device +from .test_unet_blocks_common import UNetBlockTesterMixin + + +class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = DownBlock2D # noqa F405 + block_type = "down" + + def test_output(self): + expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] + super().test_output(expected_slice) + + +class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = ResnetDownsampleBlock2D # noqa F405 + block_type = "down" + + def test_output(self): + expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] + super().test_output(expected_slice) + + +class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = AttnDownBlock2D # noqa F405 + block_type = "down" + + def test_output(self): + expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] + super().test_output(expected_slice) + + +class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = CrossAttnDownBlock2D # noqa F405 + block_type = "down" + + def prepare_init_args_and_inputs_for_common(self): + init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() + init_dict["cross_attention_dim"] = 32 + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] + super().test_output(expected_slice) + + +class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = SimpleCrossAttnDownBlock2D # noqa F405 + block_type = "down" + + @property + def dummy_input(self): + return super().get_dummy_input(include_encoder_hidden_states=True) + + def prepare_init_args_and_inputs_for_common(self): + init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() + init_dict["cross_attention_dim"] = 32 + return init_dict, inputs_dict + + @unittest.skipIf(torch_device == "mps", "MPS result is not consistent") + def test_output(self): + expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] + super().test_output(expected_slice) + + +class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = SkipDownBlock2D # noqa F405 + block_type = "down" + + @property + def dummy_input(self): + return super().get_dummy_input(include_skip_sample=True) + + def test_output(self): + expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] + super().test_output(expected_slice) + + +class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = AttnSkipDownBlock2D # noqa F405 
+ block_type = "down" + + @property + def dummy_input(self): + return super().get_dummy_input(include_skip_sample=True) + + def test_output(self): + expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] + super().test_output(expected_slice) + + +class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = DownEncoderBlock2D # noqa F405 + block_type = "down" + + @property + def dummy_input(self): + return super().get_dummy_input(include_temb=False) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 32, + "out_channels": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] + super().test_output(expected_slice) + + +class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = AttnDownEncoderBlock2D # noqa F405 + block_type = "down" + + @property + def dummy_input(self): + return super().get_dummy_input(include_temb=False) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 32, + "out_channels": 32, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] + super().test_output(expected_slice) + + +class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = UNetMidBlock2D # noqa F405 + block_type = "mid" + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 32, + "temb_channels": 128, + } + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] + super().test_output(expected_slice) + + +class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = UNetMidBlock2DCrossAttn # noqa F405 + block_type = "mid" + + def prepare_init_args_and_inputs_for_common(self): + init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() + init_dict["cross_attention_dim"] = 32 + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] + super().test_output(expected_slice) + + +class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = UNetMidBlock2DSimpleCrossAttn # noqa F405 + block_type = "mid" + + @property + def dummy_input(self): + return super().get_dummy_input(include_encoder_hidden_states=True) + + def prepare_init_args_and_inputs_for_common(self): + init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() + init_dict["cross_attention_dim"] = 32 + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] + super().test_output(expected_slice) + + +class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = UpBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True) + + def test_output(self): + expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] + super().test_output(expected_slice) + + +class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, 
unittest.TestCase): + block_class = ResnetUpsampleBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True) + + def test_output(self): + expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] + super().test_output(expected_slice) + + +class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = CrossAttnUpBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True) + + def prepare_init_args_and_inputs_for_common(self): + init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() + init_dict["cross_attention_dim"] = 32 + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] + super().test_output(expected_slice) + + +class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = SimpleCrossAttnUpBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True) + + def prepare_init_args_and_inputs_for_common(self): + init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() + init_dict["cross_attention_dim"] = 32 + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] + super().test_output(expected_slice) + + +class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = AttnUpBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True) + + @unittest.skipIf(torch_device == "mps", "MPS result is not consistent") + def test_output(self): + expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] + super().test_output(expected_slice) + + +class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = SkipUpBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True) + + def test_output(self): + expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] + super().test_output(expected_slice) + + +class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = AttnSkipUpBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_res_hidden_states_tuple=True) + + def test_output(self): + expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] + super().test_output(expected_slice) + + +class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = UpDecoderBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_temb=False) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = {"in_channels": 32, "out_channels": 32} + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] + super().test_output(expected_slice) + + +class 
AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): + block_class = AttnUpDecoderBlock2D # noqa F405 + block_type = "up" + + @property + def dummy_input(self): + return super().get_dummy_input(include_temb=False) + + def prepare_init_args_and_inputs_for_common(self): + init_dict = {"in_channels": 32, "out_channels": 32} + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self): + expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] + super().test_output(expected_slice) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_unet_blocks_common.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_unet_blocks_common.py new file mode 100644 index 0000000000000000000000000000000000000000..85f9bf8353bf2afae05c0a7fa5a94e92deba5ee3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/models/unets/test_unet_blocks_common.py @@ -0,0 +1,127 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Tuple + +import torch + +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + floats_tensor, + require_torch, + require_torch_accelerator_with_training, + torch_all_close, + torch_device, +) + + +@require_torch +class UNetBlockTesterMixin: + @property + def dummy_input(self): + return self.get_dummy_input() + + @property + def output_shape(self): + if self.block_type == "down": + return (4, 32, 16, 16) + elif self.block_type == "mid": + return (4, 32, 32, 32) + elif self.block_type == "up": + return (4, 32, 64, 64) + + raise ValueError(f"'{self.block_type}' is not a supported block_type. 
Set it to 'up', 'mid', or 'down'.") + + def get_dummy_input( + self, + include_temb=True, + include_res_hidden_states_tuple=False, + include_encoder_hidden_states=False, + include_skip_sample=False, + ): + batch_size = 4 + num_channels = 32 + sizes = (32, 32) + + generator = torch.manual_seed(0) + device = torch.device(torch_device) + shape = (batch_size, num_channels) + sizes + hidden_states = randn_tensor(shape, generator=generator, device=device) + dummy_input = {"hidden_states": hidden_states} + + if include_temb: + temb_channels = 128 + dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device) + + if include_res_hidden_states_tuple: + generator_1 = torch.manual_seed(1) + dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),) + + if include_encoder_hidden_states: + dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device) + + if include_skip_sample: + dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device) + + return dummy_input + + def prepare_init_args_and_inputs_for_common(self): + init_dict = { + "in_channels": 32, + "out_channels": 32, + "temb_channels": 128, + } + if self.block_type == "up": + init_dict["prev_output_channel"] = 32 + + if self.block_type == "mid": + init_dict.pop("out_channels") + + inputs_dict = self.dummy_input + return init_dict, inputs_dict + + def test_output(self, expected_slice): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + unet_block = self.block_class(**init_dict) + unet_block.to(torch_device) + unet_block.eval() + + with torch.no_grad(): + output = unet_block(**inputs_dict) + + if isinstance(output, Tuple): + output = output[0] + + self.assertEqual(output.shape, self.output_shape) + + output_slice = output[0, -1, -3:, -3:] + expected_slice = torch.tensor(expected_slice).to(torch_device) + assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3) + + @require_torch_accelerator_with_training + def test_training(self): + init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() + model = self.block_class(**init_dict) + model.to(torch_device) + model.train() + output = model(**inputs_dict) + + if isinstance(output, Tuple): + output = output[0] + + device = torch.device(torch_device) + noise = randn_tensor(output.shape, device=device) + loss = torch.nn.functional.mse_loss(output, noise) + loss.backward() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/stable_diffusion_xl/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/stable_diffusion_xl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..d05f818135ab4a26e0db09d5ae42ec75412eb23c --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/stable_diffusion_xl/test_modular_pipeline_stable_diffusion_xl.py @@ -0,0 +1,462 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest +from typing import Any, Dict + +import numpy as np +import torch +from PIL import Image + +from diffusers import ( + ClassifierFreeGuidance, + StableDiffusionXLAutoBlocks, + StableDiffusionXLModularPipeline, +) +from diffusers.loaders import ModularIPAdapterMixin + +from ...models.unets.test_models_unet_2d_condition import ( + create_ip_adapter_state_dict, +) +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_modular_pipelines_common import ( + ModularPipelineTesterMixin, +) + + +enable_full_determinism() + + +class SDXLModularTests: + """ + This mixin defines method to create pipeline, base input and base test across all SDXL modular tests. + """ + + pipeline_class = StableDiffusionXLModularPipeline + pipeline_blocks_class = StableDiffusionXLAutoBlocks + repo = "hf-internal-testing/tiny-sdxl-modular" + params = frozenset( + [ + "prompt", + "height", + "width", + "negative_prompt", + "cross_attention_kwargs", + "image", + "mask_image", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt", "image", "mask_image"]) + + def get_pipeline(self, components_manager=None, torch_dtype=torch.float32): + pipeline = self.pipeline_blocks_class().init_pipeline(self.repo, components_manager=components_manager) + pipeline.load_components(torch_dtype=torch_dtype) + return pipeline + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def _test_stable_diffusion_xl_euler(self, expected_image_shape, expected_slice, expected_max_diff=1e-2): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + sd_pipe = self.get_pipeline() + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs, output="images") + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == expected_image_shape + + assert np.abs(image_slice.flatten() - expected_slice).max() < expected_max_diff, ( + "Image Slice does not match expected slice" + ) + + +class SDXLModularIPAdapterTests: + """ + This mixin is designed to test IP Adapter. 
+ """ + + def test_pipeline_inputs_and_blocks(self): + blocks = self.pipeline_blocks_class() + parameters = blocks.input_names + + assert issubclass(self.pipeline_class, ModularIPAdapterMixin) + assert "ip_adapter_image" in parameters, ( + "`ip_adapter_image` argument must be supported by the `__call__` method" + ) + assert "ip_adapter" in blocks.sub_blocks, "pipeline must contain an IPAdapter block" + + _ = blocks.sub_blocks.pop("ip_adapter") + parameters = blocks.input_names + assert "ip_adapter_image" not in parameters, ( + "`ip_adapter_image` argument must be removed from the `__call__` method" + ) + + def _get_dummy_image_embeds(self, cross_attention_dim: int = 32): + return torch.randn((1, 1, cross_attention_dim), device=torch_device) + + def _get_dummy_faceid_image_embeds(self, cross_attention_dim: int = 32): + return torch.randn((1, 1, 1, cross_attention_dim), device=torch_device) + + def _get_dummy_masks(self, input_size: int = 64): + _masks = torch.zeros((1, 1, input_size, input_size), device=torch_device) + _masks[0, :, :, : int(input_size / 2)] = 1 + return _masks + + def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): + blocks = self.pipeline_blocks_class() + _ = blocks.sub_blocks.pop("ip_adapter") + parameters = blocks.input_names + if "image" in parameters and "strength" in parameters: + inputs["num_inference_steps"] = 4 + + inputs["output_type"] = "np" + return inputs + + def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): + r"""Tests for IP-Adapter. + + The following scenarios are tested: + - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. + - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. + - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. + - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. + """ + # Raising the tolerance for this test when it's run on a CPU because we + # compare against static slices and that can be shaky (with a VVVV low probability). + expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff + + blocks = self.pipeline_blocks_class() + _ = blocks.sub_blocks.pop("ip_adapter") + pipe = blocks.init_pipeline(self.repo) + pipe.load_components(torch_dtype=torch.float32) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + cross_attention_dim = pipe.unet.config.get("cross_attention_dim") + + # forward pass without ip adapter + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + if expected_pipe_slice is None: + output_without_adapter = pipe(**inputs, output="images") + else: + output_without_adapter = expected_pipe_slice + + # 1. 
Single IP-Adapter test cases + adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights(adapter_state_dict) + + # forward pass with single ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + pipe.set_ip_adapter_scale(0.0) + output_without_adapter_scale = pipe(**inputs, output="images") + if expected_pipe_slice is not None: + output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with single ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + pipe.set_ip_adapter_scale(42.0) + output_with_adapter_scale = pipe(**inputs, output="images") + if expected_pipe_slice is not None: + output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() + max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() + + assert max_diff_without_adapter_scale < expected_max_diff, ( + "Output without ip-adapter must be same as normal inference" + ) + assert max_diff_with_adapter_scale > 1e-2, "Output with ip-adapter must be different from normal inference" + + # 2. Multi IP-Adapter test cases + adapter_state_dict_1 = create_ip_adapter_state_dict(pipe.unet) + adapter_state_dict_2 = create_ip_adapter_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) + + # forward pass with multi ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 + inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 + pipe.set_ip_adapter_scale([0.0, 0.0]) + output_without_multi_adapter_scale = pipe(**inputs, output="images") + if expected_pipe_slice is not None: + output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with multi ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 + inputs["negative_ip_adapter_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 + pipe.set_ip_adapter_scale([42.0, 42.0]) + output_with_multi_adapter_scale = pipe(**inputs, output="images") + if expected_pipe_slice is not None: + output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_multi_adapter_scale = np.abs( + output_without_multi_adapter_scale - output_without_adapter + ).max() + max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() + assert max_diff_without_multi_adapter_scale < expected_max_diff, ( + "Output without multi-ip-adapter must be same as normal inference" + ) + assert 
max_diff_with_multi_adapter_scale > 1e-2, ( + "Output with multi-ip-adapter scale must be different from normal inference" + ) + + +class SDXLModularControlNetTests: + """ + This mixin is designed to test ControlNet. + """ + + def test_pipeline_inputs(self): + blocks = self.pipeline_blocks_class() + parameters = blocks.input_names + + assert "control_image" in parameters, "`control_image` argument must be supported by the `__call__` method" + assert "controlnet_conditioning_scale" in parameters, ( + "`controlnet_conditioning_scale` argument must be supported by the `__call__` method" + ) + + def _modify_inputs_for_controlnet_test(self, inputs: Dict[str, Any]): + controlnet_embedder_scale_factor = 2 + image = torch.randn( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + device=torch_device, + ) + inputs["control_image"] = image + return inputs + + def test_controlnet(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): + r"""Tests for ControlNet. + + The following scenarios are tested: + - Single ControlNet with scale=0 should produce same output as no ControlNet. + - Single ControlNet with scale!=0 should produce different output compared to no ControlNet. + """ + # Raising the tolerance for this test when it's run on a CPU because we + # compare against static slices and that can be shaky (with a VVVV low probability). + expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff + + pipe = self.get_pipeline() + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # forward pass without controlnet + inputs = self.get_dummy_inputs(torch_device) + output_without_controlnet = pipe(**inputs, output="images") + output_without_controlnet = output_without_controlnet[0, -3:, -3:, -1].flatten() + + # forward pass with single controlnet, but scale=0 which should have no effect + inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) + inputs["controlnet_conditioning_scale"] = 0.0 + output_without_controlnet_scale = pipe(**inputs, output="images") + output_without_controlnet_scale = output_without_controlnet_scale[0, -3:, -3:, -1].flatten() + + # forward pass with single controlnet, but with scale of adapter weights + inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) + inputs["controlnet_conditioning_scale"] = 42.0 + output_with_controlnet_scale = pipe(**inputs, output="images") + output_with_controlnet_scale = output_with_controlnet_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_controlnet_scale = np.abs(output_without_controlnet_scale - output_without_controlnet).max() + max_diff_with_controlnet_scale = np.abs(output_with_controlnet_scale - output_without_controlnet).max() + + assert max_diff_without_controlnet_scale < expected_max_diff, ( + "Output without controlnet must be same as normal inference" + ) + assert max_diff_with_controlnet_scale > 1e-2, "Output with controlnet must be different from normal inference" + + def test_controlnet_cfg(self): + pipe = self.get_pipeline() + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # forward pass with CFG not applied + guider = ClassifierFreeGuidance(guidance_scale=1.0) + pipe.update_components(guider=guider) + + inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) + out_no_cfg = pipe(**inputs, output="images") + + # forward pass with CFG applied + guider = ClassifierFreeGuidance(guidance_scale=7.5) + 
pipe.update_components(guider=guider) + inputs = self._modify_inputs_for_controlnet_test(self.get_dummy_inputs(torch_device)) + out_cfg = pipe(**inputs, output="images") + + assert out_cfg.shape == out_no_cfg.shape + max_diff = np.abs(out_cfg - out_no_cfg).max() + assert max_diff > 1e-2, "Output with CFG must be different from normal inference" + + +class SDXLModularGuiderTests: + def test_guider_cfg(self): + pipe = self.get_pipeline() + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # forward pass with CFG not applied + guider = ClassifierFreeGuidance(guidance_scale=1.0) + pipe.update_components(guider=guider) + + inputs = self.get_dummy_inputs(torch_device) + out_no_cfg = pipe(**inputs, output="images") + + # forward pass with CFG applied + guider = ClassifierFreeGuidance(guidance_scale=7.5) + pipe.update_components(guider=guider) + inputs = self.get_dummy_inputs(torch_device) + out_cfg = pipe(**inputs, output="images") + + assert out_cfg.shape == out_no_cfg.shape + max_diff = np.abs(out_cfg - out_no_cfg).max() + assert max_diff > 1e-2, "Output with CFG must be different from normal inference" + + +class SDXLModularPipelineFastTests( + SDXLModularTests, + SDXLModularIPAdapterTests, + SDXLModularControlNetTests, + SDXLModularGuiderTests, + ModularPipelineTesterMixin, + unittest.TestCase, +): + """Test cases for Stable Diffusion XL modular pipeline fast tests.""" + + def test_stable_diffusion_xl_euler(self): + self._test_stable_diffusion_xl_euler( + expected_image_shape=(1, 64, 64, 3), + expected_slice=[ + 0.5966781, + 0.62939394, + 0.48465094, + 0.51573336, + 0.57593524, + 0.47035995, + 0.53410417, + 0.51436996, + 0.47313565, + ], + expected_max_diff=1e-2, + ) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + +class SDXLImg2ImgModularPipelineFastTests( + SDXLModularTests, + SDXLModularIPAdapterTests, + SDXLModularControlNetTests, + SDXLModularGuiderTests, + ModularPipelineTesterMixin, + unittest.TestCase, +): + """Test cases for Stable Diffusion XL image-to-image modular pipeline fast tests.""" + + def get_dummy_inputs(self, device, seed=0): + inputs = super().get_dummy_inputs(device, seed) + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + inputs["image"] = image + inputs["strength"] = 0.8 + + return inputs + + def test_stable_diffusion_xl_euler(self): + self._test_stable_diffusion_xl_euler( + expected_image_shape=(1, 64, 64, 3), + expected_slice=[ + 0.56943184, + 0.4702148, + 0.48048905, + 0.6235963, + 0.551138, + 0.49629188, + 0.60031277, + 0.5688907, + 0.43996853, + ], + expected_max_diff=1e-2, + ) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + +class SDXLInpaintingModularPipelineFastTests( + SDXLModularTests, + SDXLModularIPAdapterTests, + SDXLModularControlNetTests, + SDXLModularGuiderTests, + ModularPipelineTesterMixin, + unittest.TestCase, +): + """Test cases for Stable Diffusion XL inpainting modular pipeline fast tests.""" + + def get_dummy_inputs(self, device, seed=0): + inputs = super().get_dummy_inputs(device, seed) + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + # create mask + image[8:, 8:, :] = 255 + mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) + + 
inputs["image"] = init_image + inputs["mask_image"] = mask_image + inputs["strength"] = 1.0 + + return inputs + + def test_stable_diffusion_xl_euler(self): + self._test_stable_diffusion_xl_euler( + expected_image_shape=(1, 64, 64, 3), + expected_slice=[ + 0.40872607, + 0.38842705, + 0.34893104, + 0.47837183, + 0.43792963, + 0.5332134, + 0.3716843, + 0.47274873, + 0.45000193, + ], + expected_max_diff=1e-2, + ) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/test_modular_pipelines_common.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/test_modular_pipelines_common.py new file mode 100644 index 0000000000000000000000000000000000000000..d309fcf3533934ff31bb67dad9f792aab13f769d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/modular_pipelines/test_modular_pipelines_common.py @@ -0,0 +1,359 @@ +import gc +import tempfile +import unittest +from typing import Callable, Union + +import numpy as np +import torch + +import diffusers +from diffusers import ComponentsManager, ModularPipeline, ModularPipelineBlocks +from diffusers.utils import logging + +from ..testing_utils import ( + backend_empty_cache, + numpy_cosine_similarity_distance, + require_accelerator, + require_torch, + torch_device, +) + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +@require_torch +class ModularPipelineTesterMixin: + """ + This mixin is designed to be used with unittest.TestCase classes. + It provides a set of common tests for each modular pipeline, + including: + - test_pipeline_call_signature: check if the pipeline's __call__ method has all required parameters + - test_inference_batch_consistent: check if the pipeline's __call__ method can handle batch inputs + - test_inference_batch_single_identical: check if the pipeline's __call__ method can handle single input + - test_float16_inference: check if the pipeline's __call__ method can handle float16 inputs + - test_to_device: check if the pipeline's __call__ method can handle different devices + """ + + # Canonical parameters that are passed to `__call__` regardless + # of the type of pipeline. They are always optional and have common + # sense default values. + optional_params = frozenset( + [ + "num_inference_steps", + "num_images_per_prompt", + "latents", + "output_type", + ] + ) + # this is modular specific: generator needs to be a intermediate input because it's mutable + intermediate_params = frozenset( + [ + "generator", + ] + ) + + def get_generator(self, seed): + device = torch_device if torch_device != "mps" else "cpu" + generator = torch.Generator(device).manual_seed(seed) + return generator + + @property + def pipeline_class(self) -> Union[Callable, ModularPipeline]: + raise NotImplementedError( + "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " + "See existing pipeline tests for reference." + ) + + @property + def repo(self) -> str: + raise NotImplementedError( + "You need to set the attribute `repo` in the child test class. See existing pipeline tests for reference." + ) + + @property + def pipeline_blocks_class(self) -> Union[Callable, ModularPipelineBlocks]: + raise NotImplementedError( + "You need to set the attribute `pipeline_blocks_class = ClassNameOfPipelineBlocks` in the child test class. " + "See existing pipeline tests for reference." 
+ ) + + def get_pipeline(self): + raise NotImplementedError( + "You need to implement `get_pipeline(self)` in the child test class. " + "See existing pipeline tests for reference." + ) + + def get_dummy_inputs(self, device, seed=0): + raise NotImplementedError( + "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " + "See existing pipeline tests for reference." + ) + + @property + def params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `params` in the child test class. " + "`params` are checked for if all values are present in `__call__`'s signature." + " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" + " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " + "image pipelines, including prompts and prompt embedding overrides." + "If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " + "do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline " + "with non-configurable height and width arguments should set the attribute as " + "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " + "See existing pipeline tests for reference." + ) + + @property + def batch_params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `batch_params` in the child test class. " + "`batch_params` are the parameters required to be batched when passed to the pipeline's " + "`__call__` method. `pipeline_params.py` provides some common sets of parameters such as " + "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " + "set of batch arguments has minor changes from one of the common sets of batch arguments, " + "do not make modifications to the existing common sets of batch arguments. I.e. a text to " + "image pipeline `negative_prompt` is not batched should set the attribute as " + "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " + "See existing pipeline tests for reference." 
+ ) + + def setUp(self): + # clean up the VRAM before each test + super().setUp() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test in case of CUDA runtime errors + super().tearDown() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def test_pipeline_call_signature(self): + pipe = self.get_pipeline() + input_parameters = pipe.blocks.input_names + optional_parameters = pipe.default_call_parameters + + def _check_for_parameters(parameters, expected_parameters, param_type): + remaining_parameters = {param for param in parameters if param not in expected_parameters} + assert len(remaining_parameters) == 0, ( + f"Required {param_type} parameters not present: {remaining_parameters}" + ) + + _check_for_parameters(self.params, input_parameters, "input") + _check_for_parameters(self.optional_params, optional_parameters, "optional") + + def test_inference_batch_consistent(self, batch_sizes=[2], batch_generator=True): + pipe = self.get_pipeline() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # prepare batched inputs + batched_inputs = [] + for batch_size in batch_sizes: + batched_input = {} + batched_input.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + batched_input[name] = batch_size * [value] + + if batch_generator and "generator" in inputs: + batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_input["batch_size"] = batch_size + + batched_inputs.append(batched_input) + + logger.setLevel(level=diffusers.logging.WARNING) + for batch_size, batched_input in zip(batch_sizes, batched_inputs): + output = pipe(**batched_input, output="images") + assert len(output) == batch_size, "Output is different from expected batch size" + + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + ): + pipe = self.get_pipeline() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + output = pipe(**inputs, output="images") + output_batch = pipe(**batched_inputs, output="images") + + assert output_batch.shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0]) - to_np(output[0])).max() + assert max_diff < expected_max_diff, "Batch inference results different from single inference results" + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_float16_inference(self, expected_max_diff=5e-2): + pipe = self.get_pipeline() + pipe.to(torch_device, torch.float32) 
+ pipe.set_progress_bar_config(disable=None) + + pipe_fp16 = self.get_pipeline() + pipe_fp16.to(torch_device, torch.float16) + pipe_fp16.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is used inside dummy inputs + if "generator" in inputs: + inputs["generator"] = self.get_generator(0) + output = pipe(**inputs, output="images") + + fp16_inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is used inside dummy inputs + if "generator" in fp16_inputs: + fp16_inputs["generator"] = self.get_generator(0) + output_fp16 = pipe_fp16(**fp16_inputs, output="images") + + if isinstance(output, torch.Tensor): + output = output.cpu() + output_fp16 = output_fp16.cpu() + + max_diff = numpy_cosine_similarity_distance(output.flatten(), output_fp16.flatten()) + assert max_diff < expected_max_diff, "FP16 inference is different from FP32 inference" + + @require_accelerator + def test_to_device(self): + pipe = self.get_pipeline() + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + assert all(device == "cpu" for device in model_devices), "All pipeline components are not on CPU" + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + assert all(device == torch_device for device in model_devices), ( + "All pipeline components are not on accelerator device" + ) + + def test_inference_is_not_nan_cpu(self): + pipe = self.get_pipeline() + pipe.set_progress_bar_config(disable=None) + pipe.to("cpu") + + output = pipe(**self.get_dummy_inputs("cpu"), output="images") + assert np.isnan(to_np(output)).sum() == 0, "CPU Inference returns NaN" + + @require_accelerator + def test_inference_is_not_nan(self): + pipe = self.get_pipeline() + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + output = pipe(**self.get_dummy_inputs(torch_device), output="images") + assert np.isnan(to_np(output)).sum() == 0, "Accelerator Inference returns NaN" + + def test_num_images_per_prompt(self): + pipe = self.get_pipeline() + + if "num_images_per_prompt" not in pipe.blocks.input_names: + return + + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + batch_sizes = [1, 2] + num_images_per_prompts = [1, 2] + + for batch_size in batch_sizes: + for num_images_per_prompt in num_images_per_prompts: + inputs = self.get_dummy_inputs(torch_device) + + for key in inputs.keys(): + if key in self.batch_params: + inputs[key] = batch_size * [inputs[key]] + + images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt, output="images") + + assert images.shape[0] == batch_size * num_images_per_prompt + + @require_accelerator + def test_components_auto_cpu_offload_inference_consistent(self): + base_pipe = self.get_pipeline().to(torch_device) + + cm = ComponentsManager() + cm.enable_auto_cpu_offload(device=torch_device) + offload_pipe = self.get_pipeline(components_manager=cm) + + image_slices = [] + for pipe in [base_pipe, offload_pipe]: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs, output="images") + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + + def test_save_from_pretrained(self): + pipes = [] + base_pipe = self.get_pipeline().to(torch_device) + pipes.append(base_pipe) + + with 
tempfile.TemporaryDirectory() as tmpdirname: + base_pipe.save_pretrained(tmpdirname) + pipe = ModularPipeline.from_pretrained(tmpdirname).to(torch_device) + pipe.load_components(torch_dtype=torch.float32) + pipe.to(torch_device) + + pipes.append(pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs, output="images") + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_copies.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_copies.py new file mode 100644 index 0000000000000000000000000000000000000000..4b6fa28eb9ac75131972f4c4188f1746df8996be --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_copies.py @@ -0,0 +1,117 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +import shutil +import sys +import tempfile +import unittest + + +git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) +sys.path.append(os.path.join(git_repo_path, "utils")) + +import check_copies # noqa: E402 + + +# This is the reference code that will be used in the tests. +# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. +REFERENCE_CODE = """ \""" + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample `(x_{0})` based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
+ \""" + + prev_sample: torch.Tensor + pred_original_sample: Optional[torch.Tensor] = None +""" + + +class CopyCheckTester(unittest.TestCase): + def setUp(self): + self.diffusers_dir = tempfile.mkdtemp() + os.makedirs(os.path.join(self.diffusers_dir, "schedulers/")) + check_copies.DIFFUSERS_PATH = self.diffusers_dir + shutil.copy( + os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), + os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), + ) + + def tearDown(self): + check_copies.DIFFUSERS_PATH = "src/diffusers" + shutil.rmtree(self.diffusers_dir) + + def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None): + code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code + if overwrite_result is not None: + expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result + code = check_copies.run_ruff(code) + fname = os.path.join(self.diffusers_dir, "new_code.py") + with open(fname, "w", newline="\n") as f: + f.write(code) + if overwrite_result is None: + self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0) + else: + check_copies.is_copy_consistent(f.name, overwrite=True) + with open(fname, "r") as f: + self.assertTrue(f.read(), expected) + + def test_find_code_in_diffusers(self): + code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput") + self.assertEqual(code, REFERENCE_CODE) + + def test_is_copy_consistent(self): + # Base copy consistency + self.check_copy_consistency( + "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", + "DDPMSchedulerOutput", + REFERENCE_CODE + "\n", + ) + + # With no empty line at the end + self.check_copy_consistency( + "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", + "DDPMSchedulerOutput", + REFERENCE_CODE, + ) + + # Copy consistency with rename + self.check_copy_consistency( + "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", + "TestSchedulerOutput", + re.sub("DDPM", "Test", REFERENCE_CODE), + ) + + # Copy consistency with a really long name + long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason" + self.check_copy_consistency( + f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", + f"{long_class_name}SchedulerOutput", + re.sub("Bert", long_class_name, REFERENCE_CODE), + ) + + # Copy consistency with overwrite + self.check_copy_consistency( + "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", + "TestSchedulerOutput", + REFERENCE_CODE, + overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_dummies.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_dummies.py new file mode 100644 index 0000000000000000000000000000000000000000..b7c544370ca89b39c929ddbbfca6aaac74c1dafc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_dummies.py @@ -0,0 +1,122 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import unittest
+
+
+git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
+sys.path.append(os.path.join(git_repo_path, "utils"))
+
+import check_dummies # noqa: E402
+from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
+
+
+# Align PATH_TO_DIFFUSERS in check_dummies with the current path
+check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
+
+
+class CheckDummiesTester(unittest.TestCase):
+    def test_find_backend(self):
+        simple_backend = find_backend(" if not is_torch_available():")
+        self.assertEqual(simple_backend, "torch")
+
+        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
+        # self.assertEqual(backend_with_underscore, "tensorflow_text")
+
+        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
+        self.assertEqual(double_backend, "torch_and_transformers")
+
+        # double_backend_with_underscore = find_backend(
+        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
+        # )
+        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
+
+        triple_backend = find_backend(
+            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
+        )
+        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
+
+    def test_read_init(self):
+        objects = read_init()
+        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
+        self.assertIn("torch", objects)
+        self.assertIn("torch_and_transformers", objects)
+        self.assertIn("flax_and_transformers", objects)
+        self.assertIn("torch_and_transformers_and_onnx", objects)
+
+        # Likewise, we can't assert on the exact content of a key
+        self.assertIn("UNet2DModel", objects["torch"])
+        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
+        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
+        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
+        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
+        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
+
+    def test_create_dummy_object(self):
+        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
+        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
+
+        dummy_function = create_dummy_object("function", "'torch'")
+        self.assertEqual(
+            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
+        )
+
+        expected_dummy_class = """
+class FakeClass(metaclass=DummyObject):
+    _backends = 'torch'
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, 'torch')
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, 'torch')
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, 'torch')
+"""
+        dummy_class = create_dummy_object("FakeClass", "'torch'")
+        self.assertEqual(dummy_class, expected_dummy_class)
+
+    def test_create_dummy_files(self):
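+        # `create_dummy_files` renders one autogenerated module per backend, in which every
+        # object raises through `requires_backends` until that backend is installed; the
+        # expected "torch" module below spells this behaviour out verbatim.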
expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. +from ..utils import DummyObject, requires_backends + + +CONSTANT = None + + +def function(*args, **kwargs): + requires_backends(function, ["torch"]) + + +class FakeClass(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, ["torch"]) +""" + dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) + self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_support_list.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_support_list.py new file mode 100644 index 0000000000000000000000000000000000000000..0f6b134aad49f9dddac139a62de202e9606cab0a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_check_support_list.py @@ -0,0 +1,68 @@ +import os +import sys +import unittest +from unittest.mock import mock_open, patch + + +git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) +sys.path.append(os.path.join(git_repo_path, "utils")) + +from check_support_list import check_documentation # noqa: E402 + + +class TestCheckSupportList(unittest.TestCase): + def setUp(self): + # Mock doc and source contents that we can reuse + self.doc_content = """# Documentation +## FooProcessor + +[[autodoc]] module.FooProcessor + +## BarProcessor + +[[autodoc]] module.BarProcessor +""" + self.source_content = """ +class FooProcessor(nn.Module): + pass + +class BarProcessor(nn.Module): + pass +""" + + def test_check_documentation_all_documented(self): + # In this test, both FooProcessor and BarProcessor are documented + with patch("builtins.open", mock_open(read_data=self.doc_content)) as doc_file: + doc_file.side_effect = [ + mock_open(read_data=self.doc_content).return_value, + mock_open(read_data=self.source_content).return_value, + ] + + undocumented = check_documentation( + doc_path="fake_doc.md", + src_path="fake_source.py", + doc_regex=r"\[\[autodoc\]\]\s([^\n]+)", + src_regex=r"class\s+(\w+Processor)\(.*?nn\.Module.*?\):", + ) + self.assertEqual(len(undocumented), 0, f"Expected no undocumented classes, got {undocumented}") + + def test_check_documentation_missing_class(self): + # In this test, only FooProcessor is documented, but BarProcessor is missing from the docs + doc_content_missing = """# Documentation +## FooProcessor + +[[autodoc]] module.FooProcessor +""" + with patch("builtins.open", mock_open(read_data=doc_content_missing)) as doc_file: + doc_file.side_effect = [ + mock_open(read_data=doc_content_missing).return_value, + mock_open(read_data=self.source_content).return_value, + ] + + undocumented = check_documentation( + doc_path="fake_doc.md", + src_path="fake_source.py", + doc_regex=r"\[\[autodoc\]\]\s([^\n]+)", + src_regex=r"class\s+(\w+Processor)\(.*?nn\.Module.*?\):", + ) + self.assertIn("BarProcessor", undocumented, f"BarProcessor should be undocumented, got {undocumented}") diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_config.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..232bf9d473b86dc3e8622c909cb927d8b8bf1d72 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_config.py @@ -0,0 +1,307 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import tempfile +import unittest +from pathlib import Path + +from diffusers import ( + DDIMScheduler, + DDPMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + PNDMScheduler, + logging, +) +from diffusers.configuration_utils import ConfigMixin, register_to_config + +from ..testing_utils import CaptureLogger + + +class SampleObject(ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 3], + ): + pass + + +class SampleObject2(ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + f=[1, 3], + ): + pass + + +class SampleObject3(ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 3], + f=[1, 3], + ): + pass + + +class SampleObject4(ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 5], + f=[5, 4], + ): + pass + + +class SampleObjectPaths(ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__(self, test_file_1=Path("foo/bar"), test_file_2=Path("foo bar\\bar")): + pass + + +class ConfigTester(unittest.TestCase): + def test_load_not_from_mixin(self): + with self.assertRaises(ValueError): + ConfigMixin.load_config("dummy_path") + + def test_register_to_config(self): + obj = SampleObject() + config = obj.config + assert config["a"] == 2 + assert config["b"] == 5 + assert config["c"] == (2, 5) + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + # init ignore private arguments + obj = SampleObject(_name_or_path="lalala") + config = obj.config + assert config["a"] == 2 + assert config["b"] == 5 + assert config["c"] == (2, 5) + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + # can override default + obj = SampleObject(c=6) + config = obj.config + assert config["a"] == 2 + assert config["b"] == 5 + assert config["c"] == 6 + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + # can use positional arguments. 
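+        # For example, SampleObject(1, c=6) below registers a=1 from the positional argument
+        # while b, d and e keep their defaults.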
+ obj = SampleObject(1, c=6) + config = obj.config + assert config["a"] == 1 + assert config["b"] == 5 + assert config["c"] == 6 + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + def test_save_load(self): + obj = SampleObject() + config = obj.config + + assert config["a"] == 2 + assert config["b"] == 5 + assert config["c"] == (2, 5) + assert config["d"] == "for diffusion" + assert config["e"] == [1, 3] + + with tempfile.TemporaryDirectory() as tmpdirname: + obj.save_config(tmpdirname) + new_obj = SampleObject.from_config(SampleObject.load_config(tmpdirname)) + new_config = new_obj.config + + # unfreeze configs + config = dict(config) + new_config = dict(new_config) + + assert config.pop("c") == (2, 5) # instantiated as tuple + assert new_config.pop("c") == [2, 5] # saved & loaded as list because of json + config.pop("_use_default_values") + assert config == new_config + + def test_load_ddim_from_pndm(self): + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + ddim = DDIMScheduler.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" + ) + + assert ddim.__class__ == DDIMScheduler + # no warning should be thrown + assert cap_logger.out == "" + + def test_load_euler_from_pndm(self): + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + euler = EulerDiscreteScheduler.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" + ) + + assert euler.__class__ == EulerDiscreteScheduler + # no warning should be thrown + assert cap_logger.out == "" + + def test_load_euler_ancestral_from_pndm(self): + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + euler = EulerAncestralDiscreteScheduler.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" + ) + + assert euler.__class__ == EulerAncestralDiscreteScheduler + # no warning should be thrown + assert cap_logger.out == "" + + def test_load_pndm(self): + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + pndm = PNDMScheduler.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler" + ) + + assert pndm.__class__ == PNDMScheduler + # no warning should be thrown + assert cap_logger.out == "" + + def test_overwrite_config_on_load(self): + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + ddpm = DDPMScheduler.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", + subfolder="scheduler", + prediction_type="sample", + beta_end=8, + ) + + with CaptureLogger(logger) as cap_logger_2: + ddpm_2 = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", beta_start=88) + + assert ddpm.__class__ == DDPMScheduler + assert ddpm.config.prediction_type == "sample" + assert ddpm.config.beta_end == 8 + assert ddpm_2.config.beta_start == 88 + + # no warning should be thrown + assert cap_logger.out == "" + assert cap_logger_2.out == "" + + def test_load_dpmsolver(self): + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + + with CaptureLogger(logger) as 
cap_logger:
+            dpm = DPMSolverMultistepScheduler.from_pretrained(
+                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler"
+            )
+
+        assert dpm.__class__ == DPMSolverMultistepScheduler
+        # no warning should be thrown
+        assert cap_logger.out == ""
+
+    def test_use_default_values(self):
+        # let's first save a config that should be in the form
+        # a=2,
+        # b=5,
+        # c=(2, 5),
+        # d="for diffusion",
+        # e=[1, 3],
+
+        config = SampleObject()
+
+        config_dict = {k: v for k, v in config.config.items() if not k.startswith("_")}
+
+        # make sure that default config has all keys in `_use_default_values`
+        assert set(config_dict.keys()) == set(config.config._use_default_values)
+
+        with tempfile.TemporaryDirectory() as tmpdirname:
+            config.save_config(tmpdirname)
+
+            # now loading it with SampleObject2 should put f into `_use_default_values`
+            config = SampleObject2.from_config(SampleObject2.load_config(tmpdirname))
+
+            assert "f" in config.config._use_default_values
+            assert config.config.f == [1, 3]
+
+            # now loading the config, should **NOT** use [1, 3] for `f`, but the default [5, 4] value
+            # **BECAUSE** it is part of `config.config._use_default_values`
+            new_config = SampleObject4.from_config(config.config)
+            assert new_config.config.f == [5, 4]
+
+            config.config._use_default_values.pop()
+            new_config_2 = SampleObject4.from_config(config.config)
+            assert new_config_2.config.f == [1, 3]
+
+            # Nevertheless "e" should still be correctly loaded to [1, 3] from SampleObject2 instead of defaulting to [1, 5]
+            assert new_config_2.config.e == [1, 3]
+
+    def test_check_path_types(self):
+        # Verify that we get a string returned from a WindowsPath or PosixPath (depending on system)
+        config = SampleObjectPaths()
+        json_string = config.to_json_string()
+        result = json.loads(json_string)
+        assert result["test_file_1"] == config.config.test_file_1.as_posix()
+        assert result["test_file_2"] == config.config.test_file_2.as_posix()
diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_dependencies.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..a08129a1e9c90b52737761a35f94e9bf0e6b794a
--- /dev/null
+++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_dependencies.py
@@ -0,0 +1,52 @@
+# Copyright 2025 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
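A minimal sketch of the save/load round trip that the ConfigTester above relies on, using the SampleObject test class defined there (behaviour as asserted in test_save_load): arguments captured by @register_to_config are serialized to JSON by save_config, so tuples come back as lists after reloading.

    import tempfile

    obj = SampleObject(c=(2, 5))
    with tempfile.TemporaryDirectory() as tmpdir:
        obj.save_config(tmpdir)
        reloaded = SampleObject.from_config(SampleObject.load_config(tmpdir))

    assert obj.config.c == (2, 5)  # kept as a tuple in memory
    assert reloaded.config.c == [2, 5]  # JSON has no tuples, so the value reloads as a list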
+ +import inspect +import unittest +from importlib import import_module + + +class DependencyTester(unittest.TestCase): + def test_diffusers_import(self): + try: + import diffusers # noqa: F401 + except ImportError: + assert False + + def test_backend_registration(self): + import diffusers + from diffusers.dependency_versions_table import deps + + all_classes = inspect.getmembers(diffusers, inspect.isclass) + + for cls_name, cls_module in all_classes: + if "dummy_" in cls_module.__module__: + for backend in cls_module._backends: + if backend == "k_diffusion": + backend = "k-diffusion" + elif backend == "invisible_watermark": + backend = "invisible-watermark" + elif backend == "opencv": + backend = "opencv-python" + assert backend in deps, f"{backend} is not in the deps table!" + + def test_pipeline_imports(self): + import diffusers + import diffusers.pipelines + + all_classes = inspect.getmembers(diffusers, inspect.isclass) + for cls_name, cls_module in all_classes: + if hasattr(diffusers.pipelines, cls_name): + pipeline_folder_module = ".".join(str(cls_module.__module__).split(".")[:3]) + _ = import_module(pipeline_folder_module, str(cls_name)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_ema.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..436bbe1d53ff55d1d90288711f1891c0df36ebed --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_ema.py @@ -0,0 +1,335 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
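The backend names recorded on dummy objects are import-style identifiers, while the dependency table is keyed by pip package names; the renaming branch in test_backend_registration above is equivalent to a small lookup table:

    PIP_NAMES = {"k_diffusion": "k-diffusion", "invisible_watermark": "invisible-watermark", "opencv": "opencv-python"}

    def to_pip_name(backend: str) -> str:
        return PIP_NAMES.get(backend, backend)

    assert to_pip_name("k_diffusion") == "k-diffusion"
    assert to_pip_name("torch") == "torch"  # names without a pip alias pass through unchanged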
+ +import tempfile +import unittest + +import torch + +from diffusers import UNet2DConditionModel +from diffusers.training_utils import EMAModel + +from ..testing_utils import enable_full_determinism, skip_mps, torch_device + + +enable_full_determinism() + + +class EMAModelTests(unittest.TestCase): + model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" + batch_size = 1 + prompt_length = 77 + text_encoder_hidden_dim = 32 + num_in_channels = 4 + latent_height = latent_width = 64 + generator = torch.manual_seed(0) + + def get_models(self, decay=0.9999): + unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") + unet = unet.to(torch_device) + ema_unet = EMAModel(unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config) + return unet, ema_unet + + def get_dummy_inputs(self): + noisy_latents = torch.randn( + self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator + ).to(torch_device) + timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) + encoder_hidden_states = torch.randn( + self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator + ).to(torch_device) + return noisy_latents, timesteps, encoder_hidden_states + + def simulate_backprop(self, unet): + updated_state_dict = {} + for k, param in unet.state_dict().items(): + updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) + updated_state_dict.update({k: updated_param}) + unet.load_state_dict(updated_state_dict) + return unet + + def test_from_pretrained(self): + # Save the model parameters to a temporary directory + unet, ema_unet = self.get_models() + with tempfile.TemporaryDirectory() as tmpdir: + ema_unet.save_pretrained(tmpdir) + + # Load the EMA model from the saved directory + loaded_ema_unet = EMAModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel, foreach=False) + loaded_ema_unet.to(torch_device) + + # Check that the shadow parameters of the loaded model match the original EMA model + for original_param, loaded_param in zip(ema_unet.shadow_params, loaded_ema_unet.shadow_params): + assert torch.allclose(original_param, loaded_param, atol=1e-4) + + # Verify that the optimization step is also preserved + assert loaded_ema_unet.optimization_step == ema_unet.optimization_step + + # Check the decay value + assert loaded_ema_unet.decay == ema_unet.decay + + def test_optimization_steps_updated(self): + unet, ema_unet = self.get_models() + # Take the first (hypothetical) EMA step. + ema_unet.step(unet.parameters()) + assert ema_unet.optimization_step == 1 + + # Take two more. + for _ in range(2): + ema_unet.step(unet.parameters()) + assert ema_unet.optimization_step == 3 + + def test_shadow_params_not_updated(self): + unet, ema_unet = self.get_models() + # Since the `unet` is not being updated (i.e., backprop'd) + # there won't be any difference between the `params` of `unet` + # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. + ema_unet.step(unet.parameters()) + orig_params = list(unet.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert torch.allclose(s_param, param) + + # The above holds true even if we call `ema.step()` multiple times since + # `unet` params are still not being updated. 
+ for _ in range(4): + ema_unet.step(unet.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert torch.allclose(s_param, param) + + def test_shadow_params_updated(self): + unet, ema_unet = self.get_models() + # Here we simulate the parameter updates for `unet`. Since there might + # be some parameters which are initialized to zero we take extra care to + # initialize their values to something non-zero before the multiplication. + unet_pseudo_updated_step_one = self.simulate_backprop(unet) + + # Take the EMA step. + ema_unet.step(unet_pseudo_updated_step_one.parameters()) + + # Now the EMA'd parameters won't be equal to the original model parameters. + orig_params = list(unet_pseudo_updated_step_one.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert ~torch.allclose(s_param, param) + + # Ensure this is the case when we take multiple EMA steps. + for _ in range(4): + ema_unet.step(unet.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert ~torch.allclose(s_param, param) + + def test_consecutive_shadow_params_updated(self): + # If we call EMA step after a backpropagation consecutively for two times, + # the shadow params from those two steps should be different. + unet, ema_unet = self.get_models() + + # First backprop + EMA + unet_step_one = self.simulate_backprop(unet) + ema_unet.step(unet_step_one.parameters()) + step_one_shadow_params = ema_unet.shadow_params + + # Second backprop + EMA + unet_step_two = self.simulate_backprop(unet_step_one) + ema_unet.step(unet_step_two.parameters()) + step_two_shadow_params = ema_unet.shadow_params + + for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): + assert ~torch.allclose(step_one, step_two) + + def test_zero_decay(self): + # If there's no decay even if there are backprops, EMA steps + # won't take any effect i.e., the shadow params would remain the + # same. + unet, ema_unet = self.get_models(decay=0.0) + unet_step_one = self.simulate_backprop(unet) + ema_unet.step(unet_step_one.parameters()) + step_one_shadow_params = ema_unet.shadow_params + + unet_step_two = self.simulate_backprop(unet_step_one) + ema_unet.step(unet_step_two.parameters()) + step_two_shadow_params = ema_unet.shadow_params + + for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): + assert torch.allclose(step_one, step_two) + + @skip_mps + def test_serialization(self): + unet, ema_unet = self.get_models() + noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() + + with tempfile.TemporaryDirectory() as tmpdir: + ema_unet.save_pretrained(tmpdir) + loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) + loaded_unet = loaded_unet.to(unet.device) + + # Since no EMA step has been performed the outputs should match. 
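+        # (EMAModel.save_pretrained stores the shadow parameters, and with zero EMA
+        # steps taken those are still identical to the live UNet weights.)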
+ output = unet(noisy_latents, timesteps, encoder_hidden_states).sample + output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample + + assert torch.allclose(output, output_loaded, atol=1e-4) + + +class EMAModelTestsForeach(unittest.TestCase): + model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" + batch_size = 1 + prompt_length = 77 + text_encoder_hidden_dim = 32 + num_in_channels = 4 + latent_height = latent_width = 64 + generator = torch.manual_seed(0) + + def get_models(self, decay=0.9999): + unet = UNet2DConditionModel.from_pretrained(self.model_id, subfolder="unet") + unet = unet.to(torch_device) + ema_unet = EMAModel( + unet.parameters(), decay=decay, model_cls=UNet2DConditionModel, model_config=unet.config, foreach=True + ) + return unet, ema_unet + + def get_dummy_inputs(self): + noisy_latents = torch.randn( + self.batch_size, self.num_in_channels, self.latent_height, self.latent_width, generator=self.generator + ).to(torch_device) + timesteps = torch.randint(0, 1000, size=(self.batch_size,), generator=self.generator).to(torch_device) + encoder_hidden_states = torch.randn( + self.batch_size, self.prompt_length, self.text_encoder_hidden_dim, generator=self.generator + ).to(torch_device) + return noisy_latents, timesteps, encoder_hidden_states + + def simulate_backprop(self, unet): + updated_state_dict = {} + for k, param in unet.state_dict().items(): + updated_param = torch.randn_like(param) + (param * torch.randn_like(param)) + updated_state_dict.update({k: updated_param}) + unet.load_state_dict(updated_state_dict) + return unet + + def test_from_pretrained(self): + # Save the model parameters to a temporary directory + unet, ema_unet = self.get_models() + with tempfile.TemporaryDirectory() as tmpdir: + ema_unet.save_pretrained(tmpdir) + + # Load the EMA model from the saved directory + loaded_ema_unet = EMAModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel, foreach=True) + loaded_ema_unet.to(torch_device) + + # Check that the shadow parameters of the loaded model match the original EMA model + for original_param, loaded_param in zip(ema_unet.shadow_params, loaded_ema_unet.shadow_params): + assert torch.allclose(original_param, loaded_param, atol=1e-4) + + # Verify that the optimization step is also preserved + assert loaded_ema_unet.optimization_step == ema_unet.optimization_step + + # Check the decay value + assert loaded_ema_unet.decay == ema_unet.decay + + def test_optimization_steps_updated(self): + unet, ema_unet = self.get_models() + # Take the first (hypothetical) EMA step. + ema_unet.step(unet.parameters()) + assert ema_unet.optimization_step == 1 + + # Take two more. + for _ in range(2): + ema_unet.step(unet.parameters()) + assert ema_unet.optimization_step == 3 + + def test_shadow_params_not_updated(self): + unet, ema_unet = self.get_models() + # Since the `unet` is not being updated (i.e., backprop'd) + # there won't be any difference between the `params` of `unet` + # and `ema_unet` even if we call `ema_unet.step(unet.parameters())`. + ema_unet.step(unet.parameters()) + orig_params = list(unet.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert torch.allclose(s_param, param) + + # The above holds true even if we call `ema.step()` multiple times since + # `unet` params are still not being updated. 
+ for _ in range(4): + ema_unet.step(unet.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert torch.allclose(s_param, param) + + def test_shadow_params_updated(self): + unet, ema_unet = self.get_models() + # Here we simulate the parameter updates for `unet`. Since there might + # be some parameters which are initialized to zero we take extra care to + # initialize their values to something non-zero before the multiplication. + unet_pseudo_updated_step_one = self.simulate_backprop(unet) + + # Take the EMA step. + ema_unet.step(unet_pseudo_updated_step_one.parameters()) + + # Now the EMA'd parameters won't be equal to the original model parameters. + orig_params = list(unet_pseudo_updated_step_one.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert ~torch.allclose(s_param, param) + + # Ensure this is the case when we take multiple EMA steps. + for _ in range(4): + ema_unet.step(unet.parameters()) + for s_param, param in zip(ema_unet.shadow_params, orig_params): + assert ~torch.allclose(s_param, param) + + def test_consecutive_shadow_params_updated(self): + # If we call EMA step after a backpropagation consecutively for two times, + # the shadow params from those two steps should be different. + unet, ema_unet = self.get_models() + + # First backprop + EMA + unet_step_one = self.simulate_backprop(unet) + ema_unet.step(unet_step_one.parameters()) + step_one_shadow_params = ema_unet.shadow_params + + # Second backprop + EMA + unet_step_two = self.simulate_backprop(unet_step_one) + ema_unet.step(unet_step_two.parameters()) + step_two_shadow_params = ema_unet.shadow_params + + for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): + assert ~torch.allclose(step_one, step_two) + + def test_zero_decay(self): + # If there's no decay even if there are backprops, EMA steps + # won't take any effect i.e., the shadow params would remain the + # same. + unet, ema_unet = self.get_models(decay=0.0) + unet_step_one = self.simulate_backprop(unet) + ema_unet.step(unet_step_one.parameters()) + step_one_shadow_params = ema_unet.shadow_params + + unet_step_two = self.simulate_backprop(unet_step_one) + ema_unet.step(unet_step_two.parameters()) + step_two_shadow_params = ema_unet.shadow_params + + for step_one, step_two in zip(step_one_shadow_params, step_two_shadow_params): + assert torch.allclose(step_one, step_two) + + @skip_mps + def test_serialization(self): + unet, ema_unet = self.get_models() + noisy_latents, timesteps, encoder_hidden_states = self.get_dummy_inputs() + + with tempfile.TemporaryDirectory() as tmpdir: + ema_unet.save_pretrained(tmpdir) + loaded_unet = UNet2DConditionModel.from_pretrained(tmpdir, model_cls=UNet2DConditionModel) + loaded_unet = loaded_unet.to(unet.device) + + # Since no EMA step has been performed the outputs should match. + output = unet(noisy_latents, timesteps, encoder_hidden_states).sample + output_loaded = loaded_unet(noisy_latents, timesteps, encoder_hidden_states).sample + + assert torch.allclose(output, output_loaded, atol=1e-4) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_hub_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_hub_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0a6b8ef2bd9f91d4ced3e1f8caa9e15b4dba5211 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_hub_utils.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +from pathlib import Path +from tempfile import TemporaryDirectory + +from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card + + +class CreateModelCardTest(unittest.TestCase): + def test_generate_model_card_with_library_name(self): + with TemporaryDirectory() as tmpdir: + file_path = Path(tmpdir) / "README.md" + file_path.write_text("---\nlibrary_name: foo\n---\nContent\n") + model_card = load_or_create_model_card(file_path) + populate_model_card(model_card) + assert model_card.data.library_name == "foo" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_image_processor.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_image_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..e9e5c0670676a865efb65a173781201c3efdfe0d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_image_processor.py @@ -0,0 +1,310 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
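+
+# VaeImageProcessor round-trip checks: preprocess() converts PIL images, numpy
+# arrays, and torch tensors into batched NCHW tensors (normalized to [-1, 1] when
+# do_normalize=True), and postprocess() maps them back to the requested
+# output_type ("pt", "np" or "pil"), so a preprocess -> postprocess cycle should
+# reproduce the input up to rounding.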
+ +import unittest + +import numpy as np +import PIL.Image +import torch + +from diffusers.image_processor import VaeImageProcessor + + +class ImageProcessorTest(unittest.TestCase): + @property + def dummy_sample(self): + batch_size = 1 + num_channels = 3 + height = 8 + width = 8 + + sample = torch.rand((batch_size, num_channels, height, width)) + + return sample + + @property + def dummy_mask(self): + batch_size = 1 + num_channels = 1 + height = 8 + width = 8 + + sample = torch.rand((batch_size, num_channels, height, width)) + + return sample + + def to_np(self, image): + if isinstance(image[0], PIL.Image.Image): + return np.stack([np.array(i) for i in image], axis=0) + elif isinstance(image, torch.Tensor): + return image.cpu().numpy().transpose(0, 2, 3, 1) + return image + + def test_vae_image_processor_pt(self): + image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) + + input_pt = self.dummy_sample + input_np = self.to_np(input_pt) + + for output_type in ["pt", "np", "pil"]: + out = image_processor.postprocess( + image_processor.preprocess(input_pt), + output_type=output_type, + ) + out_np = self.to_np(out) + in_np = (input_np * 255).round() if output_type == "pil" else input_np + assert np.abs(in_np - out_np).max() < 1e-6, ( + f"decoded output does not match input for output_type {output_type}" + ) + + def test_vae_image_processor_np(self): + image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) + input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1) + + for output_type in ["pt", "np", "pil"]: + out = image_processor.postprocess(image_processor.preprocess(input_np), output_type=output_type) + + out_np = self.to_np(out) + in_np = (input_np * 255).round() if output_type == "pil" else input_np + assert np.abs(in_np - out_np).max() < 1e-6, ( + f"decoded output does not match input for output_type {output_type}" + ) + + def test_vae_image_processor_pil(self): + image_processor = VaeImageProcessor(do_resize=False, do_normalize=True) + + input_np = self.dummy_sample.cpu().numpy().transpose(0, 2, 3, 1) + input_pil = image_processor.numpy_to_pil(input_np) + + for output_type in ["pt", "np", "pil"]: + out = image_processor.postprocess(image_processor.preprocess(input_pil), output_type=output_type) + for i, o in zip(input_pil, out): + in_np = np.array(i) + out_np = self.to_np(out) if output_type == "pil" else (self.to_np(out) * 255).round() + assert np.abs(in_np - out_np).max() < 1e-6, ( + f"decoded output does not match input for output_type {output_type}" + ) + + def test_preprocess_input_3d(self): + image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) + + input_pt_4d = self.dummy_sample + input_pt_3d = input_pt_4d.squeeze(0) + + out_pt_4d = image_processor.postprocess( + image_processor.preprocess(input_pt_4d), + output_type="np", + ) + out_pt_3d = image_processor.postprocess( + image_processor.preprocess(input_pt_3d), + output_type="np", + ) + + input_np_4d = self.to_np(self.dummy_sample) + input_np_3d = input_np_4d.squeeze(0) + + out_np_4d = image_processor.postprocess( + image_processor.preprocess(input_np_4d), + output_type="np", + ) + out_np_3d = image_processor.postprocess( + image_processor.preprocess(input_np_3d), + output_type="np", + ) + + assert np.abs(out_pt_4d - out_pt_3d).max() < 1e-6 + assert np.abs(out_np_4d - out_np_3d).max() < 1e-6 + + def test_preprocess_input_list(self): + image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) + + input_pt_4d = self.dummy_sample + input_pt_list = 
list(input_pt_4d) + + out_pt_4d = image_processor.postprocess( + image_processor.preprocess(input_pt_4d), + output_type="np", + ) + + out_pt_list = image_processor.postprocess( + image_processor.preprocess(input_pt_list), + output_type="np", + ) + + input_np_4d = self.to_np(self.dummy_sample) + input_np_list = list(input_np_4d) + + out_np_4d = image_processor.postprocess( + image_processor.preprocess(input_np_4d), + output_type="np", + ) + + out_np_list = image_processor.postprocess( + image_processor.preprocess(input_np_list), + output_type="np", + ) + + assert np.abs(out_pt_4d - out_pt_list).max() < 1e-6 + assert np.abs(out_np_4d - out_np_list).max() < 1e-6 + + def test_preprocess_input_mask_3d(self): + image_processor = VaeImageProcessor( + do_resize=False, do_normalize=False, do_binarize=True, do_convert_grayscale=True + ) + + input_pt_4d = self.dummy_mask + input_pt_3d = input_pt_4d.squeeze(0) + input_pt_2d = input_pt_3d.squeeze(0) + + out_pt_4d = image_processor.postprocess( + image_processor.preprocess(input_pt_4d), + output_type="np", + ) + out_pt_3d = image_processor.postprocess( + image_processor.preprocess(input_pt_3d), + output_type="np", + ) + + out_pt_2d = image_processor.postprocess( + image_processor.preprocess(input_pt_2d), + output_type="np", + ) + + input_np_4d = self.to_np(self.dummy_mask) + input_np_3d = input_np_4d.squeeze(0) + input_np_3d_1 = input_np_4d.squeeze(-1) + input_np_2d = input_np_3d.squeeze(-1) + + out_np_4d = image_processor.postprocess( + image_processor.preprocess(input_np_4d), + output_type="np", + ) + out_np_3d = image_processor.postprocess( + image_processor.preprocess(input_np_3d), + output_type="np", + ) + + out_np_3d_1 = image_processor.postprocess( + image_processor.preprocess(input_np_3d_1), + output_type="np", + ) + + out_np_2d = image_processor.postprocess( + image_processor.preprocess(input_np_2d), + output_type="np", + ) + + assert np.abs(out_pt_4d - out_pt_3d).max() == 0 + assert np.abs(out_pt_4d - out_pt_2d).max() == 0 + assert np.abs(out_np_4d - out_np_3d).max() == 0 + assert np.abs(out_np_4d - out_np_3d_1).max() == 0 + assert np.abs(out_np_4d - out_np_2d).max() == 0 + + def test_preprocess_input_mask_list(self): + image_processor = VaeImageProcessor(do_resize=False, do_normalize=False, do_convert_grayscale=True) + + input_pt_4d = self.dummy_mask + input_pt_3d = input_pt_4d.squeeze(0) + input_pt_2d = input_pt_3d.squeeze(0) + + inputs_pt = [input_pt_4d, input_pt_3d, input_pt_2d] + inputs_pt_list = [[input_pt] for input_pt in inputs_pt] + + for input_pt, input_pt_list in zip(inputs_pt, inputs_pt_list): + out_pt = image_processor.postprocess( + image_processor.preprocess(input_pt), + output_type="np", + ) + out_pt_list = image_processor.postprocess( + image_processor.preprocess(input_pt_list), + output_type="np", + ) + assert np.abs(out_pt - out_pt_list).max() < 1e-6 + + input_np_4d = self.to_np(self.dummy_mask) + input_np_3d = input_np_4d.squeeze(0) + input_np_2d = input_np_3d.squeeze(-1) + + inputs_np = [input_np_4d, input_np_3d, input_np_2d] + inputs_np_list = [[input_np] for input_np in inputs_np] + + for input_np, input_np_list in zip(inputs_np, inputs_np_list): + out_np = image_processor.postprocess( + image_processor.preprocess(input_np), + output_type="np", + ) + out_np_list = image_processor.postprocess( + image_processor.preprocess(input_np_list), + output_type="np", + ) + assert np.abs(out_np - out_np_list).max() < 1e-6 + + def test_preprocess_input_mask_3d_batch(self): + image_processor = VaeImageProcessor(do_resize=False, 
do_normalize=False, do_convert_grayscale=True) + + # create a dummy mask input with batch_size 2 + dummy_mask_batch = torch.cat([self.dummy_mask] * 2, axis=0) + + # squeeze out the channel dimension + input_pt_3d = dummy_mask_batch.squeeze(1) + input_np_3d = self.to_np(dummy_mask_batch).squeeze(-1) + + input_pt_3d_list = list(input_pt_3d) + input_np_3d_list = list(input_np_3d) + + out_pt_3d = image_processor.postprocess( + image_processor.preprocess(input_pt_3d), + output_type="np", + ) + out_pt_3d_list = image_processor.postprocess( + image_processor.preprocess(input_pt_3d_list), + output_type="np", + ) + + assert np.abs(out_pt_3d - out_pt_3d_list).max() < 1e-6 + + out_np_3d = image_processor.postprocess( + image_processor.preprocess(input_np_3d), + output_type="np", + ) + out_np_3d_list = image_processor.postprocess( + image_processor.preprocess(input_np_3d_list), + output_type="np", + ) + + assert np.abs(out_np_3d - out_np_3d_list).max() < 1e-6 + + def test_vae_image_processor_resize_pt(self): + image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) + input_pt = self.dummy_sample + b, c, h, w = input_pt.shape + scale = 2 + out_pt = image_processor.resize(image=input_pt, height=h // scale, width=w // scale) + exp_pt_shape = (b, c, h // scale, w // scale) + assert out_pt.shape == exp_pt_shape, ( + f"resized image output shape '{out_pt.shape}' didn't match expected shape '{exp_pt_shape}'." + ) + + def test_vae_image_processor_resize_np(self): + image_processor = VaeImageProcessor(do_resize=True, vae_scale_factor=1) + input_pt = self.dummy_sample + b, c, h, w = input_pt.shape + scale = 2 + input_np = self.to_np(input_pt) + out_np = image_processor.resize(image=input_np, height=h // scale, width=w // scale) + exp_np_shape = (b, h // scale, w // scale, c) + assert out_np.shape == exp_np_shape, ( + f"resized image output shape '{out_np.shape}' didn't match expected shape '{exp_np_shape}'." 
+ ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_outputs.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..c8069e6916edbe6dc3edef2cd54085b2d377128c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_outputs.py @@ -0,0 +1,94 @@ +import pickle as pkl +import unittest +from dataclasses import dataclass +from typing import List, Union + +import numpy as np +import PIL.Image + +from diffusers.utils.outputs import BaseOutput + +from ..testing_utils import require_torch + + +@dataclass +class CustomOutput(BaseOutput): + images: Union[List[PIL.Image.Image], np.ndarray] + + +class ConfigTester(unittest.TestCase): + def test_outputs_single_attribute(self): + outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4)) + + # check every way of getting the attribute + assert isinstance(outputs.images, np.ndarray) + assert outputs.images.shape == (1, 3, 4, 4) + assert isinstance(outputs["images"], np.ndarray) + assert outputs["images"].shape == (1, 3, 4, 4) + assert isinstance(outputs[0], np.ndarray) + assert outputs[0].shape == (1, 3, 4, 4) + + # test with a non-tensor attribute + outputs = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) + + # check every way of getting the attribute + assert isinstance(outputs.images, list) + assert isinstance(outputs.images[0], PIL.Image.Image) + assert isinstance(outputs["images"], list) + assert isinstance(outputs["images"][0], PIL.Image.Image) + assert isinstance(outputs[0], list) + assert isinstance(outputs[0][0], PIL.Image.Image) + + def test_outputs_dict_init(self): + # test output reinitialization with a `dict` for compatibility with `accelerate` + outputs = CustomOutput({"images": np.random.rand(1, 3, 4, 4)}) + + # check every way of getting the attribute + assert isinstance(outputs.images, np.ndarray) + assert outputs.images.shape == (1, 3, 4, 4) + assert isinstance(outputs["images"], np.ndarray) + assert outputs["images"].shape == (1, 3, 4, 4) + assert isinstance(outputs[0], np.ndarray) + assert outputs[0].shape == (1, 3, 4, 4) + + # test with a non-tensor attribute + outputs = CustomOutput({"images": [PIL.Image.new("RGB", (4, 4))]}) + + # check every way of getting the attribute + assert isinstance(outputs.images, list) + assert isinstance(outputs.images[0], PIL.Image.Image) + assert isinstance(outputs["images"], list) + assert isinstance(outputs["images"][0], PIL.Image.Image) + assert isinstance(outputs[0], list) + assert isinstance(outputs[0][0], PIL.Image.Image) + + def test_outputs_serialization(self): + outputs_orig = CustomOutput(images=[PIL.Image.new("RGB", (4, 4))]) + serialized = pkl.dumps(outputs_orig) + outputs_copy = pkl.loads(serialized) + + # Check original and copy are equal + assert dir(outputs_orig) == dir(outputs_copy) + assert dict(outputs_orig) == dict(outputs_copy) + assert vars(outputs_orig) == vars(outputs_copy) + + @require_torch + def test_torch_pytree(self): + # ensure torch.utils._pytree treats ModelOutput subclasses as nodes (and not leaves) + # this is important for DistributedDataParallel gradient synchronization with static_graph=True + import torch + import torch.utils._pytree + + data = np.random.rand(1, 3, 4, 4) + x = CustomOutput(images=data) + self.assertFalse(torch.utils._pytree._is_leaf(x)) + + expected_flat_outs = [data] + expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ["images"], [torch.utils._pytree.LeafSpec()]) + + actual_flat_outs, actual_tree_spec = 
torch.utils._pytree.tree_flatten(x) + self.assertEqual(expected_flat_outs, actual_flat_outs) + self.assertEqual(expected_tree_spec, actual_tree_spec) + + unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec) + self.assertEqual(x, unflattened_x) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_training.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_training.py new file mode 100644 index 0000000000000000000000000000000000000000..2038a98a813e4b4ebe22e45a99255101d6cc0af8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_training.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch + +from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel +from diffusers.training_utils import set_seed + +from ..testing_utils import slow + + +torch.backends.cuda.matmul.allow_tf32 = False + + +class TrainingTests(unittest.TestCase): + def get_model_optimizer(self, resolution=32): + set_seed(0) + model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3) + optimizer = torch.optim.SGD(model.parameters(), lr=0.0001) + return model, optimizer + + @slow + def test_training_step_equality(self): + device = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable + ddpm_scheduler = DDPMScheduler( + num_train_timesteps=1000, + beta_start=0.0001, + beta_end=0.02, + beta_schedule="linear", + clip_sample=True, + ) + ddim_scheduler = DDIMScheduler( + num_train_timesteps=1000, + beta_start=0.0001, + beta_end=0.02, + beta_schedule="linear", + clip_sample=True, + ) + + assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps + + # shared batches for DDPM and DDIM + set_seed(0) + clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)] + noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)] + timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)] + + # train with a DDPM scheduler + model, optimizer = self.get_model_optimizer(resolution=32) + model.train().to(device) + for i in range(4): + optimizer.zero_grad() + ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i]) + ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample + loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i]) + loss.backward() + optimizer.step() + del model, optimizer + + # recreate the model and optimizer, and retry with DDIM + model, optimizer = self.get_model_optimizer(resolution=32) + model.train().to(device) + for i in range(4): + optimizer.zero_grad() + ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i]) + ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample + loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i]) + loss.backward() + optimizer.step() + del model, optimizer + + 
self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5)) + self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..747b8d584058acace080dbe4bbcc3081bbabcb1f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_utils.py @@ -0,0 +1,246 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import unittest + +import pytest + +from diffusers import __version__ +from diffusers.utils import deprecate + +from ..testing_utils import Expectations, str_to_bool + + +# Used to test the hub +USER = "__DUMMY_TRANSFORMERS_USER__" +ENDPOINT_STAGING = "https://hub-ci.huggingface.co" + +# Not critical, only usable on the sandboxed CI instance. +TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL" + + +class DeprecateTester(unittest.TestCase): + higher_version = ".".join([str(int(__version__.split(".")[0]) + 1)] + __version__.split(".")[1:]) + lower_version = "0.0.1" + + def test_deprecate_function_arg(self): + kwargs = {"deprecated_arg": 4} + + with self.assertWarns(FutureWarning) as warning: + output = deprecate("deprecated_arg", self.higher_version, "message", take_from=kwargs) + + assert output == 4 + assert ( + str(warning.warning) + == f"The `deprecated_arg` argument is deprecated and will be removed in version {self.higher_version}." + " message" + ) + + def test_deprecate_function_arg_tuple(self): + kwargs = {"deprecated_arg": 4} + + with self.assertWarns(FutureWarning) as warning: + output = deprecate(("deprecated_arg", self.higher_version, "message"), take_from=kwargs) + + assert output == 4 + assert ( + str(warning.warning) + == f"The `deprecated_arg` argument is deprecated and will be removed in version {self.higher_version}." + " message" + ) + + def test_deprecate_function_args(self): + kwargs = {"deprecated_arg_1": 4, "deprecated_arg_2": 8} + with self.assertWarns(FutureWarning) as warning: + output_1, output_2 = deprecate( + ("deprecated_arg_1", self.higher_version, "Hey"), + ("deprecated_arg_2", self.higher_version, "Hey"), + take_from=kwargs, + ) + assert output_1 == 4 + assert output_2 == 8 + assert ( + str(warning.warnings[0].message) + == "The `deprecated_arg_1` argument is deprecated and will be removed in version" + f" {self.higher_version}. Hey" + ) + assert ( + str(warning.warnings[1].message) + == "The `deprecated_arg_2` argument is deprecated and will be removed in version" + f" {self.higher_version}. 
Hey" + ) + + def test_deprecate_function_incorrect_arg(self): + kwargs = {"deprecated_arg": 4} + + with self.assertRaises(TypeError) as error: + deprecate(("wrong_arg", self.higher_version, "message"), take_from=kwargs) + + assert "test_deprecate_function_incorrect_arg in" in str(error.exception) + assert "line" in str(error.exception) + assert "got an unexpected keyword argument `deprecated_arg`" in str(error.exception) + + def test_deprecate_arg_no_kwarg(self): + with self.assertWarns(FutureWarning) as warning: + deprecate(("deprecated_arg", self.higher_version, "message")) + + assert ( + str(warning.warning) + == f"`deprecated_arg` is deprecated and will be removed in version {self.higher_version}. message" + ) + + def test_deprecate_args_no_kwarg(self): + with self.assertWarns(FutureWarning) as warning: + deprecate( + ("deprecated_arg_1", self.higher_version, "Hey"), + ("deprecated_arg_2", self.higher_version, "Hey"), + ) + assert ( + str(warning.warnings[0].message) + == f"`deprecated_arg_1` is deprecated and will be removed in version {self.higher_version}. Hey" + ) + assert ( + str(warning.warnings[1].message) + == f"`deprecated_arg_2` is deprecated and will be removed in version {self.higher_version}. Hey" + ) + + def test_deprecate_class_obj(self): + class Args: + arg = 5 + + with self.assertWarns(FutureWarning) as warning: + arg = deprecate(("arg", self.higher_version, "message"), take_from=Args()) + + assert arg == 5 + assert ( + str(warning.warning) + == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message" + ) + + def test_deprecate_class_objs(self): + class Args: + arg = 5 + foo = 7 + + with self.assertWarns(FutureWarning) as warning: + arg_1, arg_2 = deprecate( + ("arg", self.higher_version, "message"), + ("foo", self.higher_version, "message"), + ("does not exist", self.higher_version, "message"), + take_from=Args(), + ) + + assert arg_1 == 5 + assert arg_2 == 7 + assert ( + str(warning.warning) + == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message" + ) + assert ( + str(warning.warnings[0].message) + == f"The `arg` attribute is deprecated and will be removed in version {self.higher_version}. message" + ) + assert ( + str(warning.warnings[1].message) + == f"The `foo` attribute is deprecated and will be removed in version {self.higher_version}. message" + ) + + def test_deprecate_incorrect_version(self): + kwargs = {"deprecated_arg": 4} + + with self.assertRaises(ValueError) as error: + deprecate(("wrong_arg", self.lower_version, "message"), take_from=kwargs) + + assert ( + str(error.exception) + == "The deprecation tuple ('wrong_arg', '0.0.1', 'message') should be removed since diffusers' version" + f" {__version__} is >= {self.lower_version}" + ) + + def test_deprecate_incorrect_no_standard_warn(self): + with self.assertWarns(FutureWarning) as warning: + deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False) + + assert str(warning.warning) == "This message is better!!!" + + def test_deprecate_stacklevel(self): + with self.assertWarns(FutureWarning) as warning: + deprecate(("deprecated_arg", self.higher_version, "This message is better!!!"), standard_warn=False) + assert str(warning.warning) == "This message is better!!!" 
+ assert "diffusers/tests/others/test_utils.py" in warning.filename + + +# Copied from https://github.com/huggingface/transformers/blob/main/tests/utils/test_expectations.py +class ExpectationsTester(unittest.TestCase): + def test_expectations(self): + expectations = Expectations( + { + (None, None): 1, + ("cuda", 8): 2, + ("cuda", 7): 3, + ("rocm", 8): 4, + ("rocm", None): 5, + ("cpu", None): 6, + ("xpu", 3): 7, + } + ) + + def check(value, key): + assert expectations.find_expectation(key) == value + + # npu has no matches so should find default expectation + check(1, ("npu", None)) + check(7, ("xpu", 3)) + check(2, ("cuda", 8)) + check(3, ("cuda", 7)) + check(4, ("rocm", 9)) + check(4, ("rocm", None)) + check(2, ("cuda", 2)) + + expectations = Expectations({("cuda", 8): 1}) + with self.assertRaises(ValueError): + expectations.find_expectation(("xpu", None)) + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False) + + +def is_staging_test(test_case): + """ + Decorator marking a test as a staging test. + + Those tests will run using the staging environment of huggingface.co instead of the real model hub. + """ + if not _run_staging: + return unittest.skip("test is staging test")(test_case) + else: + return pytest.mark.is_staging_test()(test_case) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_video_processor.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_video_processor.py new file mode 100644 index 0000000000000000000000000000000000000000..35c9f99c37ae103134f3fa31939d8cc003418c89 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/others/test_video_processor.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
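+
+# VideoProcessor mirrors the VaeImageProcessor round-trip tests above, but for
+# video inputs: preprocess_video() accepts lists of PIL frames, 4d/5d numpy
+# arrays and 4d/5d torch tensors, and postprocess_video() converts the result
+# back to the requested output_type, so the round trip should reproduce the
+# input frames.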
+ +import unittest + +import numpy as np +import PIL.Image +import torch +from parameterized import parameterized + +from diffusers.video_processor import VideoProcessor + + +np.random.seed(0) +torch.manual_seed(0) + + +class VideoProcessorTest(unittest.TestCase): + def get_dummy_sample(self, input_type): + batch_size = 1 + num_frames = 5 + num_channels = 3 + height = 8 + width = 8 + + def generate_image(): + return PIL.Image.fromarray(np.random.randint(0, 256, size=(height, width, num_channels)).astype("uint8")) + + def generate_4d_array(): + return np.random.rand(num_frames, height, width, num_channels) + + def generate_5d_array(): + return np.random.rand(batch_size, num_frames, height, width, num_channels) + + def generate_4d_tensor(): + return torch.rand(num_frames, num_channels, height, width) + + def generate_5d_tensor(): + return torch.rand(batch_size, num_frames, num_channels, height, width) + + if input_type == "list_images": + sample = [generate_image() for _ in range(num_frames)] + elif input_type == "list_list_images": + sample = [[generate_image() for _ in range(num_frames)] for _ in range(num_frames)] + elif input_type == "list_4d_np": + sample = [generate_4d_array() for _ in range(num_frames)] + elif input_type == "list_list_4d_np": + sample = [[generate_4d_array() for _ in range(num_frames)] for _ in range(num_frames)] + elif input_type == "list_5d_np": + sample = [generate_5d_array() for _ in range(num_frames)] + elif input_type == "5d_np": + sample = generate_5d_array() + elif input_type == "list_4d_pt": + sample = [generate_4d_tensor() for _ in range(num_frames)] + elif input_type == "list_list_4d_pt": + sample = [[generate_4d_tensor() for _ in range(num_frames)] for _ in range(num_frames)] + elif input_type == "list_5d_pt": + sample = [generate_5d_tensor() for _ in range(num_frames)] + elif input_type == "5d_pt": + sample = generate_5d_tensor() + + return sample + + def to_np(self, video): + # List of images. + if isinstance(video[0], PIL.Image.Image): + video = np.stack([np.array(i) for i in video], axis=0) + + # List of list of images. + elif isinstance(video, list) and isinstance(video[0][0], PIL.Image.Image): + frames = [] + for vid in video: + all_current_frames = np.stack([np.array(i) for i in vid], axis=0) + frames.append(all_current_frames) + video = np.stack([np.array(frame) for frame in frames], axis=0) + + # List of 4d/5d {ndarrays, torch tensors}. + elif isinstance(video, list) and isinstance(video[0], (torch.Tensor, np.ndarray)): + if isinstance(video[0], np.ndarray): + video = np.stack(video, axis=0) if video[0].ndim == 4 else np.concatenate(video, axis=0) + else: + if video[0].ndim == 4: + video = np.stack([i.cpu().numpy().transpose(0, 2, 3, 1) for i in video], axis=0) + elif video[0].ndim == 5: + video = np.concatenate([i.cpu().numpy().transpose(0, 1, 3, 4, 2) for i in video], axis=0) + + # List of list of 4d/5d {ndarrays, torch tensors}. 
+ elif ( + isinstance(video, list) + and isinstance(video[0], list) + and isinstance(video[0][0], (torch.Tensor, np.ndarray)) + ): + all_frames = [] + for list_of_videos in video: + temp_frames = [] + for vid in list_of_videos: + if vid.ndim == 4: + current_vid_frames = np.stack( + [i if isinstance(i, np.ndarray) else i.cpu().numpy().transpose(1, 2, 0) for i in vid], + axis=0, + ) + elif vid.ndim == 5: + current_vid_frames = np.concatenate( + [i if isinstance(i, np.ndarray) else i.cpu().numpy().transpose(0, 2, 3, 1) for i in vid], + axis=0, + ) + temp_frames.append(current_vid_frames) + temp_frames = np.stack(temp_frames, axis=0) + all_frames.append(temp_frames) + + video = np.concatenate(all_frames, axis=0) + + # Just 5d {ndarrays, torch tensors}. + elif isinstance(video, (torch.Tensor, np.ndarray)) and video.ndim == 5: + video = video if isinstance(video, np.ndarray) else video.cpu().numpy().transpose(0, 1, 3, 4, 2) + + return video + + @parameterized.expand(["list_images", "list_list_images"]) + def test_video_processor_pil(self, input_type): + video_processor = VideoProcessor(do_resize=False, do_normalize=True) + + input = self.get_dummy_sample(input_type=input_type) + + for output_type in ["pt", "np", "pil"]: + out = video_processor.postprocess_video(video_processor.preprocess_video(input), output_type=output_type) + out_np = self.to_np(out) + input_np = self.to_np(input).astype("float32") / 255.0 if output_type != "pil" else self.to_np(input) + assert np.abs(input_np - out_np).max() < 1e-6, f"Decoded output does not match input for {output_type=}" + + @parameterized.expand(["list_4d_np", "list_5d_np", "5d_np"]) + def test_video_processor_np(self, input_type): + video_processor = VideoProcessor(do_resize=False, do_normalize=True) + + input = self.get_dummy_sample(input_type=input_type) + + for output_type in ["pt", "np", "pil"]: + out = video_processor.postprocess_video(video_processor.preprocess_video(input), output_type=output_type) + out_np = self.to_np(out) + input_np = ( + (self.to_np(input) * 255.0).round().astype("uint8") if output_type == "pil" else self.to_np(input) + ) + assert np.abs(input_np - out_np).max() < 1e-6, f"Decoded output does not match input for {output_type=}" + + @parameterized.expand(["list_4d_pt", "list_5d_pt", "5d_pt"]) + def test_video_processor_pt(self, input_type): + video_processor = VideoProcessor(do_resize=False, do_normalize=True) + + input = self.get_dummy_sample(input_type=input_type) + + for output_type in ["pt", "np", "pil"]: + out = video_processor.postprocess_video(video_processor.preprocess_video(input), output_type=output_type) + out_np = self.to_np(out) + input_np = ( + (self.to_np(input) * 255.0).round().astype("uint8") if output_type == "pil" else self.to_np(input) + ) + assert np.abs(input_np - out_np).max() < 1e-6, f"Decoded output does not match input for {output_type=}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/allegro/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/allegro/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/allegro/test_allegro.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/allegro/test_allegro.py new file mode 100644 index 0000000000000000000000000000000000000000..b2e588de0647fa0aec71a2023f3969bc1f065624 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/allegro/test_allegro.py @@ -0,0 +1,373 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import os +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5Config, T5EncoderModel + +from diffusers import AllegroPipeline, AllegroTransformer3DModel, AutoencoderKLAllegro, DDIMScheduler + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np + + +enable_full_determinism() + + +class AllegroPipelineFastTests(PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase): + pipeline_class = AllegroPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1): + torch.manual_seed(0) + transformer = AllegroTransformer3DModel( + num_attention_heads=2, + attention_head_dim=12, + in_channels=4, + out_channels=4, + num_layers=num_layers, + cross_attention_dim=24, + sample_width=8, + sample_height=8, + sample_frames=8, + caption_channels=24, + ) + + torch.manual_seed(0) + vae = AutoencoderKLAllegro( + in_channels=3, + out_channels=3, + down_block_types=( + "AllegroDownBlock3D", + "AllegroDownBlock3D", + "AllegroDownBlock3D", + "AllegroDownBlock3D", + ), + up_block_types=( + "AllegroUpBlock3D", + "AllegroUpBlock3D", + "AllegroUpBlock3D", + "AllegroUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + temporal_compression_ratio=4, + ) + + # TODO(aryan): Only for now, since VAE decoding without tiling is not yet implemented here + vae.enable_tiling() + + torch.manual_seed(0) + scheduler = DDIMScheduler() + + text_encoder_config = T5Config( + **{ + "d_ff": 37, + "d_kv": 8, + "d_model": 24, + "num_decoder_layers": 2, + "num_heads": 4, + "num_layers": 2, + "relative_attention_num_buckets": 8, + "vocab_size": 1103, + } + ) + text_encoder = T5EncoderModel(text_encoder_config) + 
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": 8, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + @unittest.skip("Decoding without tiling is not yet implemented") + def test_save_load_local(self): + pass + + @unittest.skip("Decoding without tiling is not yet implemented") + def test_save_load_optional_components(self): + pass + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (8, 3, 16, 16)) + expected_video = torch.randn(8, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + 
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + # TODO(aryan) + @unittest.skip("Decoding without tiling is not yet implemented.") + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_overlap_factor_height=1 / 12, + tile_overlap_factor_width=1 / 12, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + # reimplement because it needs `enable_tiling()` on the loaded pipe. 
+ from huggingface_hub import export_folder_as_dduf + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device="cpu") + inputs.pop("generator") + inputs["generator"] = torch.manual_seed(0) + + pipeline_out = pipe(**inputs)[0].cpu() + + with tempfile.TemporaryDirectory() as tmpdir: + dduf_filename = os.path.join(tmpdir, f"{pipe.__class__.__name__.lower()}.dduf") + pipe.save_pretrained(tmpdir, safe_serialization=True) + export_folder_as_dduf(dduf_filename, folder_path=tmpdir) + loaded_pipe = self.pipeline_class.from_pretrained(tmpdir, dduf_file=dduf_filename).to(torch_device) + + loaded_pipe.vae.enable_tiling() + inputs["generator"] = torch.manual_seed(0) + loaded_pipeline_out = loaded_pipe(**inputs)[0].cpu() + + assert np.allclose(pipeline_out, loaded_pipeline_out) + + +@slow +@require_torch_accelerator +class AllegroPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_allegro(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = AllegroPipeline.from_pretrained("rhymes-ai/Allegro", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + videos = pipe( + prompt=prompt, + height=720, + width=1280, + num_frames=88, + generator=generator, + num_inference_steps=2, + output_type="pt", + ).frames + + video = videos[0] + expected_video = torch.randn(1, 88, 720, 1280, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video, expected_video) + assert max_diff < 1e-3, f"Max diff is too high. 
got {video}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4cd4cf2c1a14801aa32f6f8dba5d02a163a8e8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff.py @@ -0,0 +1,621 @@ +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AnimateDiffPipeline, + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + LCMScheduler, + MotionAdapter, + StableDiffusionPipeline, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.models.attention import FreeNoiseTransformerBlock +from diffusers.utils import is_xformers_available, logging + +from ...testing_utils import ( + backend_empty_cache, + numpy_cosine_similarity_distance, + require_accelerator, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffPipelineFastTests( + IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + pipeline_class = AnimateDiffPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + torch.manual_seed(0) + 
motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "pt", + } + return inputs + + def test_from_pipe_consistent_config(self): + assert self.original_pipeline_class == StableDiffusionPipeline + original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + + # create original_pipeline_class(sd) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd) + original_pipe_additional_components = {} + for name, component in pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = AnimateDiffPipeline(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [ + 0.5216, + 0.5620, + 0.4927, + 0.5082, + 0.4786, + 0.5932, + 0.5125, + 0.4514, + 0.5315, + 0.4694, + 0.3276, + 0.4863, + 0.3920, + 0.3684, + 0.5745, + 0.4499, + 0.5081, + 0.5414, + 0.6014, + 0.5062, + 0.3630, + 0.5296, + 0.6018, + 0.5098, + 0.4948, + 0.5101, + 0.5620, + ] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_dict_tuple_outputs_equivalent(self): + expected_slice = None + if torch_device == "cpu": + expected_slice = np.array([0.5125, 0.4514, 0.5315, 0.4499, 0.5081, 0.5414, 0.4948, 0.5101, 0.5620]) + return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) + + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in 
pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) + pipe(**inputs) + + def test_free_init(self): + components = self.get_dummy_components() + pipe: AnimateDiffPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + pipe.enable_free_init( + num_iters=2, + use_fast_sampling=True, + method="butterworth", + order=4, + spatial_stop_frequency=0.25, + temporal_stop_frequency=0.25, + ) + inputs_enable_free_init = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] + + pipe.disable_free_init() + inputs_disable_free_init = self.get_dummy_inputs(torch_device) + frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() + self.assertGreater( + sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeInit should lead to results similar to the default pipeline results", + ) + + def test_free_init_with_schedulers(self): + components = self.get_dummy_components() + pipe: AnimateDiffPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + schedulers_to_test = [ + DPMSolverMultistepScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + algorithm_type="dpmsolver++", + steps_offset=1, + clip_sample=False, + ), + LCMScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ), + ] + components.pop("scheduler") + + for scheduler in schedulers_to_test: + components["scheduler"] = scheduler + pipe: AnimateDiffPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_init(num_iters=2, use_fast_sampling=False) + + inputs = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs).frames[0] + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeInit should lead to results different from the default pipeline results", + ) + + def test_free_noise_blocks(self): + components = self.get_dummy_components() + pipe: AnimateDiffPipeline = 
self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertTrue( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", + ) + + pipe.disable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertFalse( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", + ) + + def test_free_noise(self): + components = self.get_dummy_components() + pipe: AnimateDiffPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + for context_length in [8, 9]: + for context_stride in [4, 6]: + pipe.enable_free_noise(context_length, context_stride) + + inputs_enable_free_noise = self.get_dummy_inputs(torch_device) + frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] + + pipe.disable_free_noise() + + inputs_disable_free_noise = self.get_dummy_inputs(torch_device) + frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeNoise should lead to results different from the default pipeline results", + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeNoise should lead to results similar to the default pipeline results", + ) + + def test_free_noise_split_inference(self): + components = self.get_dummy_components() + pipe: AnimateDiffPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise(8, 4) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + # Test FreeNoise with split inference memory-optimization + pipe.enable_free_noise_split_inference(spatial_split_size=16, temporal_split_size=4) + + inputs_enable_split_inference = self.get_dummy_inputs(torch_device) + frames_enable_split_inference = pipe(**inputs_enable_split_inference).frames[0] + + sum_split_inference = np.abs(to_np(frames_normal) - to_np(frames_enable_split_inference)).sum() + self.assertLess( + sum_split_inference, + 1e-4, + "Enabling FreeNoise Split Inference memory-optimizations should lead to results similar to the default pipeline results", + ) + + def test_free_noise_multi_prompt(self): + components = self.get_dummy_components() + pipe: AnimateDiffPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + context_length = 8 + context_stride = 4 + pipe.enable_free_noise(context_length, context_stride) + + # Make sure that pipeline works when prompt indices are within num_frames bounds + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"} + 
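+        # With FreeNoise enabled, `prompt` may be a dict mapping frame indices to prompts; every
+        # index must be strictly smaller than `num_frames`. The call below (indices 0 and 10 with
+        # num_frames=16) is expected to succeed, while the follow-up call that adds index 42 is
+        # expected to raise a ValueError.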
inputs["num_frames"] = 16 + pipe(**inputs).frames[0] + + with self.assertRaises(ValueError): + # Ensure that prompt indices are within bounds + inputs = self.get_dummy_inputs(torch_device) + inputs["num_frames"] = 16 + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"} + pipe(**inputs).frames[0] + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs).frames[0] + output_without_offload = ( + output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload + ) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs).frames[0] + output_with_offload = ( + output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload + ) + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") + + def test_vae_slicing(self): + return super().test_vae_slicing(image_count=2) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "num_images_per_prompt": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class AnimateDiffPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_animatediff(self): + adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") + pipe = AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) + pipe = pipe.to(torch_device) + pipe.scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ) + pipe.enable_vae_slicing() + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain" + negative_prompt = "bad quality, worse quality" + + generator = torch.Generator("cpu").manual_seed(0) + output = pipe( + prompt, + negative_prompt=negative_prompt, + num_frames=16, + generator=generator, + guidance_scale=7.5, + num_inference_steps=3, + output_type="np", + ) + + image = output.frames[0] + assert image.shape == (16, 512, 512, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array( + [ + 0.11357737, + 0.11285847, + 0.11180121, + 0.11084166, + 0.11414117, + 0.09785956, + 0.10742754, 
+ 0.10510018, + 0.08045256, + ] + ) + assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_controlnet.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..4b0eb01d067c3064a06c9b322888f80d6def6f81 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_controlnet.py @@ -0,0 +1,527 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AnimateDiffControlNetPipeline, + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + DPMSolverMultistepScheduler, + LCMScheduler, + MotionAdapter, + StableDiffusionPipeline, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.models.attention import FreeNoiseTransformerBlock +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import require_accelerator, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffControlNetPipelineFastTests( + IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + pipeline_class = AnimateDiffControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"conditioning_frames"}) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + def get_dummy_components(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=block_out_channels, + layers_per_block=2, + in_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + cross_attention_dim=cross_attention_dim, + conditioning_embedding_out_channels=(8, 8), + norm_num_groups=1, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = 
CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed: int = 0, num_frames: int = 2): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video_height = 32 + video_width = 32 + conditioning_frames = [Image.new("RGB", (video_width, video_height))] * num_frames + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "conditioning_frames": conditioning_frames, + "generator": generator, + "num_inference_steps": 2, + "num_frames": num_frames, + "guidance_scale": 7.5, + "output_type": "pt", + } + return inputs + + def test_from_pipe_consistent_config(self): + assert self.original_pipeline_class == StableDiffusionPipeline + original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + + # create original_pipeline_class(sd) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd) + original_pipe_additional_components = {} + for name, component in pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [ + 0.6604, + 0.4099, + 0.4928, + 0.5706, + 0.5096, + 0.5012, + 0.6051, + 0.5169, + 0.5021, + 0.4864, + 0.4261, + 0.5779, + 0.5822, + 0.4049, + 0.5253, + 0.6160, + 0.4150, + 0.5155, + ] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_dict_tuple_outputs_equivalent(self): + expected_slice = None + if torch_device == "cpu": + expected_slice = np.array([0.6051, 0.5169, 0.5021, 0.6160, 0.4150, 0.5155]) + return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) + + def 
test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) + pipe(**inputs) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) + + def test_free_init(self): + components = self.get_dummy_components() + pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + pipe.enable_free_init( + num_iters=2, + use_fast_sampling=True, + method="butterworth", + order=4, + spatial_stop_frequency=0.25, + temporal_stop_frequency=0.25, + ) + inputs_enable_free_init = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] + + pipe.disable_free_init() + inputs_disable_free_init = self.get_dummy_inputs(torch_device) + frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() + self.assertGreater( + sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeInit should lead to results similar to the default pipeline results", + ) + + def test_free_init_with_schedulers(self): + components = self.get_dummy_components() + pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + schedulers_to_test = [ + DPMSolverMultistepScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + algorithm_type="dpmsolver++", + steps_offset=1, + clip_sample=False, + ), + LCMScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ), + ] + components.pop("scheduler") + + for scheduler in schedulers_to_test: + components["scheduler"] = scheduler + pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_init(num_iters=2, use_fast_sampling=False) + + inputs = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs).frames[0] 
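+            # FreeInit iteratively refines the initial latent noise over `num_iters` passes, so
+            # with it enabled the output should diverge measurably from the baseline run
+            # regardless of which scheduler is plugged in; the absolute-difference sum computed
+            # below is the (loose) proxy used for that check.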
+ sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeInit should lead to results different from the default pipeline results", + ) + + def test_free_noise_blocks(self): + components = self.get_dummy_components() + pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertTrue( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", + ) + + pipe.disable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertFalse( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", + ) + + def test_free_noise(self): + components = self.get_dummy_components() + pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) + frames_normal = pipe(**inputs_normal).frames[0] + + for context_length in [8, 9]: + for context_stride in [4, 6]: + pipe.enable_free_noise(context_length, context_stride) + + inputs_enable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) + frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] + + pipe.disable_free_noise() + + inputs_disable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) + frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeNoise should lead to results different from the default pipeline results", + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeNoise should lead to results similar to the default pipeline results", + ) + + def test_free_noise_multi_prompt(self): + components = self.get_dummy_components() + pipe: AnimateDiffControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + context_length = 8 + context_stride = 4 + pipe.enable_free_noise(context_length, context_stride) + + # Make sure that pipeline works when prompt indices are within num_frames bounds + inputs = self.get_dummy_inputs(torch_device, num_frames=16) + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"} + pipe(**inputs).frames[0] + + with self.assertRaises(ValueError): + # Ensure that prompt indices are within bounds + inputs = self.get_dummy_inputs(torch_device, num_frames=16) + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"} + pipe(**inputs).frames[0] + + def test_vae_slicing(self, video_count=2): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = 
pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * video_count + inputs["conditioning_frames"] = [inputs["conditioning_frames"]] * video_count + output_1 = pipe(**inputs) + + # make sure sliced vae decode yields the same result + pipe.enable_vae_slicing() + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * video_count + inputs["conditioning_frames"] = [inputs["conditioning_frames"]] * video_count + output_2 = pipe(**inputs) + + assert np.abs(output_2[0].flatten() - output_1[0].flatten()).max() < 1e-2 + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "num_images_per_prompt": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_sdxl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..b5dcd877962375d6e957eb5d683d3c9c939b74ff --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_sdxl.py @@ -0,0 +1,286 @@ +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +import diffusers +from diffusers import ( + AnimateDiffSDXLPipeline, + AutoencoderKL, + DDIMScheduler, + MotionAdapter, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.utils import is_xformers_available, logging + +from ...testing_utils import require_accelerator, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffPipelineSDXLFastTests( + IPAdapterTesterMixin, + SDFunctionTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = AnimateDiffSDXLPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64, 128), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4, 8), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2, 4), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + norm_num_groups=1, + ) + scheduler = DDIMScheduler( + 
beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + motion_adapter = MotionAdapter( + block_out_channels=(32, 64, 128), + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + use_motion_mid_block=False, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = AnimateDiffSDXLPipeline(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in 
additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs).frames[0] + output_without_offload = ( + output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload + ) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs).frames[0] + output_with_offload = ( + output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload + ) + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") + + @unittest.skip("Test currently not supported.") + def test_encode_prompt_works_in_isolation(self): + pass + + @unittest.skip("Functionality is tested elsewhere.") + def test_save_load_optional_components(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_sparsectrl.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_sparsectrl.py new file mode 100644 index 0000000000000000000000000000000000000000..6b9f672cc4a1dc74765143f0aeaafd60c70ba7dc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_sparsectrl.py @@ -0,0 +1,494 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AnimateDiffSparseControlNetPipeline, + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + LCMScheduler, + MotionAdapter, + SparseControlNetModel, + StableDiffusionPipeline, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import require_accelerator, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffSparseControlNetPipelineFastTests( + IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + pipeline_class = AnimateDiffSparseControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + def get_dummy_components(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + controlnet = SparseControlNetModel( + block_out_channels=block_out_channels, + layers_per_block=2, + in_channels=4, + conditioning_channels=3, + down_block_types=("CrossAttnDownBlockMotion", "DownBlockMotion"), + cross_attention_dim=cross_attention_dim, + conditioning_embedding_out_channels=(8, 8), + norm_num_groups=1, + use_simplified_condition_embedding=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + 
motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed: int = 0, num_frames: int = 2): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video_height = 32 + video_width = 32 + conditioning_frames = [Image.new("RGB", (video_width, video_height))] * num_frames + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "conditioning_frames": conditioning_frames, + "controlnet_frame_indices": list(range(num_frames)), + "generator": generator, + "num_inference_steps": 2, + "num_frames": num_frames, + "guidance_scale": 7.5, + "output_type": "pt", + } + return inputs + + def test_from_pipe_consistent_config(self): + assert self.original_pipeline_class == StableDiffusionPipeline + original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + + # create original_pipeline_class(sd) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd) + original_pipe_additional_components = {} + for name, component in pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = AnimateDiffSparseControlNetPipeline(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [ + 0.6604, + 0.4099, + 0.4928, + 0.5706, + 0.5096, + 0.5012, + 0.6051, + 0.5169, + 0.5021, + 0.4864, + 0.4261, + 0.5779, + 0.5822, + 0.4049, + 0.5253, + 0.6160, + 0.4150, + 0.5155, + ] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_dict_tuple_outputs_equivalent(self): + expected_slice = None + if torch_device == "cpu": + expected_slice = np.array([0.6051, 0.5169, 0.5021, 0.6160, 0.4150, 0.5155]) + return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) + + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): 
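+        # This override batches each input manually: prompts are truncated to different lengths
+        # (with one intentionally very long prompt) to exercise padding, per-sample generators
+        # are re-seeded, and the scalar params listed in
+        # `additional_params_copy_to_batched_inputs` are copied verbatim. The first batched
+        # sample is then compared against the single-sample run within `expected_max_diff`.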
+ components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + def test_inference_batch_single_identical_use_simplified_condition_embedding_true( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + + torch.manual_seed(0) + old_controlnet = components.pop("controlnet") + components["controlnet"] = SparseControlNetModel.from_config( + old_controlnet.config, conditioning_channels=4, use_simplified_condition_embedding=True + ) + + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = 
self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) + pipe(**inputs) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) + + def test_free_init(self): + components = self.get_dummy_components() + pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + pipe.enable_free_init( + num_iters=2, + use_fast_sampling=True, + method="butterworth", + order=4, + spatial_stop_frequency=0.25, + temporal_stop_frequency=0.25, + ) + inputs_enable_free_init = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] + + pipe.disable_free_init() + inputs_disable_free_init = self.get_dummy_inputs(torch_device) + frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() + self.assertGreater( + sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeInit should lead to results similar to the default pipeline results", + ) + + def test_free_init_with_schedulers(self): + components = 
self.get_dummy_components() + pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + schedulers_to_test = [ + DPMSolverMultistepScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + algorithm_type="dpmsolver++", + steps_offset=1, + clip_sample=False, + ), + LCMScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ), + ] + components.pop("scheduler") + + for scheduler in schedulers_to_test: + components["scheduler"] = scheduler + pipe: AnimateDiffSparseControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_init(num_iters=2, use_fast_sampling=False) + + inputs = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs).frames[0] + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeInit should lead to results different from the default pipeline results", + ) + + def test_vae_slicing(self): + return super().test_vae_slicing(image_count=2) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "num_images_per_prompt": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_video2video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_video2video.py new file mode 100644 index 0000000000000000000000000000000000000000..1adb13dc4cc5781d48fed66f1ab2e16780ebfa5f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_video2video.py @@ -0,0 +1,554 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AnimateDiffVideoToVideoPipeline, + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + LCMScheduler, + MotionAdapter, + StableDiffusionPipeline, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.models.attention import FreeNoiseTransformerBlock +from diffusers.utils import is_xformers_available, logging + +from ...testing_utils import require_accelerator, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS +from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffVideoToVideoPipelineFastTests( + IPAdapterTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + pipeline_class = AnimateDiffVideoToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = VIDEO_TO_VIDEO_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + 
"callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + def get_dummy_components(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + torch.manual_seed(0) + motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0, num_frames: int = 2): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video_height = 32 + video_width = 32 + video = [Image.new("RGB", (video_width, video_height))] * num_frames + + inputs = { + "video": video, + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "pt", + } + return inputs + + def test_from_pipe_consistent_config(self): + assert self.original_pipeline_class == StableDiffusionPipeline + original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + + # create original_pipeline_class(sd) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd) + original_pipe_additional_components = {} + for name, component in pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if 
not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = AnimateDiffVideoToVideoPipeline(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + + if torch_device == "cpu": + expected_pipe_slice = np.array( + [ + 0.5569, + 0.6250, + 0.4145, + 0.5613, + 0.5563, + 0.5213, + 0.5092, + 0.4950, + 0.4950, + 0.5685, + 0.3858, + 0.4864, + 0.6458, + 0.4312, + 0.5518, + 0.5608, + 0.4418, + 0.5378, + ] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. 
So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) + pipe(**inputs) + + def test_latent_inputs(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + sample_size = pipe.unet.config.sample_size + inputs["latents"] = torch.randn((1, 4, 1, sample_size, sample_size), device=torch_device) + inputs.pop("video") + pipe(**inputs) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs).frames[0] + output_without_offload = ( + output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload + ) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs).frames[0] + output_with_offload = ( + output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload + ) + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") + + def test_free_init(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + 
pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + pipe.enable_free_init( + num_iters=2, + use_fast_sampling=True, + method="butterworth", + order=4, + spatial_stop_frequency=0.25, + temporal_stop_frequency=0.25, + ) + inputs_enable_free_init = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] + + pipe.disable_free_init() + inputs_disable_free_init = self.get_dummy_inputs(torch_device) + frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() + self.assertGreater( + sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeInit should lead to results similar to the default pipeline results", + ) + + def test_free_init_with_schedulers(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + schedulers_to_test = [ + DPMSolverMultistepScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + algorithm_type="dpmsolver++", + steps_offset=1, + clip_sample=False, + ), + LCMScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ), + ] + components.pop("scheduler") + + for scheduler in schedulers_to_test: + components["scheduler"] = scheduler + pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_init(num_iters=2, use_fast_sampling=False) + + inputs = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs).frames[0] + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeInit should lead to results different from the default pipeline results", + ) + + def test_free_noise_blocks(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertTrue( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", + ) + + pipe.disable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertFalse( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", + ) + + def test_free_noise(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoPipeline = 
self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_normal["num_inference_steps"] = 2 + inputs_normal["strength"] = 0.5 + frames_normal = pipe(**inputs_normal).frames[0] + + for context_length in [8, 9]: + for context_stride in [4, 6]: + pipe.enable_free_noise(context_length, context_stride) + + inputs_enable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_enable_free_noise["num_inference_steps"] = 2 + inputs_enable_free_noise["strength"] = 0.5 + frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] + + pipe.disable_free_noise() + inputs_disable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_disable_free_noise["num_inference_steps"] = 2 + inputs_disable_free_noise["strength"] = 0.5 + frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeNoise should lead to results different from the default pipeline results", + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeNoise should lead to results similar to the default pipeline results", + ) + + def test_free_noise_split_inference(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise(8, 4) + + inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_normal["num_inference_steps"] = 2 + inputs_normal["strength"] = 0.5 + frames_normal = pipe(**inputs_normal).frames[0] + + # Test FreeNoise with split inference memory-optimization + pipe.enable_free_noise_split_inference(spatial_split_size=16, temporal_split_size=4) + + inputs_enable_split_inference = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_enable_split_inference["num_inference_steps"] = 2 + inputs_enable_split_inference["strength"] = 0.5 + frames_enable_split_inference = pipe(**inputs_enable_split_inference).frames[0] + + sum_split_inference = np.abs(to_np(frames_normal) - to_np(frames_enable_split_inference)).sum() + self.assertLess( + sum_split_inference, + 1e-4, + "Enabling FreeNoise Split Inference memory-optimizations should lead to results similar to the default pipeline results", + ) + + def test_free_noise_multi_prompt(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + context_length = 8 + context_stride = 4 + pipe.enable_free_noise(context_length, context_stride) + + # Make sure that pipeline works when prompt indices are within num_frames bounds + inputs = self.get_dummy_inputs(torch_device, num_frames=16) + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"} + inputs["num_inference_steps"] = 2 + inputs["strength"] = 0.5 + pipe(**inputs).frames[0] + + with self.assertRaises(ValueError): + # Ensure that prompt indices are within bounds + inputs = self.get_dummy_inputs(torch_device, num_frames=16) + inputs["num_inference_steps"] = 2 + inputs["strength"] = 0.5 + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a 
leaf"} + pipe(**inputs).frames[0] + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "num_images_per_prompt": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c71c8c8817dcb1d3a74aae4cb947624be88bcc17 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/animatediff/test_animatediff_video2video_controlnet.py @@ -0,0 +1,543 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AnimateDiffVideoToVideoControlNetPipeline, + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + DPMSolverMultistepScheduler, + LCMScheduler, + MotionAdapter, + StableDiffusionPipeline, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.models.attention import FreeNoiseTransformerBlock +from diffusers.utils import is_xformers_available, logging + +from ...testing_utils import require_accelerator, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS, VIDEO_TO_VIDEO_BATCH_PARAMS +from ..test_pipelines_common import IPAdapterTesterMixin, PipelineFromPipeTesterMixin, PipelineTesterMixin + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffVideoToVideoControlNetPipelineFastTests( + IPAdapterTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + pipeline_class = AnimateDiffVideoToVideoControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = VIDEO_TO_VIDEO_BATCH_PARAMS.union({"conditioning_frames"}) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + def get_dummy_components(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=block_out_channels, + layers_per_block=2, + in_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + cross_attention_dim=cross_attention_dim, + conditioning_embedding_out_channels=(8, 8), + norm_num_groups=1, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = 
CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + torch.manual_seed(0) + motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0, num_frames: int = 2): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video_height = 32 + video_width = 32 + video = [Image.new("RGB", (video_width, video_height))] * num_frames + + video_height = 32 + video_width = 32 + conditioning_frames = [Image.new("RGB", (video_width, video_height))] * num_frames + + inputs = { + "video": video, + "conditioning_frames": conditioning_frames, + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "pt", + } + return inputs + + def test_from_pipe_consistent_config(self): + assert self.original_pipeline_class == StableDiffusionPipeline + original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + + # create original_pipeline_class(sd) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd) + original_pipe_additional_components = {} + for name, component in pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = AnimateDiffVideoToVideoControlNetPipeline(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [ + 0.5569, + 0.6250, + 0.4144, + 0.5613, + 0.5563, + 0.5213, + 0.5091, + 0.4950, + 0.4950, + 0.5684, + 0.3858, + 0.4863, + 0.6457, + 0.4311, + 0.5517, + 0.5608, 
+ 0.4417, + 0.5377, + ] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_cuda = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) + pipe(**inputs) + + def test_latent_inputs(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + sample_size = pipe.unet.config.sample_size + num_frames = len(inputs["conditioning_frames"]) + inputs["latents"] = torch.randn((1, 4, num_frames, sample_size, sample_size), device=torch_device) + inputs.pop("video") + pipe(**inputs) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs).frames[0] + output_without_offload = ( + output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload + ) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs).frames[0] + output_with_offload = ( + output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload + ) + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") + + def test_free_init(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + pipe.enable_free_init( + num_iters=2, + use_fast_sampling=True, + method="butterworth", + order=4, + spatial_stop_frequency=0.25, + temporal_stop_frequency=0.25, + ) + inputs_enable_free_init = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] + + pipe.disable_free_init() + inputs_disable_free_init = self.get_dummy_inputs(torch_device) + frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() + self.assertGreater( + sum_enabled, 1e1, "Enabling 
of FreeInit should lead to results different from the default pipeline results" + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeInit should lead to results similar to the default pipeline results", + ) + + def test_free_init_with_schedulers(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + schedulers_to_test = [ + DPMSolverMultistepScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + algorithm_type="dpmsolver++", + steps_offset=1, + clip_sample=False, + ), + LCMScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ), + ] + components.pop("scheduler") + + for scheduler in schedulers_to_test: + components["scheduler"] = scheduler + pipe: AnimateDiffVideoToVideoControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_init(num_iters=2, use_fast_sampling=False) + + inputs = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs).frames[0] + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeInit should lead to results different from the default pipeline results", + ) + + def test_free_noise_blocks(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertTrue( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", + ) + + pipe.disable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertFalse( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", + ) + + def test_free_noise(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_normal["num_inference_steps"] = 2 + inputs_normal["strength"] = 0.5 + frames_normal = pipe(**inputs_normal).frames[0] + + for context_length in [8, 9]: + for context_stride in [4, 6]: + pipe.enable_free_noise(context_length, context_stride) + + inputs_enable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) + inputs_enable_free_noise["num_inference_steps"] = 2 + inputs_enable_free_noise["strength"] = 0.5 + frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] + + pipe.disable_free_noise() + inputs_disable_free_noise = self.get_dummy_inputs(torch_device, num_frames=16) + 
inputs_disable_free_noise["num_inference_steps"] = 2 + inputs_disable_free_noise["strength"] = 0.5 + frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeNoise should lead to results different from the default pipeline results", + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeNoise should lead to results similar to the default pipeline results", + ) + + def test_free_noise_multi_prompt(self): + components = self.get_dummy_components() + pipe: AnimateDiffVideoToVideoControlNetPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + context_length = 8 + context_stride = 4 + pipe.enable_free_noise(context_length, context_stride) + + # Make sure that pipeline works when prompt indices are within num_frames bounds + inputs = self.get_dummy_inputs(torch_device, num_frames=16) + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf"} + inputs["num_inference_steps"] = 2 + inputs["strength"] = 0.5 + pipe(**inputs).frames[0] + + with self.assertRaises(ValueError): + # Ensure that prompt indices are within bounds + inputs = self.get_dummy_inputs(torch_device, num_frames=16) + inputs["num_inference_steps"] = 2 + inputs["strength"] = 0.5 + inputs["prompt"] = {0: "Caterpillar on a leaf", 10: "Butterfly on a leaf", 42: "Error on a leaf"} + pipe(**inputs).frames[0] + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "num_images_per_prompt": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/audioldm2/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/audioldm2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/audioldm2/test_audioldm2.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/audioldm2/test_audioldm2.py new file mode 100644 index 0000000000000000000000000000000000000000..e4bc5cc11003c595194a837f1610563ea313c15d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/audioldm2/test_audioldm2.py @@ -0,0 +1,667 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import gc +import unittest + +import numpy as np +import pytest +import torch +from transformers import ( + ClapAudioConfig, + ClapConfig, + ClapFeatureExtractor, + ClapModel, + ClapTextConfig, + GPT2Config, + GPT2LMHeadModel, + RobertaTokenizer, + SpeechT5HifiGan, + SpeechT5HifiGanConfig, + T5Config, + T5EncoderModel, + T5Tokenizer, +) + +from diffusers import ( + AudioLDM2Pipeline, + AudioLDM2ProjectionModel, + AudioLDM2UNet2DConditionModel, + AutoencoderKL, + DDIMScheduler, + LMSDiscreteScheduler, + PNDMScheduler, +) +from diffusers.utils import is_transformers_version + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + is_torch_version, + nightly, + torch_device, +) +from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = AudioLDM2Pipeline + params = TEXT_TO_AUDIO_PARAMS + batch_params = TEXT_TO_AUDIO_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "num_waveforms_per_prompt", + "generator", + "latents", + "output_type", + "return_dict", + "callback", + "callback_steps", + ] + ) + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = AudioLDM2UNet2DConditionModel( + block_out_channels=(8, 16), + layers_per_block=1, + norm_num_groups=8, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=(8, 16), + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[8, 16], + in_channels=1, + out_channels=1, + norm_num_groups=8, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_branch_config = ClapTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=8, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=1, + num_hidden_layers=1, + pad_token_id=1, + vocab_size=1000, + projection_dim=8, + ) + audio_branch_config = ClapAudioConfig( + spec_size=8, + window_size=4, + num_mel_bins=8, + intermediate_size=37, + layer_norm_eps=1e-05, + depths=[1, 1], + num_attention_heads=[1, 1], + num_hidden_layers=1, + hidden_size=192, + projection_dim=8, + patch_size=2, + patch_stride=2, + patch_embed_input_channels=4, + ) + text_encoder_config = ClapConfig.from_text_audio_configs( + text_config=text_branch_config, + audio_config=audio_branch_config, + projection_dim=16, + ) + text_encoder = ClapModel(text_encoder_config) + tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) + feature_extractor = ClapFeatureExtractor.from_pretrained( + "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 + ) + + torch.manual_seed(0) + text_encoder_2_config = T5Config( + vocab_size=32100, + d_model=32, + d_ff=37, + d_kv=8, + num_heads=1, + num_layers=1, + ) + text_encoder_2 = T5EncoderModel(text_encoder_2_config) + tokenizer_2 = T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77) + + torch.manual_seed(0) + language_model_config = GPT2Config( + n_embd=16, + n_head=1, + 
n_layer=1, + vocab_size=1000, + n_ctx=99, + n_positions=99, + ) + language_model = GPT2LMHeadModel(language_model_config) + language_model.config.max_new_tokens = 8 + + torch.manual_seed(0) + projection_model = AudioLDM2ProjectionModel( + text_encoder_dim=16, + text_encoder_1_dim=32, + langauge_model_dim=16, + ) + + vocoder_config = SpeechT5HifiGanConfig( + model_in_dim=8, + sampling_rate=16000, + upsample_initial_channel=16, + upsample_rates=[2, 2], + upsample_kernel_sizes=[4, 4], + resblock_kernel_sizes=[3, 7], + resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], + normalize_before=False, + ) + + vocoder = SpeechT5HifiGan(vocoder_config) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "feature_extractor": feature_extractor, + "language_model": language_model, + "projection_model": projection_model, + "vocoder": vocoder, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A hammer hitting a wooden surface", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + } + return inputs + + @pytest.mark.xfail( + condition=is_transformers_version(">=", "4.54.1"), + reason="Test currently fails on Transformers version 4.54.1.", + strict=False, + ) + def test_audioldm2_ddim(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = audioldm_pipe(**inputs) + audio = output.audios[0] + + assert audio.ndim == 1 + assert len(audio) == 256 + + audio_slice = audio[:10] + expected_slice = np.array( + [ + 2.602e-03, + 1.729e-03, + 1.863e-03, + -2.219e-03, + -2.656e-03, + -2.017e-03, + -2.648e-03, + -2.115e-03, + -2.502e-03, + -2.081e-03, + ] + ) + + assert np.abs(audio_slice - expected_slice).max() < 1e-4 + + def test_audioldm2_prompt_embeds(self): + components = self.get_dummy_components() + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = audioldm_pipe(**inputs) + audio_1 = output.audios[0] + + inputs = self.get_dummy_inputs(torch_device) + prompt = 3 * [inputs.pop("prompt")] + + text_inputs = audioldm_pipe.tokenizer( + prompt, + padding="max_length", + max_length=audioldm_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputs = text_inputs["input_ids"].to(torch_device) + + clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) + clap_prompt_embeds = clap_prompt_embeds[:, None, :] + + text_inputs = audioldm_pipe.tokenizer_2( + prompt, + padding="max_length", + max_length=True, + truncation=True, + return_tensors="pt", + ) + text_inputs = text_inputs["input_ids"].to(torch_device) + + t5_prompt_embeds = audioldm_pipe.text_encoder_2( + text_inputs, + ) + t5_prompt_embeds = t5_prompt_embeds[0] + + projection_embeds = 
audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] + generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) + + inputs["prompt_embeds"] = t5_prompt_embeds + inputs["generated_prompt_embeds"] = generated_prompt_embeds + + # forward + output = audioldm_pipe(**inputs) + audio_2 = output.audios[0] + + assert np.abs(audio_1 - audio_2).max() < 1e-2 + + def test_audioldm2_negative_prompt_embeds(self): + components = self.get_dummy_components() + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + negative_prompt = 3 * ["this is a negative prompt"] + inputs["negative_prompt"] = negative_prompt + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = audioldm_pipe(**inputs) + audio_1 = output.audios[0] + + inputs = self.get_dummy_inputs(torch_device) + prompt = 3 * [inputs.pop("prompt")] + + embeds = [] + generated_embeds = [] + for p in [prompt, negative_prompt]: + text_inputs = audioldm_pipe.tokenizer( + p, + padding="max_length", + max_length=audioldm_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputs = text_inputs["input_ids"].to(torch_device) + + clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) + clap_prompt_embeds = clap_prompt_embeds[:, None, :] + + text_inputs = audioldm_pipe.tokenizer_2( + prompt, + padding="max_length", + max_length=True if len(embeds) == 0 else embeds[0].shape[1], + truncation=True, + return_tensors="pt", + ) + text_inputs = text_inputs["input_ids"].to(torch_device) + + t5_prompt_embeds = audioldm_pipe.text_encoder_2( + text_inputs, + ) + t5_prompt_embeds = t5_prompt_embeds[0] + + projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] + generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) + + embeds.append(t5_prompt_embeds) + generated_embeds.append(generated_prompt_embeds) + + inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds + inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds + + # forward + output = audioldm_pipe(**inputs) + audio_2 = output.audios[0] + + assert np.abs(audio_1 - audio_2).max() < 1e-2 + + @pytest.mark.xfail( + condition=is_transformers_version(">=", "4.54.1"), + reason="Test currently fails on Transformers version 4.54.1.", + strict=False, + ) + def test_audioldm2_negative_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "egg cracking" + output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) + audio = output.audios[0] + + assert audio.ndim == 1 + assert len(audio) == 256 + + audio_slice = audio[:10] + expected_slice = np.array( + [0.0026, 0.0017, 0.0018, -0.0022, -0.0026, -0.002, -0.0026, -0.0021, -0.0025, -0.0021] + ) + + assert np.abs(audio_slice - expected_slice).max() < 1e-4 + + def test_audioldm2_num_waveforms_per_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = 
self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(device) + audioldm_pipe.set_progress_bar_config(disable=None) + + prompt = "A hammer hitting a wooden surface" + + # test num_waveforms_per_prompt=1 (default) + audios = audioldm_pipe(prompt, num_inference_steps=2).audios + + assert audios.shape == (1, 256) + + # test num_waveforms_per_prompt=1 (default) for batch of prompts + batch_size = 2 + audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios + + assert audios.shape == (batch_size, 256) + + # test num_waveforms_per_prompt for single prompt + num_waveforms_per_prompt = 1 + audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios + + assert audios.shape == (num_waveforms_per_prompt, 256) + + # test num_waveforms_per_prompt for batch of prompts + batch_size = 2 + audios = audioldm_pipe( + [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt + ).audios + + assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) + + def test_audioldm2_audio_length_in_s(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate + + inputs = self.get_dummy_inputs(device) + output = audioldm_pipe(audio_length_in_s=0.016, **inputs) + audio = output.audios[0] + + assert audio.ndim == 1 + assert len(audio) / vocoder_sampling_rate == 0.016 + + output = audioldm_pipe(audio_length_in_s=0.032, **inputs) + audio = output.audios[0] + + assert audio.ndim == 1 + assert len(audio) / vocoder_sampling_rate == 0.032 + + def test_audioldm2_vocoder_model_in_dim(self): + components = self.get_dummy_components() + audioldm_pipe = AudioLDM2Pipeline(**components) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + prompt = ["hey"] + + output = audioldm_pipe(prompt, num_inference_steps=1) + audio_shape = output.audios.shape + assert audio_shape == (1, 256) + + config = audioldm_pipe.vocoder.config + config.model_in_dim *= 2 + audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) + output = audioldm_pipe(prompt, num_inference_steps=1) + audio_shape = output.audios.shape + # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram + assert audio_shape == (1, 256) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) + + @unittest.skip("Raises a not implemented error in AudioLDM2") + def test_xformers_attention_forwardGenerator_pass(self): + pass + + def test_dict_tuple_outputs_equivalent(self): + # increase tolerance from 1e-4 -> 3e-4 to account for large composite model + super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-4) + + @pytest.mark.xfail( + condition=is_torch_version(">=", "2.7"), + reason="Test currently fails on PyTorch 2.7.", + strict=False, + ) + def test_inference_batch_single_identical(self): + # increase tolerance from 1e-4 -> 2e-4 to account for large composite model + self._test_inference_batch_single_identical(expected_max_diff=2e-4) + + def test_save_load_local(self): + # increase 
tolerance from 1e-4 -> 2e-4 to account for large composite model + super().test_save_load_local(expected_max_difference=2e-4) + + def test_save_load_optional_components(self): + # increase tolerance from 1e-4 -> 2e-4 to account for large composite model + super().test_save_load_optional_components(expected_max_difference=2e-4) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # The method component.dtype returns the dtype of the first parameter registered in the model, not the + # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) + model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} + + # Without the logit scale parameters, everything is float32 + model_dtypes.pop("text_encoder") + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) + + # the CLAP sub-models are float32 + model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) + + # Once we send to fp16, all params are in half-precision, including the logit scale + pipe.to(dtype=torch.float16) + model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) + + @unittest.skip("Test not supported.") + def test_sequential_cpu_offload_forward_pass(self): + pass + + @unittest.skip("Test not supported for now because of the use of `projection_model` in `encode_prompt()`.") + def test_encode_prompt_works_in_isolation(self): + pass + + @unittest.skip("Not supported yet due to CLAPModel.") + def test_sequential_offload_forward_pass_twice(self): + pass + + @unittest.skip("Not supported yet, the second forward has mixed devices and `vocoder` is not offloaded.") + def test_cpu_offload_forward_pass_twice(self): + pass + + @unittest.skip("Not supported yet. 
`vocoder` is not offloaded.") + def test_model_cpu_offload_forward_pass(self): + pass + + +@nightly +class AudioLDM2PipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "A hammer hitting a wooden surface", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 2.5, + } + return inputs + + def get_inputs_tts(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "A men saying", + "transcription": "hello my name is John", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 2.5, + } + return inputs + + def test_audioldm2(self): + audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 25 + audio = audioldm_pipe(**inputs).audios[0] + + assert audio.ndim == 1 + assert len(audio) == 81952 + + # check the portion of the generated audio with the largest dynamic range (reduces flakiness) + audio_slice = audio[17275:17285] + expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127]) + max_diff = np.abs(expected_slice - audio_slice).max() + assert max_diff < 1e-3 + + def test_audioldm2_lms(self): + audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") + audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + audio = audioldm_pipe(**inputs).audios[0] + + assert audio.ndim == 1 + assert len(audio) == 81952 + + # check the portion of the generated audio with the largest dynamic range (reduces flakiness) + audio_slice = audio[31390:31400] + expected_slice = np.array( + [-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301] + ) + max_diff = np.abs(expected_slice - audio_slice).max() + assert max_diff < 1e-3 + + def test_audioldm2_large(self): + audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large") + audioldm_pipe = audioldm_pipe.to(torch_device) + audioldm_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + audio = audioldm_pipe(**inputs).audios[0] + + assert audio.ndim == 1 + assert len(audio) == 81952 + + # check the portion of the generated audio with the largest dynamic range (reduces flakiness) + audio_slice = audio[8825:8835] + expected_slice = np.array( + [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] + ) + max_diff = np.abs(expected_slice - audio_slice).max() + assert max_diff < 1e-3 + + def test_audioldm2_tts(self): 
+ audioldm_tts_pipe = AudioLDM2Pipeline.from_pretrained("anhnct/audioldm2_gigaspeech") + audioldm_tts_pipe = audioldm_tts_pipe.to(torch_device) + audioldm_tts_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs_tts(torch_device) + audio = audioldm_tts_pipe(**inputs).audios[0] + + assert audio.ndim == 1 + assert len(audio) == 81952 + + # check the portion of the generated audio with the largest dynamic range (reduces flakiness) + audio_slice = audio[8825:8835] + expected_slice = np.array( + [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] + ) + max_diff = np.abs(expected_slice - audio_slice).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/aura_flow/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/aura_flow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/aura_flow/test_pipeline_aura_flow.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/aura_flow/test_pipeline_aura_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..1eb9d1035c3370aa8b77ab23f47c1ea63eb011d0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/aura_flow/test_pipeline_aura_flow.py @@ -0,0 +1,137 @@ +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, UMT5EncoderModel + +from diffusers import AuraFlowPipeline, AuraFlowTransformer2DModel, AutoencoderKL, FlowMatchEulerDiscreteScheduler + +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, +) + + +class AuraFlowPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = AuraFlowPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = AuraFlowTransformer2DModel( + sample_size=32, + patch_size=2, + in_channels=4, + num_mmdit_layers=1, + num_single_dit_layers=1, + attention_head_dim=8, + num_attention_heads=4, + caption_projection_dim=32, + joint_attention_dim=32, + out_channels=4, + pos_embed_max_size=256, + ) + + text_encoder = UMT5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-umt5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=32, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "height": 
None, + "width": None, + } + return inputs + + def test_attention_slicing_forward_pass(self): + # Attention slicing needs to implemented differently for this because how single DiT and MMDiT + # blocks interfere with each other. + return + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + @unittest.skip("xformers attention processor does not exist for AuraFlow") + def test_xformers_attention_forwardGenerator_pass(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/bria/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/bria/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/bria/test_pipeline_bria.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/bria/test_pipeline_bria.py new file mode 100644 index 0000000000000000000000000000000000000000..844488e76f2ea6699fa6b3d50b5895615732a2d7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/bria/test_pipeline_bria.py @@ -0,0 +1,319 @@ +# Copyright 2024 Bria AI and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
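
The fused-QKV tests above (AuraFlow here, and the Flux-style pipelines later in this patch) all follow the same fuse → run → unfuse → compare recipe. Below is a minimal standalone sketch of that recipe, not part of the patch itself: `pipe` and `get_inputs` are placeholder names for a pipeline and an input factory built as in `get_dummy_components()` / `get_dummy_inputs()`, and the only pipeline API assumed is the `fuse_qkv_projections()` / `unfuse_qkv_projections()` pair that the tests themselves call.

import numpy as np


def check_qkv_fusion_is_output_invariant(pipe, get_inputs, atol=1e-3, rtol=1e-3):
    """Fusing and unfusing the QKV projections should leave the pipeline output unchanged."""
    # Baseline output slice, same corner slice the tests above compare.
    baseline = pipe(**get_inputs()).images[0, -3:, -3:, -1]

    # Fuse the per-head Q/K/V projections into a single matmul and rerun.
    pipe.transformer.fuse_qkv_projections()
    fused = pipe(**get_inputs()).images[0, -3:, -3:, -1]

    # Undo the fusion and rerun once more.
    pipe.transformer.unfuse_qkv_projections()
    unfused = pipe(**get_inputs()).images[0, -3:, -3:, -1]

    # Fusion is a pure reparameterization, so all three runs must agree within tolerance.
    assert np.allclose(baseline, fused, atol=atol, rtol=rtol)
    assert np.allclose(fused, unfused, atol=atol, rtol=rtol)
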
+ +import gc +import tempfile +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import T5EncoderModel, T5TokenizerFast + +from diffusers import ( + AutoencoderKL, + BriaTransformer2DModel, + FlowMatchEulerDiscreteScheduler, +) +from diffusers.pipelines.bria import BriaPipeline + +# from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist +from tests.pipelines.test_pipelines_common import PipelineTesterMixin, to_np + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +class BriaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = BriaPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_xformers_attention = False + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = BriaTransformer2DModel( + patch_size=1, + in_channels=16, + num_layers=1, + num_single_layers=1, + attention_head_dim=8, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=None, + axes_dims_rope=[0, 4, 4], + ) + + torch.manual_seed(0) + vae = AutoencoderKL( + act_fn="silu", + block_out_channels=(32,), + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D"], + latent_channels=4, + sample_size=32, + shift_factor=0, + scaling_factor=0.13025, + use_post_quant_conv=True, + use_quant_conv=True, + force_upcast=False, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = T5TokenizerFast.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "negative_prompt": "bad, ugly", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 16, + "width": 16, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_encode_prompt_works_in_isolation(self): + pass + + def test_bria_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + assert max_diff > 1e-6 + + def test_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - 
height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_torch_accelerator + def test_save_load_float16(self, expected_max_diff=1e-2): + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if name == "vae": + continue + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess( + max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." 
+ ) + + def test_bria_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(16, 16), (32, 32), (64, 64)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] + self.assertTrue([dtype == torch.float32 for dtype in model_dtypes] == [True, True, True]) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + torch_dtype_dict = {"transformer": torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype_dict) + + self.assertEqual(loaded_pipe.transformer.dtype, torch.bfloat16) + self.assertEqual(loaded_pipe.text_encoder.dtype, torch.float16) + self.assertEqual(loaded_pipe.vae.dtype, torch.float16) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + torch_dtype_dict = {"default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype_dict) + + self.assertEqual(loaded_pipe.transformer.dtype, torch.float16) + self.assertEqual(loaded_pipe.text_encoder.dtype, torch.float16) + self.assertEqual(loaded_pipe.vae.dtype, torch.float16) + + +@slow +@require_torch_accelerator +class BriaPipelineSlowTests(unittest.TestCase): + pipeline_class = BriaPipeline + repo_id = "briaai/BRIA-3.2" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + generator = torch.Generator(device="cpu").manual_seed(seed) + + prompt_embeds = torch.load( + hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") + ).to(torch_device) + + return { + "prompt_embeds": prompt_embeds, + "num_inference_steps": 2, + "guidance_scale": 0.0, + "max_sequence_length": 256, + "output_type": "np", + "generator": generator, + } + + def test_bria_inference_bf16(self): + pipe = self.pipeline_class.from_pretrained( + self.repo_id, torch_dtype=torch.bfloat16, text_encoder=None, tokenizer=None + ) + pipe.to(torch_device) + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10].flatten() + + expected_slice = np.array( + [ + 0.59729785, + 0.6153719, + 0.595112, + 0.5884763, + 0.59366125, + 0.5795311, + 0.58325, + 0.58449626, + 0.57737637, + 0.58432233, + 0.5867875, + 0.57824117, + 0.5819089, + 0.5830988, + 0.57730293, + 0.57647324, + 0.5769151, + 0.57312685, + 0.57926565, + 0.5823928, + 0.57783926, + 0.57162863, + 0.575649, + 0.5745547, + 0.5740556, + 0.5799735, + 0.57799566, + 0.5715559, + 0.5771242, + 0.5773058, + ], + dtype=np.float32, + ) + max_diff = 
numpy_cosine_similarity_distance(expected_slice, image_slice) + self.assertLess(max_diff, 1e-4, f"Image slice is different from expected slice: {max_diff:.4f}") diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/__init__.py @@ -0,0 +1 @@ + diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/test_pipeline_chroma.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/test_pipeline_chroma.py new file mode 100644 index 0000000000000000000000000000000000000000..3edd58b75f82b8a79aefad0c4191cd929f5ac354 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/test_pipeline_chroma.py @@ -0,0 +1,160 @@ +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, ChromaPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler + +from ...testing_utils import torch_device +from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin, check_qkv_fused_layers_exist + + +class ChromaPipelineFastTests( + unittest.TestCase, + PipelineTesterMixin, + FluxIPAdapterTesterMixin, +): + pipeline_class = ChromaPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds"]) + batch_params = frozenset(["prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = ChromaTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=num_layers, + num_single_layers=num_single_layers, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + axes_dims_rope=[4, 4, 8], + approximator_hidden_dim=32, + approximator_layers=1, + approximator_num_channels=16, + ) + + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "negative_prompt": "bad, ugly", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_chroma_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = 
self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + self.assertTrue( + check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), + ("Something wrong with the fused attention layers. Expected all the attention projections to be fused."), + ) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) + + def test_chroma_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/test_pipeline_chroma_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/test_pipeline_chroma_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..4ed1393037b9923275cf84792aaeac5d01798291 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/chroma/test_pipeline_chroma_img2img.py @@ -0,0 +1,163 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, ChromaImg2ImgPipeline, ChromaTransformer2DModel, FlowMatchEulerDiscreteScheduler + +from ...testing_utils import floats_tensor, torch_device +from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin, check_qkv_fused_layers_exist + + +class ChromaImg2ImgPipelineFastTests( + unittest.TestCase, + PipelineTesterMixin, + FluxIPAdapterTesterMixin, +): + pipeline_class = ChromaImg2ImgPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds"]) + batch_params = frozenset(["prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = ChromaTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=num_layers, + num_single_layers=num_single_layers, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + axes_dims_rope=[4, 4, 8], + approximator_hidden_dim=32, + approximator_layers=1, + approximator_num_channels=16, + ) + + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + 
"max_sequence_length": 48, + "strength": 0.8, + "output_type": "np", + } + return inputs + + def test_chroma_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + self.assertTrue( + check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), + ("Something wrong with the fused attention layers. Expected all the attention projections to be fused."), + ) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + def test_chroma_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox.py new file mode 100644 index 0000000000000000000000000000000000000000..dca1725d8a74de4e038b03131d623f401ac7b835 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox.py @@ -0,0 +1,371 @@ +# Copyright 2025 The HuggingFace Team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCogVideoX, CogVideoXPipeline, CogVideoXTransformer3DModel, DDIMScheduler + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + FirstBlockCacheTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, + to_np, +) + + +enable_full_determinism() + + +class CogVideoXPipelineFastTests( + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + FasterCacheTesterMixin, + FirstBlockCacheTesterMixin, + unittest.TestCase, +): + pipeline_class = CogVideoXPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1): + torch.manual_seed(0) + transformer = CogVideoXTransformer3DModel( + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings + # But, since we are using tiny-random-t5 here, we need the internal dim of CogVideoXTransformer3DModel + # to be 32. 
The internal dim is product of num_attention_heads and attention_head_dim + num_attention_heads=4, + attention_head_dim=8, + in_channels=4, + out_channels=4, + time_embed_dim=2, + text_embed_dim=32, # Must match with tiny-random-t5 + num_layers=num_layers, + sample_width=2, # latent width: 2 -> final width: 16 + sample_height=2, # latent height: 2 -> final height: 16 + sample_frames=9, # latent frames: (9 - 1) / 4 + 1 = 3 -> final frames: 9 + patch_size=2, + temporal_compression_ratio=4, + max_text_seq_length=16, + ) + + torch.manual_seed(0) + vae = AutoencoderKLCogVideoX( + in_channels=3, + out_channels=3, + down_block_types=( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + up_block_types=( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + # Cannot reduce because convolution kernel becomes bigger than sample + "height": 16, + "width": 16, + "num_frames": 8, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (8, 3, 16, 16)) + expected_video = torch.randn(8, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in 
callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_overlap_factor_height=1 / 12, + tile_overlap_factor_width=1 / 12, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = 
self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames # [B, F, C, H, W] + original_image_slice = frames[0, -2:, -1, -3:, -3:] + + pipe.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_fused = frames[0, -2:, -1, -3:, -3:] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_disabled = frames[0, -2:, -1, -3:, -3:] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + +@slow +@require_torch_accelerator +class CogVideoXPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_cogvideox(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + videos = pipe( + prompt=prompt, + height=480, + width=720, + num_frames=16, + generator=generator, + num_inference_steps=2, + output_type="pt", + ).frames + + video = videos[0] + expected_video = torch.randn(1, 16, 480, 720, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video, expected_video) + assert max_diff < 1e-3, f"Max diff is too high. got {video}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_fun_control.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_fun_control.py new file mode 100644 index 0000000000000000000000000000000000000000..097e8df7b35f34010b5313b686af455f42ade54c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_fun_control.py @@ -0,0 +1,326 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
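
The `test_callback_inputs` methods in these CogVideoX tests exercise the `callback_on_step_end` hook with the `(pipe, i, t, callback_kwargs)` signature and the `callback_on_step_end_tensor_inputs` allow-list. A minimal sketch of that wiring, outside the patch, is below; the helper names are placeholders, while the hook signature and keyword arguments mirror the tests above.

import torch


def zero_latents_on_last_step(pipe, i, t, callback_kwargs):
    # Same signature the tests use: (pipeline, step index, timestep, dict of requested tensors).
    if i == pipe.num_timesteps - 1:
        callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
    return callback_kwargs


def run_with_callback(pipe, **inputs):
    # The tensor-input names must be a subset of pipe._callback_tensor_inputs,
    # which is exactly what the subset/all callbacks in the tests assert.
    return pipe(
        **inputs,
        callback_on_step_end=zero_latents_on_last_step,
        callback_on_step_end_tensor_inputs=["latents"],
    )
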
+ +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCogVideoX, CogVideoXFunControlPipeline, CogVideoXTransformer3DModel, DDIMScheduler + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, + to_np, +) + + +enable_full_determinism() + + +class CogVideoXFunControlPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CogVideoXFunControlPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"control_video"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CogVideoXTransformer3DModel( + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings + # But, since we are using tiny-random-t5 here, we need the internal dim of CogVideoXTransformer3DModel + # to be 32. The internal dim is product of num_attention_heads and attention_head_dim + num_attention_heads=4, + attention_head_dim=8, + in_channels=8, + out_channels=4, + time_embed_dim=2, + text_embed_dim=32, # Must match with tiny-random-t5 + num_layers=1, + sample_width=2, # latent width: 2 -> final width: 16 + sample_height=2, # latent height: 2 -> final height: 16 + sample_frames=9, # latent frames: (9 - 1) / 4 + 1 = 3 -> final frames: 9 + patch_size=2, + temporal_compression_ratio=4, + max_text_seq_length=16, + ) + + torch.manual_seed(0) + vae = AutoencoderKLCogVideoX( + in_channels=3, + out_channels=3, + down_block_types=( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + up_block_types=( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed: int = 0, num_frames: int = 8): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + # Cannot reduce because convolution kernel becomes bigger than sample + height = 16 + width = 16 + + control_video = [Image.new("RGB", (width, height))] * num_frames + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "control_video": control_video, + "generator": generator, + "num_inference_steps": 
2, + "guidance_scale": 6.0, + "height": height, + "width": width, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (8, 3, 16, 16)) + expected_video = torch.randn(8, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = 
self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.5): + # NOTE(aryan): This requires a higher expected_max_diff than other CogVideoX pipelines + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_overlap_factor_height=1 / 12, + tile_overlap_factor_width=1 / 12, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames # [B, F, C, H, W] + original_image_slice = frames[0, -2:, -1, -3:, -3:] + + pipe.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_fused = frames[0, -2:, -1, -3:, -3:] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_disabled = frames[0, -2:, -1, -3:, -3:] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_image2video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_image2video.py new file mode 100644 index 0000000000000000000000000000000000000000..1dd5e2ae14056c1c5121a7e383249084c695d264 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_image2video.py @@ -0,0 +1,388 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCogVideoX, CogVideoXImageToVideoPipeline, CogVideoXTransformer3DModel, DDIMScheduler +from diffusers.utils import load_image + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, + to_np, +) + + +enable_full_determinism() + + +class CogVideoXImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CogVideoXImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CogVideoXTransformer3DModel( + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings + # But, since we are using tiny-random-t5 here, we need the internal dim of CogVideoXTransformer3DModel + # to be 32. The internal dim is product of num_attention_heads and attention_head_dim + # Note: The num_attention_heads and attention_head_dim is different from the T2V and I2V tests because + # attention_head_dim must be divisible by 16 for RoPE to work. We also need to maintain a product of 32 as + # detailed above. 
+ num_attention_heads=2, + attention_head_dim=16, + in_channels=8, + out_channels=4, + time_embed_dim=2, + text_embed_dim=32, # Must match with tiny-random-t5 + num_layers=1, + sample_width=2, # latent width: 2 -> final width: 16 + sample_height=2, # latent height: 2 -> final height: 16 + sample_frames=9, # latent frames: (9 - 1) / 4 + 1 = 3 -> final frames: 9 + patch_size=2, + temporal_compression_ratio=4, + max_text_seq_length=16, + use_rotary_positional_embeddings=True, + use_learned_positional_embeddings=True, + ) + + torch.manual_seed(0) + vae = AutoencoderKLCogVideoX( + in_channels=3, + out_channels=3, + down_block_types=( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + up_block_types=( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + # Cannot reduce below 16 because convolution kernel becomes bigger than sample + # Cannot reduce below 32 because 3D RoPE errors out + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": image_height, + "width": image_width, + "num_frames": 8, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (8, 3, 16, 16)) + expected_video = torch.randn(8, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert 
tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.3): + # Note(aryan): Investigate why this needs a bit higher tolerance + generator_device = "cpu" + components = self.get_dummy_components() + + # The reason to modify it this way is because I2V Transformer limits the generation to resolutions used during initialization. + # This limitation comes from using learned positional embeddings which cannot be generated on-the-fly like sincos or RoPE embeddings. 
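+        # (sincos and RoPE embeddings can be recomputed at runtime for any resolution, whereas a
+        # learned table is a fixed-size parameter created at initialization.)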
+ # See the if-statement on "self.use_learned_positional_embeddings" in diffusers/models/embeddings.py + components["transformer"] = CogVideoXTransformer3DModel.from_config( + components["transformer"].config, + sample_height=16, + sample_width=16, + ) + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_overlap_factor_height=1 / 12, + tile_overlap_factor_width=1 / 12, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames # [B, F, C, H, W] + original_image_slice = frames[0, -2:, -1, -3:, -3:] + + pipe.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_fused = frames[0, -2:, -1, -3:, -3:] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_disabled = frames[0, -2:, -1, -3:, -3:] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + +@slow +@require_torch_accelerator +class CogVideoXImageToVideoPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." 
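+    # These tests load the real THUDM/CogVideoX-5b-I2V checkpoint with model CPU offload, so they
+    # only run under the @slow / @require_torch_accelerator markers.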
+ + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_cogvideox(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + image = load_image( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg" + ) + + videos = pipe( + image=image, + prompt=prompt, + height=480, + width=720, + num_frames=16, + generator=generator, + num_inference_steps=2, + output_type="pt", + ).frames + + video = videos[0] + expected_video = torch.randn(1, 16, 480, 720, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video, expected_video) + assert max_diff < 1e-3, f"Max diff is too high. got {video}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_video2video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_video2video.py new file mode 100644 index 0000000000000000000000000000000000000000..3a1da7c4e7f76f82f1e1a9dae801e2157ea5bee5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogvideo/test_cogvideox_video2video.py @@ -0,0 +1,325 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
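For context on the assertion style used by the integration tests in this diff: instead of an element-wise tolerance, generated and reference tensors are compared with the numpy_cosine_similarity_distance helper imported from the shared testing utilities. A rough, self-contained sketch of that kind of metric (illustration only; the actual diffusers helper may differ in detail) is:

import numpy as np

def cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    # Flatten both signals and measure how far apart their directions are:
    # 0.0 means perfectly aligned; the integration tests treat values below ~1e-3 as a match.
    a = a.flatten().astype(np.float64)
    b = b.flatten().astype(np.float64)
    similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return float(1.0 - similarity)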
+ +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel, CogVideoXVideoToVideoPipeline, DDIMScheduler + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, + to_np, +) + + +enable_full_determinism() + + +class CogVideoXVideoToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CogVideoXVideoToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"video"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CogVideoXTransformer3DModel( + # Product of num_attention_heads * attention_head_dim must be divisible by 16 for 3D positional embeddings + # But, since we are using tiny-random-t5 here, we need the internal dim of CogVideoXTransformer3DModel + # to be 32. The internal dim is product of num_attention_heads and attention_head_dim + num_attention_heads=4, + attention_head_dim=8, + in_channels=4, + out_channels=4, + time_embed_dim=2, + text_embed_dim=32, # Must match with tiny-random-t5 + num_layers=1, + sample_width=2, # latent width: 2 -> final width: 16 + sample_height=2, # latent height: 2 -> final height: 16 + sample_frames=9, # latent frames: (9 - 1) / 4 + 1 = 3 -> final frames: 9 + patch_size=2, + temporal_compression_ratio=4, + max_text_seq_length=16, + ) + + torch.manual_seed(0) + vae = AutoencoderKLCogVideoX( + in_channels=3, + out_channels=3, + down_block_types=( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + up_block_types=( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed: int = 0, num_frames: int = 8): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video_height = 16 + video_width = 16 + video = [Image.new("RGB", (video_width, video_height))] * num_frames + + inputs = { + "video": video, + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "strength": 0.5, + "guidance_scale": 6.0, + # Cannot reduce because convolution kernel becomes bigger than sample + "height": video_height, + 
"width": video_width, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (8, 3, 16, 16)) + expected_video = torch.randn(8, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + 
output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + # Since VideoToVideo uses both encoder and decoder tiling, there seems to be much more numerical + # difference. We seem to need a higher tolerance here... + # TODO(aryan): Look into this more deeply + expected_diff_max = 0.4 + + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_overlap_factor_height=1 / 12, + tile_overlap_factor_width=1 / 12, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames # [B, F, C, H, W] + original_image_slice = frames[0, -2:, -1, -3:, -3:] + + pipe.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_fused = frames[0, -2:, -1, -3:, -3:] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + frames = pipe(**inputs).frames + image_slice_disabled = frames[0, -2:, -1, -3:, -3:] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview3/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview3/test_cogview3plus.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview3/test_cogview3plus.py new file mode 100644 index 0000000000000000000000000000000000000000..819d4b952fc794bed6824a51ea0f9b25bc530097 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview3/test_cogview3plus.py @@ -0,0 +1,275 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, CogVideoXDDIMScheduler, CogView3PlusPipeline, CogView3PlusTransformer2DModel + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + to_np, +) + + +enable_full_determinism() + + +class CogView3PlusPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CogView3PlusPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CogView3PlusTransformer2DModel( + patch_size=2, + in_channels=4, + num_layers=1, + attention_head_dim=4, + num_attention_heads=2, + out_channels=4, + text_embed_dim=32, # Must match with tiny-random-t5 + time_embed_dim=8, + condition_dim=2, + pos_embed_max_size=8, + sample_size=8, + ) + + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + + torch.manual_seed(0) + scheduler = CogVideoXDDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": 
tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 16, 16)) + expected_image = torch.randn(3, 16, 16) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = 
self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_encode_prompt_works_in_isolation(self): + return super().test_encode_prompt_works_in_isolation(atol=1e-3, rtol=1e-3) + + +@slow +@require_torch_accelerator +class CogView3PlusPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_cogview3plus(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = CogView3PlusPipeline.from_pretrained("THUDM/CogView3Plus-3b", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + images = pipe( + prompt=prompt, + height=1024, + width=1024, + generator=generator, + num_inference_steps=2, + output_type="np", + )[0] + + image = images[0] + expected_image = torch.randn(1, 1024, 1024, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(image, expected_image) + assert max_diff < 1e-3, f"Max diff is too high. got {image}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview4/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview4/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview4/test_cogview4.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview4/test_cogview4.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f0fc7a715bd17a79f11368ab35829d649c955e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cogview4/test_cogview4.py @@ -0,0 +1,234 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
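Each of these fast-test modules calls enable_full_determinism() right after its imports, which is what makes exact-slice and max-difference comparisons across repeated pipeline runs meaningful. As a hedged sketch (assumed behavior; the real helper in diffusers' testing utilities may set additional environment flags), a full-determinism switch for PyTorch typically amounts to:

import os
import torch

def force_deterministic_torch() -> None:
    # Fail loudly instead of silently falling back to non-deterministic kernels.
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"  # required for deterministic cuBLAS matmuls on CUDA
    torch.use_deterministic_algorithms(True)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False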
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, GlmConfig, GlmForCausalLM + +from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class CogView4PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CogView4Pipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CogView4Transformer2DModel( + patch_size=2, + in_channels=4, + num_layers=2, + attention_head_dim=4, + num_attention_heads=4, + out_channels=4, + text_embed_dim=32, + time_embed_dim=8, + condition_dim=4, + ) + + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler( + base_shift=0.25, + max_shift=0.75, + base_image_seq_len=256, + use_dynamic_shifting=True, + time_shift_type="linear", + ) + + torch.manual_seed(0) + text_encoder_config = GlmConfig( + hidden_size=32, intermediate_size=8, num_hidden_layers=2, num_attention_heads=4, head_dim=8 + ) + text_encoder = GlmForCausalLM(text_encoder_config) + # TODO(aryan): change this to THUDM/CogView4 once released + tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat", trust_remote_code=True) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 16, 16)) + expected_image = torch.randn(3, 16, 16) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + 
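+        # Both flags must be present; the test is skipped just below when the pipeline's __call__
+        # does not expose step-end callbacks.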
has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consisid/__init__.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consisid/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consisid/test_consisid.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consisid/test_consisid.py new file mode 100644 index 0000000000000000000000000000000000000000..4fd9e536cddcfaf74f6463e9c88566d3ff21e1c3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consisid/test_consisid.py @@ -0,0 +1,362 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCogVideoX, ConsisIDPipeline, ConsisIDTransformer3DModel, DDIMScheduler +from diffusers.utils import load_image + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + to_np, +) + + +enable_full_determinism() + + +class ConsisIDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = ConsisIDPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = ConsisIDTransformer3DModel( + num_attention_heads=2, + attention_head_dim=16, + in_channels=8, + out_channels=4, + time_embed_dim=2, + text_embed_dim=32, + num_layers=1, + sample_width=2, + sample_height=2, + sample_frames=9, + patch_size=2, + temporal_compression_ratio=4, + max_text_seq_length=16, + use_rotary_positional_embeddings=True, + use_learned_positional_embeddings=True, + cross_attn_interval=1, + is_kps=False, + is_train_face=True, + cross_attn_dim_head=1, + cross_attn_num_heads=1, + LFE_id_dim=2, + LFE_vit_dim=2, + LFE_depth=5, + LFE_dim_head=8, + LFE_num_heads=2, + LFE_num_id_token=1, + LFE_num_querie=1, + LFE_output_dim=21, + LFE_ff_mult=1, + LFE_num_scale=1, + ) + + torch.manual_seed(0) + vae = AutoencoderKLCogVideoX( + in_channels=3, + out_channels=3, + down_block_types=( + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + "CogVideoXDownBlock3D", + ), + up_block_types=( + "CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + 
"CogVideoXUpBlock3D", + "CogVideoXUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + id_vit_hidden = [torch.ones([1, 2, 2])] * 1 + id_cond = torch.ones(1, 2) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": image_height, + "width": image_width, + "num_frames": 8, + "max_sequence_length": 16, + "id_vit_hidden": id_vit_hidden, + "id_cond": id_cond, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (8, 3, 16, 16)) + expected_video = torch.randn(8, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = 
pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.4): + generator_device = "cpu" + components = self.get_dummy_components() + + # The reason to modify it this way is because ConsisID Transformer limits the generation to resolutions used during initialization. + # This limitation comes from using learned positional embeddings which cannot be generated on-the-fly like sincos or RoPE embeddings. + # See the if-statement on "self.use_learned_positional_embeddings" in diffusers/models/embeddings.py + components["transformer"] = ConsisIDTransformer3DModel.from_config( + components["transformer"].config, + sample_height=16, + sample_width=16, + ) + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_overlap_factor_height=1 / 12, + tile_overlap_factor_width=1 / 12, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + +@slow +@require_torch_accelerator +class ConsisIDPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." 
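+    # Exercises the real BestWishYsh/ConsisID-preview checkpoint; the identity conditioning inputs
+    # (id_cond / id_vit_hidden) are filled with ones purely as placeholders.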
+ + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_consisid(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = ConsisIDPipeline.from_pretrained("BestWishYsh/ConsisID-preview", torch_dtype=torch.bfloat16) + pipe.enable_model_cpu_offload() + + prompt = self.prompt + image = load_image("https://github.com/PKU-YuanGroup/ConsisID/blob/main/asserts/example_images/2.png?raw=true") + id_vit_hidden = [torch.ones([1, 577, 1024])] * 5 + id_cond = torch.ones(1, 1280) + + videos = pipe( + image=image, + prompt=prompt, + height=480, + width=720, + num_frames=16, + id_vit_hidden=id_vit_hidden, + id_cond=id_cond, + generator=generator, + num_inference_steps=1, + output_type="pt", + ).frames + + video = videos[0] + expected_video = torch.randn(1, 16, 480, 720, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video.cpu(), expected_video) + assert max_diff < 1e-3, f"Max diff is too high. got {video}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consistency_models/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consistency_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consistency_models/test_consistency_models.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consistency_models/test_consistency_models.py new file mode 100644 index 0000000000000000000000000000000000000000..0ab0c0af25888506fe9a884fd9309f060d2381b2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/consistency_models/test_consistency_models.py @@ -0,0 +1,309 @@ +import gc +import unittest + +import numpy as np +import torch +from torch.backends.cuda import sdp_kernel + +from diffusers import ( + CMStochasticIterativeScheduler, + ConsistencyModelPipeline, + UNet2DModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + enable_full_determinism, + nightly, + require_torch_2, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = ConsistencyModelPipeline + params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS + batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS + + # Override required_optional_params to remove num_images_per_prompt + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "output_type", + "return_dict", + "callback", + "callback_steps", + ] + ) + + @property + def dummy_uncond_unet(self): + unet = UNet2DModel.from_pretrained( + "diffusers/consistency-models-test", + subfolder="test_unet", + ) + return unet + + @property + def dummy_cond_unet(self): + unet = UNet2DModel.from_pretrained( + "diffusers/consistency-models-test", + subfolder="test_unet_class_cond", + ) + return unet + + def get_dummy_components(self, class_cond=False): + if class_cond: + unet = self.dummy_cond_unet + else: + unet = self.dummy_uncond_unet + + # Default to CM multistep sampler + scheduler = CMStochasticIterativeScheduler( + num_train_timesteps=40, 
+ sigma_min=0.002, + sigma_max=80.0, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "batch_size": 1, + "num_inference_steps": None, + "timesteps": [22, 0], + "generator": generator, + "output_type": "np", + } + + return inputs + + def test_consistency_model_pipeline_multistep(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = ConsistencyModelPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_consistency_model_pipeline_multistep_class_cond(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(class_cond=True) + pipe = ConsistencyModelPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["class_labels"] = 0 + image = pipe(**inputs).images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_consistency_model_pipeline_onestep(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = ConsistencyModelPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 1 + inputs["timesteps"] = None + image = pipe(**inputs).images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_consistency_model_pipeline_onestep_class_cond(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(class_cond=True) + pipe = ConsistencyModelPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 1 + inputs["timesteps"] = None + inputs["class_labels"] = 0 + image = pipe(**inputs).images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + +@nightly +@require_torch_accelerator +class ConsistencyModelPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, seed=0, 
get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)): + generator = torch.manual_seed(seed) + + inputs = { + "num_inference_steps": None, + "timesteps": [22, 0], + "class_labels": 0, + "generator": generator, + "output_type": "np", + } + + if get_fixed_latents: + latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape) + inputs["latents"] = latents + + return inputs + + def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)): + if isinstance(device, str): + device = torch.device(device) + generator = torch.Generator(device=device).manual_seed(seed) + latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) + return latents + + def test_consistency_model_cd_multistep(self): + unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") + scheduler = CMStochasticIterativeScheduler( + num_train_timesteps=40, + sigma_min=0.002, + sigma_max=80.0, + ) + pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) + pipe.to(torch_device=torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs() + image = pipe(**inputs).images + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + + expected_slice = np.array([0.0146, 0.0158, 0.0092, 0.0086, 0.0000, 0.0000, 0.0000, 0.0000, 0.0058]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_consistency_model_cd_onestep(self): + unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") + scheduler = CMStochasticIterativeScheduler( + num_train_timesteps=40, + sigma_min=0.002, + sigma_max=80.0, + ) + pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) + pipe.to(torch_device=torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs() + inputs["num_inference_steps"] = 1 + inputs["timesteps"] = None + image = pipe(**inputs).images + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + + expected_slice = np.array([0.0059, 0.0003, 0.0000, 0.0023, 0.0052, 0.0007, 0.0165, 0.0081, 0.0095]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + @require_torch_2 + def test_consistency_model_cd_multistep_flash_attn(self): + unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") + scheduler = CMStochasticIterativeScheduler( + num_train_timesteps=40, + sigma_min=0.002, + sigma_max=80.0, + ) + pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) + pipe.to(torch_device=torch_device, torch_dtype=torch.float16) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(get_fixed_latents=True, device=torch_device) + # Ensure usage of flash attention in torch 2.0 + with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): + image = pipe(**inputs).images + + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + + expected_slices = Expectations( + { + ("xpu", 3): np.array([0.0816, 0.0518, 0.0445, 0.0594, 0.0739, 0.0534, 0.0805, 0.0457, 0.0765]), + ("cuda", 7): np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314]), + ("cuda", 8): np.array([0.0816, 0.0518, 0.0445, 0.0594, 0.0739, 0.0534, 0.0805, 0.0457, 0.0765]), + } + ) + expected_slice = expected_slices.get_expectation() + + assert np.abs(image_slice.flatten() - expected_slice).max() < 
1e-3 + + @require_torch_2 + def test_consistency_model_cd_onestep_flash_attn(self): + unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") + scheduler = CMStochasticIterativeScheduler( + num_train_timesteps=40, + sigma_min=0.002, + sigma_max=80.0, + ) + pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) + pipe.to(torch_device=torch_device, torch_dtype=torch.float16) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(get_fixed_latents=True, device=torch_device) + inputs["num_inference_steps"] = 1 + inputs["timesteps"] = None + # Ensure usage of flash attention in torch 2.0 + with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): + image = pipe(**inputs).images + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + + expected_slice = np.array([0.1623, 0.2009, 0.2387, 0.1731, 0.1168, 0.1202, 0.2031, 0.1327, 0.2447]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..b142c2baf9574d4a4ac85fe08072bc43e07a0348 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet.py @@ -0,0 +1,1081 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
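+
+# Minimal usage sketch of the pipeline exercised below. It mirrors the slow `test_canny`
+# case in this file (same checkpoints and call signature) and is illustrative only; it is
+# not executed as part of the test suite.
+#
+#     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+#     from diffusers.utils import load_image
+#
+#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
+#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+#         "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
+#     )
+#     pipe.enable_model_cpu_offload()
+#     canny_image = load_image(
+#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
+#     )
+#     image = pipe("bird", canny_image, num_inference_steps=3, output_type="np").images[0]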
+ +import gc +import tempfile +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + EulerDiscreteScheduler, + LCMScheduler, + StableDiffusionControlNetPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + load_image, + load_numpy, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class ControlNetPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=1, + time_cond_proj_dim=time_cond_proj_dim, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = 
torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + } + + return inputs + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.5234, 0.3333, 0.1745, 0.7605, 0.6224, 0.4637, 0.6989, 0.7526, 0.4665]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_controlnet_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionControlNetPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array( + [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_controlnet_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionControlNetPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array( + [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +class StableDiffusionMultiControlNetPipelineFastTests( + IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase +): + pipeline_class = 
StableDiffusionControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=1, + ) + torch.manual_seed(0) + + def init_weights(m): + if isinstance(m, torch.nn.Conv2d): + torch.nn.init.normal_(m.weight) + m.bias.data.fill_(1.0) + + controlnet1 = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + controlnet1.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + controlnet2 = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + controlnet2.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + controlnet = MultiControlNetModel([controlnet1, controlnet2]) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + + images = [ + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + ] + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": images, + } + + return inputs + + def test_control_guidance_switch(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + scale = 10.0 + steps = 
4 + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_1 = pipe(**inputs)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] + + # make sure that all outputs are different + assert np.sum(np.abs(output_1 - output_2)) > 1e-3 + assert np.sum(np.abs(output_1 - output_3)) > 1e-3 + assert np.sum(np.abs(output_1 - output_4)) > 1e-3 + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.2422, 0.3425, 0.4048, 0.5351, 0.3503, 0.2419, 0.4645, 0.4570, 0.3804]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_save_pretrained_raise_not_implemented_exception(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + with tempfile.TemporaryDirectory() as tmpdir: + try: + # save_pretrained is not implemented for Multi-ControlNet + pipe.save_pretrained(tmpdir) + except NotImplementedError: + pass + + def test_inference_multiple_prompt_input(self): + device = "cpu" + + components = self.get_dummy_components() + sd_pipe = StableDiffusionControlNetPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"], inputs["prompt"]] + inputs["image"] = [inputs["image"], inputs["image"]] + output = sd_pipe(**inputs) + image = output.images + + assert image.shape == (2, 64, 64, 3) + + image_1, image_2 = image + # make sure that the outputs are different + assert np.sum(np.abs(image_1 - image_2)) > 1e-3 + + # multiple prompts, single image conditioning + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"], inputs["prompt"]] + output_1 = sd_pipe(**inputs) + + assert np.abs(image - output_1.images).max() < 1e-3 + + # multiple prompts, multiple image conditioning + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"], inputs["prompt"], inputs["prompt"], inputs["prompt"]] + inputs["image"] = [inputs["image"], inputs["image"], inputs["image"], inputs["image"]] + output_2 = sd_pipe(**inputs) + image = output_2.images + + assert image.shape == (4, 64, 64, 3) + + def 
test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +class StableDiffusionMultiControlNetOneModelPipelineFastTests( + IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=1, + ) + torch.manual_seed(0) + + def init_weights(m): + if isinstance(m, torch.nn.Conv2d): + torch.nn.init.normal_(m.weight) + m.bias.data.fill_(1.0) + + controlnet = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + controlnet.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + controlnet = MultiControlNetModel([controlnet]) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + + images = [ + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + ] + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": images, + } + + return inputs + + def test_control_guidance_switch(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + scale = 10.0 + steps = 
4 + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_1 = pipe(**inputs)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_3 = pipe( + **inputs, + control_guidance_start=[0.1], + control_guidance_end=[0.2], + )[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] + + # make sure that all outputs are different + assert np.sum(np.abs(output_1 - output_2)) > 1e-3 + assert np.sum(np.abs(output_1 - output_3)) > 1e-3 + assert np.sum(np.abs(output_1 - output_4)) > 1e-3 + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.5264, 0.3203, 0.1602, 0.8235, 0.6332, 0.4593, 0.7226, 0.7777, 0.4780]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_save_pretrained_raise_not_implemented_exception(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + with tempfile.TemporaryDirectory() as tmpdir: + try: + # save_pretrained is not implemented for Multi-ControlNet + pipe.save_pretrained(tmpdir) + except NotImplementedError: + pass + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class ControlNetPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "bird" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + + output = pipe(prompt, image, generator=generator, 
output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (768, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy" + ) + + assert np.abs(expected_image - image).max() < 9e-2 + + def test_depth(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "Stormtrooper's lecture" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy" + ) + + assert np.abs(expected_image - image).max() < 8e-1 + + def test_hed(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "oil painting of handsome old man, masterpiece" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (704, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy" + ) + + assert np.abs(expected_image - image).max() < 8e-2 + + def test_mlsd(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "room" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (704, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy" + ) + + assert np.abs(expected_image - image).max() < 5e-2 + + def test_normal(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + 
pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "cute toy" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy" + ) + + assert np.abs(expected_image - image).max() < 5e-2 + + def test_openpose(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "Chef in the kitchen" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (768, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy" + ) + + assert np.abs(expected_image - image).max() < 8e-2 + + def test_scribble(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(5) + prompt = "bag" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (640, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy" + ) + + assert np.abs(expected_image - image).max() < 8e-2 + + def test_seg(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(5) + prompt = "house" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" + ) + + output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy" + ) + + assert 
np.abs(expected_image - image).max() < 8e-2 + + def test_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + pipe.enable_sequential_cpu_offload(device=torch_device) + + prompt = "house" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" + ) + + _ = pipe( + prompt, + image, + num_inference_steps=2, + output_type="np", + ) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 7 GB is allocated + assert mem_bytes < 4 * 10**9 + + def test_canny_guess_mode(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + + output = pipe( + prompt, + image, + generator=generator, + output_type="np", + num_inference_steps=3, + guidance_scale=3.0, + guess_mode=True, + ) + + image = output.images[0] + assert image.shape == (768, 512, 3) + + image_slice = image[-3:, -3:, -1] + expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_canny_guess_mode_euler(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + + output = pipe( + prompt, + image, + generator=generator, + output_type="np", + num_inference_steps=3, + guidance_scale=3.0, + guess_mode=True, + ) + + image = output.images[0] + assert image.shape == (768, 512, 3) + + image_slice = image[-3:, -3:, -1] + expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_v11_shuffle_global_pool_conditions(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = 
torch.Generator(device="cpu").manual_seed(0) + prompt = "New York" + image = load_image( + "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png" + ) + + output = pipe( + prompt, + image, + generator=generator, + output_type="np", + num_inference_steps=3, + guidance_scale=7.0, + ) + + image = output.images[0] + assert image.shape == (512, 640, 3) + + image_slice = image[-3:, -3:, -1] + expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + +@slow +@require_torch_accelerator +class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_pose_and_canny(self): + controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") + controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") + + pipe = StableDiffusionControlNetPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + safety_checker=None, + controlnet=[controlnet_pose, controlnet_canny], + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "bird and Chef" + image_canny = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + image_pose = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" + ) + + output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3) + + image = output.images[0] + + assert image.shape == (768, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy" + ) + + assert np.abs(expected_image - image).max() < 5e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d438e934276e158e74f4a8371c4c8ace7f51e1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_img2img.py @@ -0,0 +1,459 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
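+
+# Minimal usage sketch of the img2img ControlNet pipeline exercised below. It mirrors the
+# slow `test_canny` case in this file (same checkpoints, `control_image` keyword and
+# `strength` argument) and is illustrative only; it is not executed by the test suite.
+#
+#     from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
+#     from diffusers.utils import load_image
+#
+#     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
+#     pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+#         "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
+#     )
+#     pipe.enable_model_cpu_offload()
+#     init_image = load_image(
+#         "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
+#     ).resize((512, 512))
+#     control_image = load_image(
+#         "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
+#     ).resize((512, 512))
+#     image = pipe(
+#         "evil space-punk bird", init_image, control_image=control_image,
+#         num_inference_steps=50, strength=0.6, output_type="np",
+#     ).images[0]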
+ +# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ + +import gc +import random +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + StableDiffusionControlNetImg2ImgPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel +from diffusers.utils import load_image +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_numpy, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class ControlNetImg2ImgPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionControlNetImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=1, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = 
torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + control_image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ) + image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + "control_image": control_image, + } + + return inputs + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.7096, 0.5149, 0.3571, 0.5897, 0.4715, 0.4052, 0.6098, 0.6886, 0.4213]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +class StableDiffusionMultiControlNetPipelineFastTests( + IPAdapterTesterMixin, PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionControlNetImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=1, + ) + torch.manual_seed(0) + + def init_weights(m): + if isinstance(m, torch.nn.Conv2d): + torch.nn.init.normal_(m.weight) + m.bias.data.fill_(1.0) + + controlnet1 = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + controlnet1.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + controlnet2 = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + norm_num_groups=1, + ) + 
controlnet2.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + controlnet = MultiControlNetModel([controlnet1, controlnet2]) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + + control_image = [ + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + ] + + image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + "control_image": control_image, + } + + return inputs + + def test_control_guidance_switch(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + scale = 10.0 + steps = 4 + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_1 = pipe(**inputs)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] + + # make sure that all outputs are different + assert np.sum(np.abs(output_1 - output_2)) > 1e-3 + assert np.sum(np.abs(output_1 - output_3)) > 1e-3 + assert np.sum(np.abs(output_1 - output_4)) > 1e-3 + + def 
test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.5293, 0.7339, 0.6642, 0.3950, 0.5212, 0.5175, 0.7002, 0.5907, 0.5182]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_save_pretrained_raise_not_implemented_exception(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + with tempfile.TemporaryDirectory() as tmpdir: + try: + # save_pretrained is not implemented for Multi-ControlNet + pipe.save_pretrained(tmpdir) + except NotImplementedError: + pass + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") + + pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "evil space-punk bird" + control_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ).resize((512, 512)) + image = load_image( + "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" + ).resize((512, 512)) + + output = pipe( + prompt, + image, + control_image=control_image, + generator=generator, + output_type="np", + num_inference_steps=50, + strength=0.6, + ) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" + ) + + assert np.abs(expected_image - image).max() < 9e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..ebbe869e9e5e35d78617245813f32a53d2fb0139 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_inpaint.py @@ -0,0 +1,575 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This model implementation is heavily based on: + +import gc +import random +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + StableDiffusionControlNetInpaintPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel +from diffusers.utils import load_image +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class ControlNetInpaintPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionControlNetInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=9, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + control_image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ) + init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + init_image = init_image.cpu().permute(0, 2, 3, 1)[0] + + image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) + mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + "mask_image": mask_image, + "control_image": control_image, + } + + return inputs + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests): + pipeline_class = StableDiffusionControlNetInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset([]) + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + 
torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + +class MultiControlNetInpaintPipelineFastTests( + PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionControlNetInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=9, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + torch.manual_seed(0) + + def init_weights(m): + if isinstance(m, torch.nn.Conv2d): + torch.nn.init.normal_(m.weight) + m.bias.data.fill_(1.0) + + controlnet1 = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + ) + controlnet1.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + controlnet2 = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + ) + controlnet2.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + controlnet = MultiControlNetModel([controlnet1, controlnet2]) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + + control_image = [ + randn_tensor( + (1, 3, 32 * 
controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + ] + init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + init_image = init_image.cpu().permute(0, 2, 3, 1)[0] + + image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) + mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + "mask_image": mask_image, + "control_image": control_image, + } + + return inputs + + def test_control_guidance_switch(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + scale = 10.0 + steps = 4 + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_1 = pipe(**inputs)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] + + # make sure that all outputs are different + assert np.sum(np.abs(output_1 - output_2)) > 1e-3 + assert np.sum(np.abs(output_1 - output_3)) > 1e-3 + assert np.sum(np.abs(output_1 - output_4)) > 1e-3 + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_save_pretrained_raise_not_implemented_exception(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + with tempfile.TemporaryDirectory() as tmpdir: + try: + # save_pretrained is not implemented for Multi-ControlNet + pipe.save_pretrained(tmpdir) + except NotImplementedError: + pass + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class ControlNetInpaintPipelineSlowTests(unittest.TestCase): + def setUp(self): + 
super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") + + pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None, controlnet=controlnet + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + image = load_image( + "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" + ).resize((512, 512)) + + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ).resize((512, 512)) + + prompt = "pitch black hole" + + control_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ).resize((512, 512)) + + output = pipe( + prompt, + image=image, + mask_image=mask_image, + control_image=control_image, + generator=generator, + output_type="np", + num_inference_steps=3, + ) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy" + ) + + assert np.abs(expected_image - image).max() < 9e-2 + + def test_inpaint(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint") + + pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet + ) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(33) + + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" + ) + init_image = init_image.resize((512, 512)) + + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" + ) + mask_image = mask_image.resize((512, 512)) + + prompt = "a handsome man with ray-ban sunglasses" + + def make_inpaint_condition(image, image_mask): + image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 + image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 + + assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size" + image[image_mask > 0.5] = -1.0 # set as masked pixel + image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) + image = torch.from_numpy(image) + return image + + control_image = make_inpaint_condition(init_image, mask_image) + + output = pipe( + prompt, + image=init_image, + mask_image=mask_image, + control_image=control_image, + guidance_scale=9.0, + eta=1.0, + generator=generator, + num_inference_steps=20, + output_type="np", + ) + image = output.images[0] + + assert image.shape == (512, 512, 3) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy" + ) + + assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2 diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..c91f2c700c15fe0588fde7664a622dd85fbdd858 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py @@ -0,0 +1,356 @@ +# coding=utf-8 +# Copyright 2025 Harutatsu Akiyama, Jinbin Bai, and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + EulerDiscreteScheduler, + StableDiffusionXLControlNetInpaintPipeline, + UNet2DConditionModel, +) +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class ControlNetPipelineSDXLFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionXLControlNetInpaintPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = frozenset(IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"mask_image", "control_image"})) + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + { + "add_text_embeds", + "add_time_ids", + "mask", + "masked_image_latents", + } + ) + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + 
attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + image_size=224, + projection_dim=32, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + feature_extractor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + } + return components + + def get_dummy_inputs(self, device, seed=0, img_res=64): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + # Get random floats in [0, 1] as image + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + mask_image = torch.ones_like(image) + controlnet_embedder_scale_factor = 2 + control_image = ( + floats_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + rng=random.Random(seed), + ) + .to(device) + .cpu() + ) + control_image = control_image.cpu().permute(0, 2, 3, 1)[0] + # Convert image and mask_image to [0, 255] + image = 255 * image + mask_image = 255 * mask_image + control_image = 255 * control_image + # Convert to PIL image + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) + mask_image = Image.fromarray(np.uint8(mask_image)).convert("L").resize((img_res, img_res)) + control_image = Image.fromarray(np.uint8(control_image)).convert("RGB").resize((img_res, img_res)) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": init_image, + "mask_image": mask_image, + 
"control_image": control_image, + } + return inputs + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_stable_diffusion_xl_multi_prompts(self): + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + + # forward with single prompt + inputs = self.get_dummy_inputs(torch_device) + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = inputs["prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "different prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + # manually set a negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same negative_prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = inputs["negative_prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = "different negative prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + def test_controlnet_sdxl_guess(self): + device = "cpu" + + 
components = self.get_dummy_components() + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guess_mode"] = True + + output = sd_pipe(**inputs) + image_slice = output.images[0, -3:, -3:, -1] + + expected_slice = np.array([0.5460, 0.4943, 0.4635, 0.5832, 0.5366, 0.4815, 0.6034, 0.5741, 0.4341]) + + # make sure that it's equal + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 + + # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests + def test_save_load_optional_components(self): + pass + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_sdxl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..42ec446dbfae095ec75f7d3af862fc0cb61518c9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_sdxl.py @@ -0,0 +1,1082 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
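+
+# Descriptive note (added comment, not part of the upstream file): the tests in this module
+# exercise StableDiffusionXLControlNetPipeline and its multi-ControlNet variants. The fast
+# test classes build tiny dummy UNet/ControlNet/VAE/CLIP components so they can run on CPU in
+# a couple of inference steps, while the @slow tests load real SDXL ControlNet checkpoints
+# from the Hub and require a torch accelerator.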
+ +import copy +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LCMScheduler, + StableDiffusionXLControlNetPipeline, + StableDiffusionXLImg2ImgPipeline, + UNet2DConditionModel, +) +from diffusers.models.unets.unet_2d_blocks import UNetMidBlock2D +from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLControlNetPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + time_cond_proj_dim=time_cond_proj_dim, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + 
hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + } + + return inputs + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + def test_ip_adapter(self, from_ssd1b=False, expected_pipe_slice=None): + if not from_ssd1b: + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [0.7335, 0.5866, 0.5623, 0.6242, 0.5751, 0.5999, 0.4091, 0.4590, 0.5054] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_stable_diffusion_xl_multi_prompts(self): + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + + # forward with single prompt + inputs = self.get_dummy_inputs(torch_device) + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same prompt duplicated 
+ inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = inputs["prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "different prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + # manually set a negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same negative_prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = inputs["negative_prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = "different negative prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + def test_controlnet_sdxl_guess(self): + device = "cpu" + + components = self.get_dummy_components() + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guess_mode"] = True + + output = sd_pipe(**inputs) + image_slice = output.images[0, -3:, -3:, -1] + + expected_slice = np.array([0.7335, 0.5866, 0.5623, 0.6242, 0.5751, 0.5999, 0.4091, 0.4590, 0.5054]) + + # make sure that it's equal + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 + + def test_controlnet_sdxl_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLControlNetPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.7820, 0.6195, 0.6193, 0.7045, 0.6706, 0.5837, 0.4147, 0.5232, 0.4868]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + # Copied from test_stable_diffusion_xl.py:test_stable_diffusion_two_xl_mixture_of_denoiser_fast + # with `StableDiffusionXLControlNetPipeline` instead of `StableDiffusionXLPipeline` + def test_controlnet_sdxl_two_mixture_of_denoiser_fast(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLControlNetPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + + components_without_controlnet = {k: v for k, v in components.items() if k != "controlnet"} + pipe_2 = StableDiffusionXLImg2ImgPipeline(**components_without_controlnet).to(torch_device) 
+ pipe_2.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, + split, + scheduler_cls_orig, + expected_tss, + num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) + expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) + expected_steps = expected_steps_1 + expected_steps_2 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) + expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, *args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = { + **inputs, + **{ + "denoising_end": 1.0 - (split / num_train_timesteps), + "output_type": "latent", + }, + } + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + inputs_2 = { + **inputs, + **{ + "denoising_start": 1.0 - (split / num_train_timesteps), + "image": latents, + }, + } + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + steps = 10 + for split in [300, 700]: + for scheduler_cls_timesteps in [ + (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), + ( + HeunDiscreteScheduler, + [ + 901.0, + 801.0, + 801.0, + 701.0, + 701.0, + 601.0, + 601.0, + 501.0, + 501.0, + 401.0, + 401.0, + 301.0, + 301.0, + 201.0, + 201.0, + 101.0, + 101.0, + 1.0, + 1.0, + ], + ), + ]: + assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) + + +class StableDiffusionXLMultiControlNetPipelineFastTests( + PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionXLControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + + def init_weights(m): + if isinstance(m, torch.nn.Conv2d): + 
torch.nn.init.normal_(m.weight) + m.bias.data.fill_(1.0) + + controlnet1 = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + controlnet1.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + controlnet2 = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + controlnet2.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + controlnet = MultiControlNetModel([controlnet1, controlnet2]) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + + images = [ + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + ] + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": images, + } + + return inputs + + def test_control_guidance_switch(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + 
pipe.to(torch_device) + + scale = 10.0 + steps = 4 + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_1 = pipe(**inputs)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] + + # make sure that all outputs are different + assert np.sum(np.abs(output_1 - output_2)) > 1e-3 + assert np.sum(np.abs(output_1 - output_3)) > 1e-3 + assert np.sum(np.abs(output_1 - output_4)) > 1e-3 + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + +class StableDiffusionXLMultiControlNetOneModelPipelineFastTests( + PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionXLControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + + def init_weights(m): + if isinstance(m, torch.nn.Conv2d): + torch.nn.init.normal_(m.weight) + m.bias.data.fill_(1.0) + + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + controlnet.controlnet_down_blocks.apply(init_weights) + + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + 
beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + controlnet = MultiControlNetModel([controlnet]) + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + images = [ + randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ), + ] + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": images, + } + + return inputs + + def test_control_guidance_switch(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + scale = 10.0 + steps = 4 + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_1 = pipe(**inputs)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_3 = pipe( + **inputs, + control_guidance_start=[0.1], + control_guidance_end=[0.2], + )[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = steps + inputs["controlnet_conditioning_scale"] = scale + output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] + + # make sure that all outputs are different + assert np.sum(np.abs(output_1 - output_2)) > 1e-3 + assert np.sum(np.abs(output_1 - output_3)) > 1e-3 + assert np.sum(np.abs(output_1 - output_4)) > 1e-3 + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + @unittest.skipIf( + torch_device != "cuda" or not 
is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + def test_negative_conditions(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + image_slice_without_neg_cond = image[0, -3:, -3:, -1] + + image = pipe( + **inputs, + negative_original_size=(512, 512), + negative_crops_coords_top_left=(0, 0), + negative_target_size=(1024, 1024), + ).images + image_slice_with_neg_cond = image[0, -3:, -3:, -1] + + self.assertTrue(np.abs(image_slice_without_neg_cond - image_slice_with_neg_cond).max() > 1e-2) + + +@slow +@require_torch_accelerator +class ControlNetSDXLPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") + + pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet + ) + pipe.enable_sequential_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "bird" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ) + + images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images + + assert images[0].shape == (768, 512, 3) + + original_image = images[0, -3:, -3:, -1].flatten() + expected_image = np.array([0.4185, 0.4127, 0.4089, 0.4046, 0.4115, 0.4096, 0.4081, 0.4112, 0.3913]) + assert np.allclose(original_image, expected_image, atol=1e-04) + + def test_depth(self): + controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0") + + pipe = StableDiffusionXLControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet + ) + pipe.enable_sequential_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "Stormtrooper's lecture" + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" + ) + + images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images + + assert images[0].shape == (512, 512, 3) + + original_image = images[0, -3:, -3:, -1].flatten() + expected_image = np.array([0.4399, 0.5112, 0.5478, 0.4314, 0.472, 0.4823, 0.4647, 0.4957, 0.4853]) + assert np.allclose(original_image, expected_image, atol=1e-04) + + +class StableDiffusionSSD1BControlNetPipelineFastTests(StableDiffusionXLControlNetPipelineFastTests): + def test_controlnet_sdxl_guess(self): + device = "cpu" + + components = self.get_dummy_components() + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + 
inputs["guess_mode"] = True + + output = sd_pipe(**inputs) + image_slice = output.images[0, -3:, -3:, -1] + + expected_slice = np.array([0.7212, 0.5890, 0.5491, 0.6425, 0.5970, 0.6091, 0.4418, 0.4556, 0.5032]) + + # make sure that it's equal + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.7212, 0.5890, 0.5491, 0.6425, 0.5970, 0.6091, 0.4418, 0.4556, 0.5032]) + + return super().test_ip_adapter(from_ssd1b=True, expected_pipe_slice=expected_pipe_slice) + + def test_controlnet_sdxl_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLControlNetPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.6787, 0.5117, 0.5558, 0.6963, 0.6571, 0.5928, 0.4121, 0.5468, 0.5057]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_conditioning_channels(self): + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + mid_block_type="UNetMidBlock2D", + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + time_cond_proj_dim=None, + ) + + controlnet = ControlNetModel.from_unet(unet, conditioning_channels=4) + assert type(controlnet.mid_block) is UNetMidBlock2D + assert controlnet.conditioning_channels == 4 + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + mid_block_type="UNetMidBlock2D", + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + time_cond_proj_dim=time_cond_proj_dim, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + mid_block_type="UNetMidBlock2D", + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", 
+ timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "feature_extractor": None, + "image_encoder": None, + } + return components diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..bd4a233741e8112ee8294b8f0188c14e022376b9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py @@ -0,0 +1,329 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
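+# Fast tests for StableDiffusionXLControlNetImg2ImgPipeline, built from the tiny dummy UNet, ControlNet, VAE, and CLIP text-encoder components defined in get_dummy_components below.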
+ +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + EulerDiscreteScheduler, + StableDiffusionXLControlNetImg2ImgPipeline, + UNet2DConditionModel, +) +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class ControlNetPipelineSDXLImg2ImgFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLControlNetImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} + ) + + def get_dummy_components(self, skip_first_text_encoder=False): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = 
CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + controlnet_embedder_scale_factor = 2 + image = floats_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + rng=random.Random(seed), + ).to(device) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "image": image, + "control_image": image, + } + + return inputs + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.6276, 0.5271, 0.5205, 0.5393, 0.5774, 0.5872, 0.5456, 0.5415, 0.5354]) + # TODO: update after slices.p + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_stable_diffusion_xl_controlnet_img2img(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_controlnet_img2img_guess(self): + device = "cpu" + + components = self.get_dummy_components() + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guess_mode"] = True + + output = sd_pipe(**inputs) + image_slice = output.images[0, -3:, -3:, -1] + assert output.images.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] + ) + + # make sure that it's equal + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + # TODO(Patrick, Sayak) - skip for now 
as this requires more refiner tests + def test_save_load_optional_components(self): + pass + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_stable_diffusion_xl_multi_prompts(self): + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + + # forward with single prompt + inputs = self.get_dummy_inputs(torch_device) + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = inputs["prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "different prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + # manually set a negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same negative_prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = inputs["negative_prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = "different negative prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux.py new file mode 100644 index 
0000000000000000000000000000000000000000..0895d9de35810d5132788006d2cb209e85910f19 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux.py @@ -0,0 +1,275 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc and The InstantX Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxControlNetPipeline, + FluxTransformer2DModel, +) +from diffusers.models import FluxControlNetModel +from diffusers.utils import load_image +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + nightly, + numpy_cosine_similarity_distance, + require_big_accelerator, + torch_device, +) +from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class FluxControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin, FluxIPAdapterTesterMixin): + pipeline_class = FluxControlNetPipeline + + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=16, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + + torch.manual_seed(0) + controlnet = FluxControlNetModel( + patch_size=1, + in_channels=16, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = T5TokenizerFast.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = 
FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "controlnet": controlnet, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 32, 32), + generator=generator, + device=torch.device(device), + dtype=torch.float16, + ) + + controlnet_conditioning_scale = 0.5 + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.5, + "output_type": "np", + "control_image": control_image, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + } + + return inputs + + def test_controlnet_flux(self): + components = self.get_dummy_components() + flux_pipe = FluxControlNetPipeline(**components) + flux_pipe = flux_pipe.to(torch_device, dtype=torch.float16) + flux_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = flux_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + expected_slice = np.array( + [0.47387695, 0.63134766, 0.5605469, 0.61621094, 0.7207031, 0.7089844, 0.70410156, 0.6113281, 0.64160156] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) + + @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") + def test_xformers_attention_forwardGenerator_pass(self): + pass + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 56)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update( + { + "control_image": randn_tensor( + (1, 3, height, width), + device=torch_device, + dtype=torch.float16, + ) + } + ) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + +@nightly +@require_big_accelerator +class FluxControlNetPipelineSlowTests(unittest.TestCase): + pipeline_class = FluxControlNetPipeline + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = FluxControlNetModel.from_pretrained( + "InstantX/FLUX.1-dev-Controlnet-Canny-alpha", torch_dtype=torch.bfloat16 + ) + pipe = FluxControlNetPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + text_encoder=None, + text_encoder_2=None, + controlnet=controlnet, + torch_dtype=torch.bfloat16, + ).to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + control_image = load_image( + "https://huggingface.co/InstantX/FLUX.1-dev-Controlnet-Canny-alpha/resolve/main/canny.jpg" + ).resize((512, 512)) + + prompt_embeds = torch.load( + hf_hub_download(repo_id="diffusers/test-slices", 
repo_type="dataset", filename="flux/prompt_embeds.pt") + ).to(torch_device) + pooled_prompt_embeds = torch.load( + hf_hub_download( + repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/pooled_prompt_embeds.pt" + ) + ).to(torch_device) + + output = pipe( + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + control_image=control_image, + controlnet_conditioning_scale=0.6, + num_inference_steps=2, + guidance_scale=3.5, + max_sequence_length=256, + output_type="np", + height=512, + width=512, + generator=generator, + ) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + original_image = image[-3:, -3:, -1].flatten() + + expected_image = np.array([0.2734, 0.2852, 0.2852, 0.2734, 0.2754, 0.2891, 0.2617, 0.2637, 0.2773]) + + assert numpy_cosine_similarity_distance(original_image.flatten(), expected_image) < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..3d8378a5786d658cad907fbd4f13f8f6f3e67bc7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux_img2img.py @@ -0,0 +1,219 @@ +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxControlNetImg2ImgPipeline, + FluxControlNetModel, + FluxTransformer2DModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist + + +class FluxControlNetImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = FluxControlNetImg2ImgPipeline + params = frozenset( + [ + "prompt", + "image", + "control_image", + "height", + "width", + "strength", + "guidance_scale", + "controlnet_conditioning_scale", + "prompt_embeds", + "pooled_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "image", "control_image"]) + + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + 
scaling_factor=1.5035, + ) + + torch.manual_seed(0) + controlnet = FluxControlNetModel( + in_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "controlnet": controlnet, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + image = torch.randn(1, 3, 32, 32).to(device) + control_image = torch.randn(1, 3, 32, 32).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "control_image": control_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "controlnet_conditioning_scale": 1.0, + "strength": 0.8, + "height": 32, + "width": 32, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_flux_controlnet_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + assert max_diff > 1e-6 + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + pipe.transformer.fuse_qkv_projections() + self.assertTrue( + check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), + ("Something wrong with the fused attention layers. Expected all the attention projections to be fused."), + ) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 56)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + inputs.update( + { + "control_image": randn_tensor( + (1, 3, height, width), + device=torch_device, + dtype=torch.float16, + ), + "image": randn_tensor( + (1, 3, height, width), + device=torch_device, + dtype=torch.float16, + ), + "height": height, + "width": width, + } + ) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..3ba475deb8a834619a66328bc520cd5060ce98d9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_flux/test_controlnet_flux_inpaint.py @@ -0,0 +1,226 @@ +import random +import unittest + +import numpy as np +import torch + +# torch_device, # {{ edit_1 }} Removed unused import +from transformers import ( + AutoTokenizer, + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, + T5EncoderModel, +) + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxControlNetInpaintPipeline, + FluxControlNetModel, + FluxTransformer2DModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class FluxControlNetInpaintPipelineTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = FluxControlNetInpaintPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "prompt_embeds", + "pooled_prompt_embeds", + "image", + "mask_image", + "control_image", + "strength", + "num_inference_steps", + "controlnet_conditioning_scale", + ] + ) + batch_params = frozenset(["prompt", "image", "mask_image", "control_image"]) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=8, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + 
latent_channels=2, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + torch.manual_seed(0) + controlnet = FluxControlNetModel( + patch_size=1, + in_channels=8, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "controlnet": controlnet, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = torch.ones((1, 1, 32, 32)).to(device) + control_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "mask_image": mask_image, + "control_image": control_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 32, + "width": 32, + "max_sequence_length": 48, + "strength": 0.8, + "output_type": "np", + } + return inputs + + def test_flux_controlnet_inpaint_with_num_images_per_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_images_per_prompt"] = 2 + output = pipe(**inputs) + images = output.images + + assert images.shape == (2, 32, 32, 3) + + def test_flux_controlnet_inpaint_with_controlnet_conditioning_scale(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output_default = pipe(**inputs) + image_default = output_default.images + + inputs["controlnet_conditioning_scale"] = 0.5 + output_scaled = pipe(**inputs) + image_scaled = output_scaled.images + + # Ensure that changing the controlnet_conditioning_scale produces a different output + assert not np.allclose(image_default, image_scaled, atol=0.01) + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 56)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update( + { + "control_image": randn_tensor( + (1, 3, height, width), + device=torch_device, + dtype=torch.float16, + ), + "image": randn_tensor( + (1, 3, height, width), + device=torch_device, + dtype=torch.float16, + ), + "mask_image": torch.ones((1, 1, 
height, width)).to(torch_device), + "height": height, + "width": width, + } + ) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_hunyuandit/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_hunyuandit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py new file mode 100644 index 0000000000000000000000000000000000000000..9619843779010b0a5455d14461e9dff19a48c782 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_hunyuandit/test_controlnet_hunyuandit.py @@ -0,0 +1,364 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc and Tencent Hunyuan Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, BertModel, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + DDPMScheduler, + HunyuanDiT2DModel, + HunyuanDiTControlNetPipeline, +) +from diffusers.models import HunyuanDiT2DControlNetModel, HunyuanDiT2DMultiControlNetModel +from diffusers.utils import load_image +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class HunyuanDiTControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = HunyuanDiTControlNetPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + test_layerwise_casting = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = HunyuanDiT2DModel( + sample_size=16, + num_layers=4, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + in_channels=4, + cross_attention_dim=32, + cross_attention_dim_t5=32, + pooled_projection_dim=16, + hidden_size=24, + activation_fn="gelu-approximate", + ) + + torch.manual_seed(0) + controlnet = HunyuanDiT2DControlNetModel( + sample_size=16, + transformer_num_layers=4, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + in_channels=4, + cross_attention_dim=32, + cross_attention_dim_t5=32, + pooled_projection_dim=16, + hidden_size=24, + activation_fn="gelu-approximate", + ) + + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDPMScheduler() + text_encoder = 
BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "safety_checker": None, + "feature_extractor": None, + "controlnet": controlnet, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 16, 16), + generator=generator, + device=torch.device(device), + dtype=torch.float16, + ) + + controlnet_conditioning_scale = 0.5 + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "control_image": control_image, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + } + + return inputs + + def test_controlnet_hunyuandit(self): + components = self.get_dummy_components() + pipe = HunyuanDiTControlNetPipeline(**components) + pipe = pipe.to(torch_device, dtype=torch.float16) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + assert image.shape == (1, 16, 16, 3) + + if torch_device == "xpu": + expected_slice = np.array( + [0.6376953, 0.84375, 0.58691406, 0.48046875, 0.43652344, 0.5517578, 0.54248047, 0.5644531, 0.48217773] + ) + else: + expected_slice = np.array( + [0.6953125, 0.89208984, 0.59375, 0.5078125, 0.5786133, 0.6035156, 0.5839844, 0.53564453, 0.52246094] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-3, + ) + + def test_sequential_cpu_offload_forward_pass(self): + # TODO(YiYi) need to fix later + pass + + def test_sequential_offload_forward_pass_twice(self): + # TODO(YiYi) need to fix later + pass + + def test_save_load_optional_components(self): + # TODO(YiYi) need to fix later + pass + + @unittest.skip( + "Test not supported as `encode_prompt` is called two times separately which deivates from about 99% of the pipelines we have." 
+ ) + def test_encode_prompt_works_in_isolation(self): + pass + + +@slow +@require_torch_accelerator +class HunyuanDiTControlNetPipelineSlowTests(unittest.TestCase): + pipeline_class = HunyuanDiTControlNetPipeline + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = HunyuanDiT2DControlNetModel.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny", torch_dtype=torch.float16 + ) + pipe = HunyuanDiTControlNetPipeline.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "At night, an ancient Chinese-style lion statue stands in front of the hotel, its eyes gleaming as if guarding the building. The background is the hotel entrance at night, with a close-up, eye-level, and centered composition. This photo presents a realistic photographic style, embodies Chinese sculpture culture, and reveals a mysterious atmosphere." + n_prompt = "" + control_image = load_image( + "https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny/resolve/main/canny.jpg?download=true" + ) + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=control_image, + controlnet_conditioning_scale=0.5, + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + + expected_image = np.array( + [0.43652344, 0.4399414, 0.44921875, 0.45043945, 0.45703125, 0.44873047, 0.43579102, 0.44018555, 0.42578125] + ) + + assert np.abs(original_image.flatten() - expected_image).max() < 1e-2 + + def test_pose(self): + controlnet = HunyuanDiT2DControlNetModel.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Pose", torch_dtype=torch.float16 + ) + pipe = HunyuanDiTControlNetPipeline.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "An Asian woman, dressed in a green top, wearing a purple headscarf and a purple scarf, stands in front of a blackboard. The background is the blackboard. 
The photo is presented in a close-up, eye-level, and centered composition, adopting a realistic photographic style" + n_prompt = "" + control_image = load_image( + "https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Pose/resolve/main/pose.jpg?download=true" + ) + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=control_image, + controlnet_conditioning_scale=0.5, + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + + expected_image = np.array( + [0.4091797, 0.4177246, 0.39526367, 0.4194336, 0.40356445, 0.3857422, 0.39208984, 0.40429688, 0.37451172] + ) + + assert np.abs(original_image.flatten() - expected_image).max() < 1e-2 + + def test_depth(self): + controlnet = HunyuanDiT2DControlNetModel.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Depth", torch_dtype=torch.float16 + ) + pipe = HunyuanDiTControlNetPipeline.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "In the dense forest, a black and white panda sits quietly in green trees and red flowers, surrounded by mountains, rivers, and the ocean. The background is the forest in a bright environment." + n_prompt = "" + control_image = load_image( + "https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Depth/resolve/main/depth.jpg?download=true" + ) + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=control_image, + controlnet_conditioning_scale=0.5, + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + + expected_image = np.array( + [0.31982422, 0.32177734, 0.30126953, 0.3190918, 0.3100586, 0.31396484, 0.3232422, 0.33544922, 0.30810547] + ) + + assert np.abs(original_image.flatten() - expected_image).max() < 1e-2 + + def test_multi_controlnet(self): + controlnet = HunyuanDiT2DControlNetModel.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny", torch_dtype=torch.float16 + ) + controlnet = HunyuanDiT2DMultiControlNetModel([controlnet, controlnet]) + + pipe = HunyuanDiTControlNetPipeline.from_pretrained( + "Tencent-Hunyuan/HunyuanDiT-v1.1-Diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "At night, an ancient Chinese-style lion statue stands in front of the hotel, its eyes gleaming as if guarding the building. The background is the hotel entrance at night, with a close-up, eye-level, and centered composition. This photo presents a realistic photographic style, embodies Chinese sculpture culture, and reveals a mysterious atmosphere." 
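+ # The same Canny ControlNet is wrapped twice in HunyuanDiT2DMultiControlNetModel above, so control_image and controlnet_conditioning_scale are passed as per-controlnet lists below.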
+ n_prompt = "" + control_image = load_image( + "https://huggingface.co/Tencent-Hunyuan/HunyuanDiT-v1.1-ControlNet-Diffusers-Canny/resolve/main/canny.jpg?download=true" + ) + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=[control_image, control_image], + controlnet_conditioning_scale=[0.25, 0.25], + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + + expected_image = np.array( + [0.43652344, 0.44018555, 0.4494629, 0.44995117, 0.45654297, 0.44848633, 0.43603516, 0.4404297, 0.42626953] + ) + + assert np.abs(original_image.flatten() - expected_image).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py new file mode 100644 index 0000000000000000000000000000000000000000..34c34b7a2ce744d8f4d165b281f30f775318c1d7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py @@ -0,0 +1,203 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3ControlNetInpaintingPipeline, +) +from diffusers.models import SD3ControlNetModel +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusion3ControlInpaintNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3ControlNetInpaintingPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=8, + num_layers=4, + attention_head_dim=8, + num_attention_heads=4, + joint_attention_dim=32, + caption_projection_dim=32, + pooled_projection_dim=64, + out_channels=8, + ) + + torch.manual_seed(0) + controlnet = SD3ControlNetModel( + sample_size=32, + patch_size=1, + in_channels=8, + num_layers=1, + attention_head_dim=8, + num_attention_heads=4, + joint_attention_dim=32, + caption_projection_dim=32, + pooled_projection_dim=64, + out_channels=8, + extra_conditioning_channels=1, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=8, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + "vae": vae, + "controlnet": controlnet, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 32, 32), + generator=generator, + device=torch.device(device), + dtype=torch.float16, + ) + + control_mask = randn_tensor( + (1, 1, 32, 32), + 
generator=generator, + device=torch.device(device), + dtype=torch.float16, + ) + + controlnet_conditioning_scale = 0.95 + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.0, + "output_type": "np", + "control_image": control_image, + "control_mask": control_mask, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + } + + return inputs + + def test_controlnet_inpaint_sd3(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusion3ControlNetInpaintingPipeline(**components) + sd_pipe = sd_pipe.to(torch_device, dtype=torch.float16) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + expected_slice = np.array( + [0.51708984, 0.7421875, 0.4580078, 0.6435547, 0.65625, 0.43603516, 0.5151367, 0.65722656, 0.60839844] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) + + @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") + def test_xformers_attention_forwardGenerator_pass(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py new file mode 100644 index 0000000000000000000000000000000000000000..2b6cf8d1e8be32ae911d2aaf3afea927cdfc7f9d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/controlnet_sd3/test_controlnet_sd3.py @@ -0,0 +1,365 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc and The InstantX Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
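+# Fast tests for StableDiffusion3ControlNetPipeline with tiny dummy components, plus slow tests that load the InstantX SD3 ControlNet checkpoints (Canny, Pose, Tile) from the Hub.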
+ +import gc +import unittest +from typing import Optional + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3ControlNetPipeline, +) +from diffusers.models import SD3ControlNetModel, SD3MultiControlNetModel +from diffusers.utils import load_image +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_big_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusion3ControlNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3ControlNetPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components( + self, num_controlnet_layers: int = 3, qk_norm: Optional[str] = "rms_norm", use_dual_attention=False + ): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=8, + num_layers=4, + attention_head_dim=8, + num_attention_heads=4, + joint_attention_dim=32, + caption_projection_dim=32, + pooled_projection_dim=64, + out_channels=8, + qk_norm=qk_norm, + dual_attention_layers=() if not use_dual_attention else (0, 1), + ) + + torch.manual_seed(0) + controlnet = SD3ControlNetModel( + sample_size=32, + patch_size=1, + in_channels=8, + num_layers=num_controlnet_layers, + attention_head_dim=8, + num_attention_heads=4, + joint_attention_dim=32, + caption_projection_dim=32, + pooled_projection_dim=64, + out_channels=8, + qk_norm=qk_norm, + dual_attention_layers=() if not use_dual_attention else (0,), + ) + + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=8, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + 
"vae": vae, + "controlnet": controlnet, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 32, 32), + generator=generator, + device=torch.device(device), + dtype=torch.float16, + ) + + controlnet_conditioning_scale = 0.5 + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "control_image": control_image, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + } + + return inputs + + def run_pipe(self, components, use_sd35=False): + sd_pipe = StableDiffusion3ControlNetPipeline(**components) + sd_pipe = sd_pipe.to(torch_device, dtype=torch.float16) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + if not use_sd35: + expected_slice = np.array([0.5767, 0.7100, 0.5981, 0.5674, 0.5952, 0.4102, 0.5093, 0.5044, 0.6030]) + else: + expected_slice = np.array([1.0000, 0.9072, 0.4209, 0.2744, 0.5737, 0.3840, 0.6113, 0.6250, 0.6328]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f"Expected: {expected_slice}, got: {image_slice.flatten()}" + ) + + def test_controlnet_sd3(self): + components = self.get_dummy_components() + self.run_pipe(components) + + def test_controlnet_sd35(self): + components = self.get_dummy_components(num_controlnet_layers=1, qk_norm="rms_norm", use_dual_attention=True) + self.run_pipe(components, use_sd35=True) + + @unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention") + def test_xformers_attention_forwardGenerator_pass(self): + pass + + +@slow +@require_big_accelerator +class StableDiffusion3ControlNetPipelineSlowTests(unittest.TestCase): + pipeline_class = StableDiffusion3ControlNetPipeline + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_canny(self): + controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16) + pipe = StableDiffusion3ControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. 
text 'InstantX' on image" + n_prompt = "NSFW, nude, naked, porn, ugly" + control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg") + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=control_image, + controlnet_conditioning_scale=0.5, + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + + expected_image = np.array([0.7314, 0.7075, 0.6611, 0.7539, 0.7563, 0.6650, 0.6123, 0.7275, 0.7222]) + + assert numpy_cosine_similarity_distance(original_image.flatten(), expected_image) < 1e-2 + + def test_pose(self): + controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Pose", torch_dtype=torch.float16) + pipe = StableDiffusion3ControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text "InstantX" on image' + n_prompt = "NSFW, nude, naked, porn, ugly" + control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Pose/resolve/main/pose.jpg") + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=control_image, + controlnet_conditioning_scale=0.5, + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + expected_image = np.array([0.9048, 0.8740, 0.8936, 0.8516, 0.8799, 0.9360, 0.8379, 0.8408, 0.8652]) + + assert numpy_cosine_similarity_distance(original_image.flatten(), expected_image) < 1e-2 + + def test_tile(self): + controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Tile", torch_dtype=torch.float16) + pipe = StableDiffusion3ControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = 'Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. 
text "InstantX" on image' + n_prompt = "NSFW, nude, naked, porn, ugly" + control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Tile/resolve/main/tile.jpg") + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=control_image, + controlnet_conditioning_scale=0.5, + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + expected_image = np.array([0.6699, 0.6836, 0.6226, 0.6572, 0.7310, 0.6646, 0.6650, 0.6694, 0.6011]) + + assert numpy_cosine_similarity_distance(original_image.flatten(), expected_image) < 1e-2 + + def test_multi_controlnet(self): + controlnet = SD3ControlNetModel.from_pretrained("InstantX/SD3-Controlnet-Canny", torch_dtype=torch.float16) + controlnet = SD3MultiControlNetModel([controlnet, controlnet]) + + pipe = StableDiffusion3ControlNetPipeline.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + prompt = "Anime style illustration of a girl wearing a suit. A moon in sky. In the background we see a big rain approaching. text 'InstantX' on image" + n_prompt = "NSFW, nude, naked, porn, ugly" + control_image = load_image("https://huggingface.co/InstantX/SD3-Controlnet-Canny/resolve/main/canny.jpg") + + output = pipe( + prompt, + negative_prompt=n_prompt, + control_image=[control_image, control_image], + controlnet_conditioning_scale=[0.25, 0.25], + guidance_scale=5.0, + num_inference_steps=2, + output_type="np", + generator=generator, + ) + image = output.images[0] + + assert image.shape == (1024, 1024, 3) + + original_image = image[-3:, -3:, -1].flatten() + expected_image = np.array([0.7207, 0.7041, 0.6543, 0.7500, 0.7490, 0.6592, 0.6001, 0.7168, 0.7231]) + + assert numpy_cosine_similarity_distance(original_image.flatten(), expected_image) < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/cosmos_guardrail.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/cosmos_guardrail.py new file mode 100644 index 0000000000000000000000000000000000000000..4de14fbaaf9d579c8afe2f1e0b9223360bef1914 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/cosmos_guardrail.py @@ -0,0 +1,47 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# ===== This file is an implementation of a dummy guardrail for the fast tests ===== + +from typing import Union + +import numpy as np +import torch + +from diffusers.configuration_utils import ConfigMixin +from diffusers.models.modeling_utils import ModelMixin + + +class DummyCosmosSafetyChecker(ModelMixin, ConfigMixin): + def __init__(self) -> None: + super().__init__() + + self._dtype = torch.float32 + + def check_text_safety(self, prompt: str) -> bool: + return True + + def check_video_safety(self, frames: np.ndarray) -> np.ndarray: + return frames + + def to(self, device: Union[str, torch.device] = None, dtype: torch.dtype = None) -> None: + self._dtype = dtype + + @property + def device(self) -> torch.device: + return None + + @property + def dtype(self) -> torch.dtype: + return self._dtype diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos.py new file mode 100644 index 0000000000000000000000000000000000000000..32eea9c98c2cd13d677bdf008f32608f31b37779 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos.py @@ -0,0 +1,354 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
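The DummyCosmosSafetyChecker above mirrors the interface the Cosmos pipelines call into (check_text_safety, check_video_safety, plus to/device/dtype) while performing no filtering, so the fast tests never have to download the full Cosmos Guardrail. The same object can be passed when loading a real checkpoint; a sketch, where the checkpoint id is an assumption and not part of this patch:

import torch
from diffusers import CosmosTextToWorldPipeline
# import path follows the layout added by this patch; adjust to your checkout
from tests.pipelines.cosmos.cosmos_guardrail import DummyCosmosSafetyChecker

pipe = CosmosTextToWorldPipeline.from_pretrained(
    "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",  # assumed checkpoint id, not taken from these tests
    safety_checker=DummyCosmosSafetyChecker(),    # bypasses the large default guardrail
    torch_dtype=torch.bfloat16,
)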
+ +import inspect +import json +import os +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCosmos, CosmosTextToWorldPipeline, CosmosTransformer3DModel, EDMEulerScheduler + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np +from .cosmos_guardrail import DummyCosmosSafetyChecker + + +enable_full_determinism() + + +class CosmosTextToWorldPipelineWrapper(CosmosTextToWorldPipeline): + @staticmethod + def from_pretrained(*args, **kwargs): + kwargs["safety_checker"] = DummyCosmosSafetyChecker() + return CosmosTextToWorldPipeline.from_pretrained(*args, **kwargs) + + +class CosmosTextToWorldPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CosmosTextToWorldPipelineWrapper + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CosmosTransformer3DModel( + in_channels=4, + out_channels=4, + num_attention_heads=2, + attention_head_dim=16, + num_layers=2, + mlp_ratio=2, + text_embed_dim=32, + adaln_lora_dim=4, + max_size=(4, 32, 32), + patch_size=(1, 2, 2), + rope_scale=(2.0, 1.0, 1.0), + concat_padding_mask=True, + extra_pos_embed_type="learnable", + ) + + torch.manual_seed(0) + vae = AutoencoderKLCosmos( + in_channels=3, + out_channels=3, + latent_channels=4, + encoder_block_out_channels=(8, 8, 8, 8), + decode_block_out_channels=(8, 8, 8, 8), + attention_resolutions=(8,), + resolution=64, + num_layers=2, + patch_size=4, + patch_type="haar", + scaling_factor=1.0, + spatial_compression_ratio=4, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = EDMEulerScheduler( + sigma_min=0.002, + sigma_max=80, + sigma_data=0.5, + sigma_schedule="karras", + num_train_timesteps=1000, + prediction_type="epsilon", + rho=7.0, + final_sigmas_type="sigma_min", + ) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + # We cannot run the Cosmos Guardrail for fast tests due to the large model size + "safety_checker": DummyCosmosSafetyChecker(), + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 32, + "width": 32, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = 
self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([0.0, 0.9686, 0.8549, 0.8078, 0.0, 0.8431, 1.0, 0.4863, 0.7098, 0.1098, 0.8157, 0.4235, 0.6353, 0.2549, 0.5137, 0.5333]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-2) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = 
"cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + self.pipeline_class._optional_components.remove("safety_checker") + super().test_save_load_optional_components(expected_max_difference=expected_max_difference) + self.pipeline_class._optional_components.append("safety_checker") + + def test_serialization_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + model_components = [ + component_name + for component_name, component in pipe.components.items() + if isinstance(component, torch.nn.Module) + ] + model_components.remove("safety_checker") + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + + with open(f"{tmpdir}/model_index.json", "r") as f: + config = json.load(f) + + for subfolder in os.listdir(tmpdir): + if not os.path.isfile(subfolder) and subfolder in model_components: + folder_path = os.path.join(tmpdir, subfolder) + is_folder = os.path.isdir(folder_path) and subfolder in config + assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + if not components: + self.skipTest("No dummy components defined.") + + pipe = self.pipeline_class(**components) + + specified_key = next(iter(components.keys())) + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained( + tmpdirname, safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict + ) + + for name, component in loaded_pipe.components.items(): + if name == "safety_checker": + continue + if isinstance(component, torch.nn.Module) and hasattr(component, 
"dtype"): + expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32)) + self.assertEqual( + component.dtype, + expected_dtype, + f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}", + ) + + @unittest.skip( + "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in " + "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is " + "too large and slow to run on CI." + ) + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos2_text2image.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos2_text2image.py new file mode 100644 index 0000000000000000000000000000000000000000..8e3c5e4c29f4e1b78e0bc8ae28ea4bcbf17c97f1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos2_text2image.py @@ -0,0 +1,341 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import json +import os +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + Cosmos2TextToImagePipeline, + CosmosTransformer3DModel, + FlowMatchEulerDiscreteScheduler, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np +from .cosmos_guardrail import DummyCosmosSafetyChecker + + +enable_full_determinism() + + +class Cosmos2TextToImagePipelineWrapper(Cosmos2TextToImagePipeline): + @staticmethod + def from_pretrained(*args, **kwargs): + kwargs["safety_checker"] = DummyCosmosSafetyChecker() + return Cosmos2TextToImagePipeline.from_pretrained(*args, **kwargs) + + +class Cosmos2TextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Cosmos2TextToImagePipelineWrapper + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CosmosTransformer3DModel( + in_channels=16, + out_channels=16, + num_attention_heads=2, + attention_head_dim=16, + num_layers=2, + mlp_ratio=2, + text_embed_dim=32, + adaln_lora_dim=4, + max_size=(4, 32, 32), + patch_size=(1, 2, 2), + rope_scale=(2.0, 1.0, 1.0), + concat_padding_mask=True, + 
extra_pos_embed_type="learnable", + ) + + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(use_karras_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + # We cannot run the Cosmos Guardrail for fast tests due to the large model size + "safety_checker": DummyCosmosSafetyChecker(), + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([0.451, 0.451, 0.4471, 0.451, 0.451, 0.451, 0.451, 0.451, 0.4784, 0.4784, 0.4784, 0.4784, 0.4784, 0.4902, 0.4588, 0.5333]) + # fmt: on + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = 
pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-2) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + self.pipeline_class._optional_components.remove("safety_checker") + super().test_save_load_optional_components(expected_max_difference=expected_max_difference) + self.pipeline_class._optional_components.append("safety_checker") + + def test_serialization_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + model_components = [ + component_name + for component_name, component in pipe.components.items() + if isinstance(component, torch.nn.Module) + ] + model_components.remove("safety_checker") + variant = "fp16" + + with 
tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + + with open(f"{tmpdir}/model_index.json", "r") as f: + config = json.load(f) + + for subfolder in os.listdir(tmpdir): + if not os.path.isfile(subfolder) and subfolder in model_components: + folder_path = os.path.join(tmpdir, subfolder) + is_folder = os.path.isdir(folder_path) and subfolder in config + assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + if not components: + self.skipTest("No dummy components defined.") + + pipe = self.pipeline_class(**components) + + specified_key = next(iter(components.keys())) + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained( + tmpdirname, safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict + ) + + for name, component in loaded_pipe.components.items(): + if name == "safety_checker": + continue + if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"): + expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32)) + self.assertEqual( + component.dtype, + expected_dtype, + f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}", + ) + + @unittest.skip( + "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in " + "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is " + "too large and slow to run on CI." + ) + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos2_video2world.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos2_video2world.py new file mode 100644 index 0000000000000000000000000000000000000000..b0ca0e160d984fbee497a588d57cf1803f07ecaa --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos2_video2world.py @@ -0,0 +1,355 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
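test_torch_dtype_dict above exercises per-component dtype selection at load time: torch_dtype may be a dict mapping component names to dtypes, with a "default" entry covering the remaining torch.nn.Module components. A sketch of the same call against a saved pipeline directory (the path is illustrative; the test saves its own dummy components to a temporary directory first):

import torch
from diffusers import Cosmos2TextToImagePipeline
from tests.pipelines.cosmos.cosmos_guardrail import DummyCosmosSafetyChecker

torch_dtype = {"transformer": torch.bfloat16, "default": torch.float16}
pipe = Cosmos2TextToImagePipeline.from_pretrained(
    "./saved-cosmos2-pipeline",                 # illustrative local path
    safety_checker=DummyCosmosSafetyChecker(),  # as in the tests, avoid the real guardrail
    torch_dtype=torch_dtype,
)
# the transformer loads in bfloat16; every other nn.Module component falls back to float16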
+ +import inspect +import json +import os +import tempfile +import unittest + +import numpy as np +import PIL.Image +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + Cosmos2VideoToWorldPipeline, + CosmosTransformer3DModel, + FlowMatchEulerDiscreteScheduler, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np +from .cosmos_guardrail import DummyCosmosSafetyChecker + + +enable_full_determinism() + + +class Cosmos2VideoToWorldPipelineWrapper(Cosmos2VideoToWorldPipeline): + @staticmethod + def from_pretrained(*args, **kwargs): + kwargs["safety_checker"] = DummyCosmosSafetyChecker() + return Cosmos2VideoToWorldPipeline.from_pretrained(*args, **kwargs) + + +class Cosmos2VideoToWorldPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Cosmos2VideoToWorldPipelineWrapper + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image", "video"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CosmosTransformer3DModel( + in_channels=16 + 1, + out_channels=16, + num_attention_heads=2, + attention_head_dim=16, + num_layers=2, + mlp_ratio=2, + text_embed_dim=32, + adaln_lora_dim=4, + max_size=(4, 32, 32), + patch_size=(1, 2, 2), + rope_scale=(2.0, 1.0, 1.0), + concat_padding_mask=True, + extra_pos_embed_type="learnable", + ) + + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(use_karras_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + # We cannot run the Cosmos Guardrail for fast tests due to the large model size + "safety_checker": DummyCosmosSafetyChecker(), + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image_height = 32 + image_width = 32 + image = PIL.Image.new("RGB", (image_width, image_height)) + + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": image_height, + "width": image_width, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + 
inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([0.451, 0.451, 0.4471, 0.451, 0.451, 0.451, 0.451, 0.451, 0.5098, 0.5137, 0.5176, 0.5098, 0.5255, 0.5412, 0.5098, 0.5059]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} + pipe = self.pipeline_class(**init_components) + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-2) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for 
component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + self.pipeline_class._optional_components.remove("safety_checker") + super().test_save_load_optional_components(expected_max_difference=expected_max_difference) + self.pipeline_class._optional_components.append("safety_checker") + + def test_serialization_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + model_components = [ + component_name + for component_name, component in pipe.components.items() + if isinstance(component, torch.nn.Module) + ] + model_components.remove("safety_checker") + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + + with open(f"{tmpdir}/model_index.json", "r") as f: + config = json.load(f) + + for subfolder in os.listdir(tmpdir): + if not os.path.isfile(subfolder) and subfolder in model_components: + folder_path = os.path.join(tmpdir, subfolder) + is_folder = os.path.isdir(folder_path) and subfolder in config + assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + if not components: + self.skipTest("No dummy components defined.") + + pipe = self.pipeline_class(**components) + + specified_key = next(iter(components.keys())) + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained( + tmpdirname, 
safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict + ) + + for name, component in loaded_pipe.components.items(): + if name == "safety_checker": + continue + if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"): + expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32)) + self.assertEqual( + component.dtype, + expected_dtype, + f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}", + ) + + @unittest.skip( + "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in " + "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is " + "too large and slow to run on CI." + ) + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos_video2world.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos_video2world.py new file mode 100644 index 0000000000000000000000000000000000000000..2633c2007ac2edc2007906369200590e9c1e24a9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/cosmos/test_cosmos_video2world.py @@ -0,0 +1,367 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
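test_serialization_with_variants above checks that saving with variant="fp16" produces per-component weight files whose second dot-separated segment starts with the variant name (e.g. diffusion_pytorch_model.fp16.bin when safe_serialization=False). A round-trip sketch under those same assumptions:

from diffusers import Cosmos2VideoToWorldPipeline
from tests.pipelines.cosmos.cosmos_guardrail import DummyCosmosSafetyChecker

# `pipe` is assumed to be a Cosmos2VideoToWorldPipeline assembled from dummy components as above
pipe.save_pretrained("./cosmos2-v2w-fp16", variant="fp16", safe_serialization=False)
# each model subfolder now holds a *.fp16.bin weight file, which is what the
# `p.split(".")[1].startswith(variant)` assertion in the test verifies
reloaded = Cosmos2VideoToWorldPipeline.from_pretrained(
    "./cosmos2-v2w-fp16",
    variant="fp16",
    safety_checker=DummyCosmosSafetyChecker(),
)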
+ +import inspect +import json +import os +import tempfile +import unittest + +import numpy as np +import PIL.Image +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLCosmos, CosmosTransformer3DModel, CosmosVideoToWorldPipeline, EDMEulerScheduler + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np +from .cosmos_guardrail import DummyCosmosSafetyChecker + + +enable_full_determinism() + + +class CosmosVideoToWorldPipelineWrapper(CosmosVideoToWorldPipeline): + @staticmethod + def from_pretrained(*args, **kwargs): + kwargs["safety_checker"] = DummyCosmosSafetyChecker() + return CosmosVideoToWorldPipeline.from_pretrained(*args, **kwargs) + + +class CosmosVideoToWorldPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = CosmosVideoToWorldPipelineWrapper + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image", "video"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = CosmosTransformer3DModel( + in_channels=4 + 1, + out_channels=4, + num_attention_heads=2, + attention_head_dim=16, + num_layers=2, + mlp_ratio=2, + text_embed_dim=32, + adaln_lora_dim=4, + max_size=(4, 32, 32), + patch_size=(1, 2, 2), + rope_scale=(2.0, 1.0, 1.0), + concat_padding_mask=True, + extra_pos_embed_type="learnable", + ) + + torch.manual_seed(0) + vae = AutoencoderKLCosmos( + in_channels=3, + out_channels=3, + latent_channels=4, + encoder_block_out_channels=(8, 8, 8, 8), + decode_block_out_channels=(8, 8, 8, 8), + attention_resolutions=(8,), + resolution=64, + num_layers=2, + patch_size=4, + patch_type="haar", + scaling_factor=1.0, + spatial_compression_ratio=4, + temporal_compression_ratio=4, + ) + + torch.manual_seed(0) + scheduler = EDMEulerScheduler( + sigma_min=0.002, + sigma_max=80, + sigma_data=0.5, + sigma_schedule="karras", + num_train_timesteps=1000, + prediction_type="epsilon", + rho=7.0, + final_sigmas_type="sigma_min", + ) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + # We cannot run the Cosmos Guardrail for fast tests due to the large model size + "safety_checker": DummyCosmosSafetyChecker(), + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image_height = 32 + image_width = 32 + image = PIL.Image.new("RGB", (image_width, image_height)) + + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 
image_height, + "width": image_width, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([0.0, 0.8275, 0.7529, 0.7294, 0.0, 0.6, 1.0, 0.3804, 0.6667, 0.0863, 0.8784, 0.5922, 0.6627, 0.2784, 0.5725, 0.7765]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} + pipe = self.pipeline_class(**init_components) + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + 
self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-2) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + self.pipeline_class._optional_components.remove("safety_checker") + super().test_save_load_optional_components(expected_max_difference=expected_max_difference) + self.pipeline_class._optional_components.append("safety_checker") + + def test_serialization_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + model_components = [ + component_name + for component_name, component in pipe.components.items() + if isinstance(component, torch.nn.Module) + ] + model_components.remove("safety_checker") + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + + with open(f"{tmpdir}/model_index.json", "r") as f: + config = json.load(f) + + for subfolder in os.listdir(tmpdir): + if not os.path.isfile(subfolder) and subfolder in model_components: + folder_path = os.path.join(tmpdir, subfolder) + is_folder = os.path.isdir(folder_path) and subfolder in config + assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + if not components: + self.skipTest("No dummy components defined.") + + pipe = 
self.pipeline_class(**components) + + specified_key = next(iter(components.keys())) + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + torch_dtype_dict = {specified_key: torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained( + tmpdirname, safety_checker=DummyCosmosSafetyChecker(), torch_dtype=torch_dtype_dict + ) + + for name, component in loaded_pipe.components.items(): + if name == "safety_checker": + continue + if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"): + expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32)) + self.assertEqual( + component.dtype, + expected_dtype, + f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}", + ) + + @unittest.skip( + "The pipeline should not be runnable without a safety checker. The test creates a pipeline without passing in " + "a safety checker, which makes the pipeline default to the actual Cosmos Guardrail. The Cosmos Guardrail is " + "too large and slow to run on CI." + ) + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddim/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddim/test_ddim.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddim/test_ddim.py new file mode 100644 index 0000000000000000000000000000000000000000..731635bea60577fd00eb59a3714874e53ebf9f7f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddim/test_ddim.py @@ -0,0 +1,142 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
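+# The fast tests below exercise DDIMPipeline with a tiny, randomly initialized UNet2DModel; the slow integration tests load the real google/ddpm-cifar10-32 and google/ddpm-ema-bedroom-256 checkpoints.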
+ +import unittest + +import numpy as np +import torch + +from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel + +from ...testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device +from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = DDIMPipeline + params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - { + "num_images_per_prompt", + "latents", + "callback", + "callback_steps", + } + batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DModel( + block_out_channels=(4, 8), + layers_per_block=1, + norm_num_groups=4, + sample_size=8, + in_channels=3, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + scheduler = DDIMScheduler() + components = {"unet": unet, "scheduler": scheduler} + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "batch_size": 1, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 8, 8, 3)) + expected_slice = np.array([0.0, 9.979e-01, 0.0, 9.999e-01, 9.986e-01, 9.991e-01, 7.106e-04, 0.0, 0.0]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=3e-3) + + def test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + +@slow +@require_torch_accelerator +class DDIMPipelineIntegrationTests(unittest.TestCase): + def test_inference_cifar10(self): + model_id = "google/ddpm-cifar10-32" + + unet = UNet2DModel.from_pretrained(model_id) + scheduler = DDIMScheduler() + + ddim = DDIMPipeline(unet=unet, scheduler=scheduler) + ddim.to(torch_device) + ddim.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + image = ddim(generator=generator, eta=0.0, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_inference_ema_bedroom(self): + model_id = "google/ddpm-ema-bedroom-256" + + unet = UNet2DModel.from_pretrained(model_id) + scheduler = DDIMScheduler.from_pretrained(model_id) + + ddpm = DDIMPipeline(unet=unet, 
scheduler=scheduler) + ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + image = ddpm(generator=generator, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 256, 256, 3) + expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddpm/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddpm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddpm/test_ddpm.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddpm/test_ddpm.py new file mode 100644 index 0000000000000000000000000000000000000000..04ee741d8eb8a234ad255826177869e0933b135a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ddpm/test_ddpm.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch + +from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel + +from ...testing_utils import enable_full_determinism, require_torch_accelerator, slow, torch_device + + +enable_full_determinism() + + +class DDPMPipelineFastTests(unittest.TestCase): + @property + def dummy_uncond_unet(self): + torch.manual_seed(0) + model = UNet2DModel( + block_out_channels=(4, 8), + layers_per_block=1, + norm_num_groups=4, + sample_size=8, + in_channels=3, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + return model + + def test_fast_inference(self): + device = "cpu" + unet = self.dummy_uncond_unet + scheduler = DDPMScheduler() + + ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) + ddpm.to(device) + ddpm.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=device).manual_seed(0) + image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images + + generator = torch.Generator(device=device).manual_seed(0) + image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="np", return_dict=False)[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 8, 8, 3) + expected_slice = np.array([0.0, 0.9996672, 0.00329116, 1.0, 0.9995991, 1.0, 0.0060907, 0.00115037, 0.0]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + def test_inference_predict_sample(self): + unet = self.dummy_uncond_unet + scheduler = DDPMScheduler(prediction_type="sample") + + ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) + ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + 
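+        # Generate twice with the same seed: indexing the pipeline output with [0] should match the `.images` attribute within the tolerance checked below.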
generator = torch.manual_seed(0) + image = ddpm(generator=generator, num_inference_steps=2, output_type="np").images + + generator = torch.manual_seed(0) + image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="np")[0] + + image_slice = image[0, -3:, -3:, -1] + image_eps_slice = image_eps[0, -3:, -3:, -1] + + assert image.shape == (1, 8, 8, 3) + tolerance = 1e-2 if torch_device != "mps" else 3e-2 + assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance + + +@slow +@require_torch_accelerator +class DDPMPipelineIntegrationTests(unittest.TestCase): + def test_inference_cifar10(self): + model_id = "google/ddpm-cifar10-32" + + unet = UNet2DModel.from_pretrained(model_id) + scheduler = DDPMScheduler.from_pretrained(model_id) + + ddpm = DDPMPipeline(unet=unet, scheduler=scheduler) + ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + image = ddpm(generator=generator, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d47374b07e22ff9bd904ae9e24f5833e9b524dcb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/__init__.py @@ -0,0 +1,272 @@ +import tempfile + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import DDPMScheduler, UNet2DConditionModel +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.pipelines.deepfloyd_if import IFWatermarker + +from ...testing_utils import torch_device +from ..test_pipelines_common import to_np + + +# WARN: the hf-internal-testing/tiny-random-t5 text encoder has some non-determinism in the `save_load` tests. 
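+# Because of that non-determinism, the IF test classes relax the save/load tolerances (e.g. `expected_max_diff=1e-1` for the float16 save/load tests) instead of expecting bit-exact reloads.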
+ + +class IFPipelineTesterMixin: + def _get_dummy_components(self): + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + unet = UNet2DConditionModel( + sample_size=32, + layers_per_block=1, + block_out_channels=[32, 64], + down_block_types=[ + "ResnetDownsampleBlock2D", + "SimpleCrossAttnDownBlock2D", + ], + mid_block_type="UNetMidBlock2DSimpleCrossAttn", + up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], + in_channels=3, + out_channels=6, + cross_attention_dim=32, + encoder_hid_dim=32, + attention_head_dim=8, + addition_embed_type="text", + addition_embed_type_num_heads=2, + cross_attention_norm="group_norm", + resnet_time_scale_shift="scale_shift", + act_fn="gelu", + ) + unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests + + torch.manual_seed(0) + scheduler = DDPMScheduler( + num_train_timesteps=1000, + beta_schedule="squaredcos_cap_v2", + beta_start=0.0001, + beta_end=0.02, + thresholding=True, + dynamic_thresholding_ratio=0.95, + sample_max_value=1.0, + prediction_type="epsilon", + variance_type="learned_range", + ) + + torch.manual_seed(0) + watermarker = IFWatermarker() + + return { + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "unet": unet, + "scheduler": scheduler, + "watermarker": watermarker, + "safety_checker": None, + "feature_extractor": None, + } + + def _get_superresolution_dummy_components(self): + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + unet = UNet2DConditionModel( + sample_size=32, + layers_per_block=[1, 2], + block_out_channels=[32, 64], + down_block_types=[ + "ResnetDownsampleBlock2D", + "SimpleCrossAttnDownBlock2D", + ], + mid_block_type="UNetMidBlock2DSimpleCrossAttn", + up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"], + in_channels=6, + out_channels=6, + cross_attention_dim=32, + encoder_hid_dim=32, + attention_head_dim=8, + addition_embed_type="text", + addition_embed_type_num_heads=2, + cross_attention_norm="group_norm", + resnet_time_scale_shift="scale_shift", + act_fn="gelu", + class_embed_type="timestep", + mid_block_scale_factor=1.414, + time_embedding_act_fn="gelu", + time_embedding_dim=32, + ) + unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests + + torch.manual_seed(0) + scheduler = DDPMScheduler( + num_train_timesteps=1000, + beta_schedule="squaredcos_cap_v2", + beta_start=0.0001, + beta_end=0.02, + thresholding=True, + dynamic_thresholding_ratio=0.95, + sample_max_value=1.0, + prediction_type="epsilon", + variance_type="learned_range", + ) + + torch.manual_seed(0) + image_noising_scheduler = DDPMScheduler( + num_train_timesteps=1000, + beta_schedule="squaredcos_cap_v2", + beta_start=0.0001, + beta_end=0.02, + ) + + torch.manual_seed(0) + watermarker = IFWatermarker() + + return { + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "unet": unet, + "scheduler": scheduler, + "image_noising_scheduler": image_noising_scheduler, + "watermarker": watermarker, + "safety_checker": None, + "feature_extractor": None, + } + + # this test is modified from the base class because if pipelines set the text encoder + # as optional with the intention that the user is allowed to 
encode the prompt once + # and then pass the embeddings directly to the pipeline. The base class test uses + # the unmodified arguments from `self.get_dummy_inputs` which will pass the unencoded + # prompt to the pipeline when the text encoder is set to None, throwing an error. + # So we make the test reflect the intended usage of setting the text encoder to None. + def _test_save_load_optional_components(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = inputs["prompt"] + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + if "image" in inputs: + image = inputs["image"] + else: + image = None + + if "mask_image" in inputs: + mask_image = inputs["mask_image"] + else: + mask_image = None + + if "original_image" in inputs: + original_image = inputs["original_image"] + else: + original_image = None + + prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt) + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "negative_prompt_embeds": negative_prompt_embeds, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + } + + if image is not None: + inputs["image"] = image + + if mask_image is not None: + inputs["mask_image"] = mask_image + + if original_image is not None: + inputs["original_image"] = original_image + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "negative_prompt_embeds": negative_prompt_embeds, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + } + + if image is not None: + inputs["image"] = image + + if mask_image is not None: + inputs["mask_image"] = mask_image + + if original_image is not None: + inputs["original_image"] = original_image + + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1e-4) + + # Modified from `PipelineTesterMixin` to set the attn processor as it's not serialized. + # This should be handled in the base test and then this method can be removed. 
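+    # Like `_test_save_load_optional_components` above, this override re-applies `AttnAddedKVProcessor` to the reloaded UNet before comparing outputs, since attention processors are not serialized with the pipeline.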
+ def _test_save_load_local(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor()) # For reproducibility tests + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1e-4) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if.py new file mode 100644 index 0000000000000000000000000000000000000000..e1870ddcbae92958cb0e91e021e27f5082e9d808 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if.py @@ -0,0 +1,146 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch + +from diffusers import ( + IFPipeline, +) +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + load_numpy, + require_accelerator, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference +from . 
import IFPipelineTesterMixin + + +@skip_mps +class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): + pipeline_class = IFPipeline + params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + + def get_dummy_components(self): + return self._get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + return inputs + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder + super().test_save_load_float16(expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) + + def test_save_load_local(self): + self._test_save_load_local() + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-2, + ) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + super().test_save_load_dduf(atol=1e-2, rtol=1e-2) + + @unittest.skip("Functionality is tested elsewhere.") + def test_save_load_optional_components(self): + pass + + +@slow +@require_torch_accelerator +class IFPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_if_text_to_image(self): + pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16) + pipe.unet.set_attn_processor(AttnAddedKVProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + backend_reset_max_memory_allocated(torch_device) + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe( + prompt="anime turtle", + num_inference_steps=2, + generator=generator, + output_type="np", + ) + + image = output.images[0] + + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes < 12 * 10**9 + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" + ) + assert_mean_pixel_difference(image, expected_image) + pipe.remove_all_hooks() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_img2img.py new file mode 100644 index 
0000000000000000000000000000000000000000..9d3c96052be65aafe0713bce30808cb700a5d166 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_img2img.py @@ -0,0 +1,163 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import torch + +from diffusers import IFImg2ImgPipeline +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + floats_tensor, + load_numpy, + require_accelerator, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference +from . import IFPipelineTesterMixin + + +@skip_mps +class IFImg2ImgPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): + pipeline_class = IFImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + + def get_dummy_components(self): + return self._get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + return inputs + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder + super().test_save_load_float16(expected_max_diff=1e-1) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) + + def test_save_load_local(self): + 
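+        # Use the IF-specific save/load helper from `IFPipelineTesterMixin` rather than the base mixin implementation (see deepfloyd_if/__init__.py).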
self._test_save_load_local() + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-2, + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + super().test_save_load_dduf(atol=1e-2, rtol=1e-2) + + @unittest.skip("Functionality is tested elsewhere.") + def test_save_load_optional_components(self): + pass + + +@slow +@require_torch_accelerator +class IFImg2ImgPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_if_img2img(self): + pipe = IFImg2ImgPipeline.from_pretrained( + "DeepFloyd/IF-I-L-v1.0", + variant="fp16", + torch_dtype=torch.float16, + ) + pipe.unet.set_attn_processor(AttnAddedKVProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + backend_reset_max_memory_allocated(torch_device) + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + + image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe( + prompt="anime turtle", + image=image, + num_inference_steps=2, + generator=generator, + output_type="np", + ) + image = output.images[0] + + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes < 12 * 10**9 + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" + ) + assert_mean_pixel_difference(image, expected_image) + + pipe.remove_all_hooks() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..e2114910edb0b60de305f8e3f9e245ed177d8dd6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import random +import unittest + +import torch + +from diffusers import IFImg2ImgSuperResolutionPipeline +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + floats_tensor, + load_numpy, + require_accelerator, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference +from . import IFPipelineTesterMixin + + +@skip_mps +class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): + pipeline_class = IFImg2ImgSuperResolutionPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"}) + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + + def get_dummy_components(self): + return self._get_superresolution_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "original_image": original_image, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + return inputs + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder + super().test_save_load_float16(expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) + + def test_save_load_local(self): + self._test_save_load_local() + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-2, + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + super().test_save_load_dduf(atol=1e-2, rtol=1e-2) + + @unittest.skip("Functionality is tested elsewhere.") + def test_save_load_optional_components(self): + pass + + +@slow +@require_torch_accelerator +class IFImg2ImgSuperResolutionPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def 
test_if_img2img_superresolution(self): + pipe = IFImg2ImgSuperResolutionPipeline.from_pretrained( + "DeepFloyd/IF-II-L-v1.0", + variant="fp16", + torch_dtype=torch.float16, + ) + pipe.unet.set_attn_processor(AttnAddedKVProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + backend_reset_max_memory_allocated(torch_device) + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + + generator = torch.Generator(device="cpu").manual_seed(0) + + original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device) + image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) + + output = pipe( + prompt="anime turtle", + image=image, + original_image=original_image, + generator=generator, + num_inference_steps=2, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (256, 256, 3) + + mem_bytes = backend_max_memory_allocated(torch_device) + + assert mem_bytes < 12 * 10**9 + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" + ) + assert_mean_pixel_difference(image, expected_image) + + pipe.remove_all_hooks() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_inpainting.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_inpainting.py new file mode 100644 index 0000000000000000000000000000000000000000..2679e0b776905cdd969b3d86aefe7f0590475d02 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_inpainting.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import torch + +from diffusers import IFInpaintingPipeline +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + floats_tensor, + load_numpy, + require_accelerator, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference +from . 
import IFPipelineTesterMixin + + +@skip_mps +class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): + pipeline_class = IFInpaintingPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + + def get_dummy_components(self): + return self._get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + return inputs + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder + super().test_save_load_float16(expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) + + def test_save_load_local(self): + self._test_save_load_local() + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-2, + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + super().test_save_load_dduf(atol=1e-2, rtol=1e-2) + + @unittest.skip("Test done elsewhere.") + def test_save_load_optional_components(self, expected_max_difference=0.0001): + pass + + +@slow +@require_torch_accelerator +class IFInpaintingPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_if_inpainting(self): + pipe = IFInpaintingPipeline.from_pretrained( + "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16 + ) + pipe.unet.set_attn_processor(AttnAddedKVProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) + mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device) + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe( + prompt="anime prompts", + image=image, + mask_image=mask_image, + num_inference_steps=2, + generator=generator, + output_type="np", + ) + image = output.images[0] + + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes < 12 * 10**9 + + 
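+        # Compare against a reference output hosted in the hf-internal-testing/diffusers-images dataset; `assert_mean_pixel_difference` tolerates a small average per-pixel deviation rather than requiring exact equality.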
expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" + ) + assert_mean_pixel_difference(image, expected_image) + pipe.remove_all_hooks() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..3d64556c6e41c1854e1d57feb137cd419b27781a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import torch + +from diffusers import IFInpaintingSuperResolutionPipeline +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + floats_tensor, + load_numpy, + require_accelerator, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference +from . 
import IFPipelineTesterMixin + + +@skip_mps +class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): + pipeline_class = IFInpaintingSuperResolutionPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"}) + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + + def get_dummy_components(self): + return self._get_superresolution_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device) + original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "original_image": original_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + return inputs + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder + super().test_save_load_float16(expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) + + def test_save_load_local(self): + self._test_save_load_local() + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-2, + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + super().test_save_load_dduf(atol=1e-2, rtol=1e-2) + + @unittest.skip("Test done elsewhere.") + def test_save_load_optional_components(self, expected_max_difference=0.0001): + pass + + +@slow +@require_torch_accelerator +class IFInpaintingSuperResolutionPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_if_inpainting_superresolution(self): + pipe = IFInpaintingSuperResolutionPipeline.from_pretrained( + "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16 + ) + pipe.unet.set_attn_processor(AttnAddedKVProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + # Super resolution test + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + generator = torch.Generator(device="cpu").manual_seed(0) + + image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) + original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device) + mask_image = 
floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device) + + output = pipe( + prompt="anime turtle", + image=image, + original_image=original_image, + mask_image=mask_image, + generator=generator, + num_inference_steps=2, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (256, 256, 3) + + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes < 12 * 10**9 + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" + ) + assert_mean_pixel_difference(image, expected_image) + + pipe.remove_all_hooks() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_superresolution.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..fa7c0fb2e062caec47a0044f8766178703c581d0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/deepfloyd_if/test_if_superresolution.py @@ -0,0 +1,157 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import torch + +from diffusers import IFSuperResolutionPipeline +from diffusers.models.attention_processor import AttnAddedKVProcessor +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + floats_tensor, + load_numpy, + require_accelerator, + require_hf_hub_version_greater, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference +from . 
import IFPipelineTesterMixin + + +@skip_mps +class IFSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase): + pipeline_class = IFSuperResolutionPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + + def get_dummy_components(self): + return self._get_superresolution_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + return inputs + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder + super().test_save_load_float16(expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) + + def test_save_load_local(self): + self._test_save_load_local() + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-2, + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self): + super().test_save_load_dduf(atol=1e-2, rtol=1e-2) + + @unittest.skip("Test done elsewhere.") + def test_save_load_optional_components(self, expected_max_difference=0.0001): + pass + + +@slow +@require_torch_accelerator +class IFSuperResolutionPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_if_superresolution(self): + pipe = IFSuperResolutionPipeline.from_pretrained( + "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16 + ) + pipe.unet.set_attn_processor(AttnAddedKVProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + # Super resolution test + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device) + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe( + prompt="anime turtle", + image=image, + generator=generator, + num_inference_steps=2, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (256, 256, 3) + + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes < 12 * 10**9 + + expected_image = load_numpy( + 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" + ) + assert_mean_pixel_difference(image, expected_image) + + pipe.remove_all_hooks() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/dit/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/dit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/dit/test_dit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/dit/test_dit.py new file mode 100644 index 0000000000000000000000000000000000000000..cd5c08ced3fce1762abea70cc5f2a249dbb41868 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/dit/test_dit.py @@ -0,0 +1,166 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import numpy as np +import torch + +from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DiTTransformer2DModel, DPMSolverMultistepScheduler +from diffusers.utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_numpy, + nightly, + numpy_cosine_similarity_distance, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import ( + CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, + CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = DiTPipeline + params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params - { + "latents", + "num_images_per_prompt", + "callback", + "callback_steps", + } + batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = DiTTransformer2DModel( + sample_size=16, + num_layers=2, + patch_size=4, + attention_head_dim=8, + num_attention_heads=2, + in_channels=4, + out_channels=8, + attention_bias=True, + activation_fn="gelu-approximate", + num_embeds_ada_norm=1000, + norm_type="ada_norm_zero", + norm_elementwise_affine=False, + ) + vae = AutoencoderKL() + scheduler = DDIMScheduler() + components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "class_labels": [1], + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + 
pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 16, 16, 3)) + expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) + + +@nightly +@require_torch_accelerator +class DiTPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_dit_256(self): + generator = torch.manual_seed(0) + + pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256") + pipe.to(torch_device) + + words = ["vase", "umbrella", "white shark", "white wolf"] + ids = pipe.get_label_ids(words) + + images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images + + for word, image in zip(words, images): + expected_image = load_numpy( + f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" + ) + assert np.abs((expected_image - image).max()) < 1e-2 + + def test_dit_512(self): + pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512") + pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + + words = ["vase", "umbrella"] + ids = pipe.get_label_ids(words) + + generator = torch.manual_seed(0) + images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images + + for word, image in zip(words, images): + expected_image = load_numpy( + f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}_512.npy" + ) + + expected_slice = expected_image.flatten() + output_slice = image.flatten() + + assert numpy_cosine_similarity_distance(expected_slice, output_slice) < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/easyanimate/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/easyanimate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/easyanimate/test_easyanimate.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/easyanimate/test_easyanimate.py new file mode 100644 index 0000000000000000000000000000000000000000..2dbb8639f1746719c347fb2938540dd08e5fc2c4 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/easyanimate/test_easyanimate.py @@ -0,0 +1,295 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import Qwen2Tokenizer, Qwen2VLForConditionalGeneration + +from diffusers import ( + AutoencoderKLMagvit, + EasyAnimatePipeline, + EasyAnimateTransformer3DModel, + FlowMatchEulerDiscreteScheduler, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class EasyAnimatePipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = EasyAnimatePipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = EasyAnimateTransformer3DModel( + num_attention_heads=2, + attention_head_dim=16, + in_channels=4, + out_channels=4, + time_embed_dim=2, + text_embed_dim=16, # Must match with tiny-random-t5 + num_layers=1, + sample_width=16, # latent width: 2 -> final width: 16 + sample_height=16, # latent height: 2 -> final height: 16 + patch_size=2, + ) + + torch.manual_seed(0) + vae = AutoencoderKLMagvit( + in_channels=3, + out_channels=3, + down_block_types=( + "SpatialDownBlock3D", + "SpatialTemporalDownBlock3D", + "SpatialTemporalDownBlock3D", + "SpatialTemporalDownBlock3D", + ), + up_block_types=( + "SpatialUpBlock3D", + "SpatialTemporalUpBlock3D", + "SpatialTemporalUpBlock3D", + "SpatialTemporalUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + latent_channels=4, + layers_per_block=1, + norm_num_groups=2, + spatial_group_norm=False, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + text_encoder = Qwen2VLForConditionalGeneration.from_pretrained( + "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration" + ) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": 5, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = 
self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (5, 3, 16, 16)) + expected_video = torch.randn(5, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = 
pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_dict_tuple_outputs_equivalent(self, expected_slice=None, expected_max_difference=0.001): + # Seems to need a higher tolerance + return super().test_dict_tuple_outputs_equivalent(expected_slice, expected_max_difference) + + def test_encode_prompt_works_in_isolation(self): + # Seems to need a higher tolerance + return super().test_encode_prompt_works_in_isolation(atol=1e-3, rtol=1e-3) + + +@slow +@require_torch_accelerator +class EasyAnimatePipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_EasyAnimate(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = EasyAnimatePipeline.from_pretrained("alibaba-pai/EasyAnimateV5.1-12b-zh", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload() + prompt = self.prompt + + videos = pipe( + prompt=prompt, + height=480, + width=720, + num_frames=5, + generator=generator, + num_inference_steps=2, + output_type="pt", + ).frames + + video = videos[0] + expected_video = torch.randn(1, 5, 480, 720, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video, expected_video) + assert max_diff < 1e-3, f"Max diff is too high. 
got {video}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux.py new file mode 100644 index 0000000000000000000000000000000000000000..c3e8517d640701ee291b14f18906952a0daa826a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux.py @@ -0,0 +1,368 @@ +import gc +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FasterCacheConfig, + FlowMatchEulerDiscreteScheduler, + FluxPipeline, + FluxTransformer2DModel, +) + +from ...testing_utils import ( + backend_empty_cache, + nightly, + numpy_cosine_similarity_distance, + require_big_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + FirstBlockCacheTesterMixin, + FluxIPAdapterTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + check_qkv_fused_layers_exist, +) + + +class FluxPipelineFastTests( + PipelineTesterMixin, + FluxIPAdapterTesterMixin, + PyramidAttentionBroadcastTesterMixin, + FasterCacheTesterMixin, + FirstBlockCacheTesterMixin, + unittest.TestCase, +): + pipeline_class = FluxPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + is_guidance_distilled=True, + ) + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=num_layers, + num_single_layers=num_single_layers, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = 
FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_flux_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + self.assertGreater(max_diff, 1e-6, "Outputs should be different for different prompts.") + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + self.assertTrue( + check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), + ("Something wrong with the fused attention layers. 
Expected all the attention projections to be fused."), + ) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + self.assertTrue( + np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), + ("Fusion of QKV projections shouldn't affect the outputs."), + ) + self.assertTrue( + np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), + ("Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled."), + ) + self.assertTrue( + np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), + ("Original outputs should match when fused QKV projections are disabled."), + ) + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + self.assertEqual( + (output_height, output_width), + (expected_height, expected_width), + f"Output shape {image.shape} does not match expected shape {(expected_height, expected_width)}", + ) + + def test_flux_true_cfg(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("generator") + + no_true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] + inputs["negative_prompt"] = "bad quality" + inputs["true_cfg_scale"] = 2.0 + true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] + self.assertFalse( + np.allclose(no_true_cfg_out, true_cfg_out), "Outputs should be different when true_cfg_scale is set." 
+ ) + + +@nightly +@require_big_accelerator +class FluxPipelineSlowTests(unittest.TestCase): + pipeline_class = FluxPipeline + repo_id = "black-forest-labs/FLUX.1-schnell" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + generator = torch.Generator(device="cpu").manual_seed(seed) + + prompt_embeds = torch.load( + hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") + ).to(torch_device) + pooled_prompt_embeds = torch.load( + hf_hub_download( + repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/pooled_prompt_embeds.pt" + ) + ).to(torch_device) + return { + "prompt_embeds": prompt_embeds, + "pooled_prompt_embeds": pooled_prompt_embeds, + "num_inference_steps": 2, + "guidance_scale": 0.0, + "max_sequence_length": 256, + "output_type": "np", + "generator": generator, + } + + def test_flux_inference(self): + pipe = self.pipeline_class.from_pretrained( + self.repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None + ).to(torch_device) + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10] + # fmt: off + expected_slice = np.array( + [0.3242, 0.3203, 0.3164, 0.3164, 0.3125, 0.3125, 0.3281, 0.3242, 0.3203, 0.3301, 0.3262, 0.3242, 0.3281, 0.3242, 0.3203, 0.3262, 0.3262, 0.3164, 0.3262, 0.3281, 0.3184, 0.3281, 0.3281, 0.3203, 0.3281, 0.3281, 0.3164, 0.3320, 0.3320, 0.3203], + dtype=np.float32, + ) + # fmt: on + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + self.assertLess( + max_diff, 1e-4, f"Image slice is different from expected slice: {image_slice} != {expected_slice}" + ) + + +@slow +@require_big_accelerator +class FluxIPAdapterPipelineSlowTests(unittest.TestCase): + pipeline_class = FluxPipeline + repo_id = "black-forest-labs/FLUX.1-dev" + image_encoder_pretrained_model_name_or_path = "openai/clip-vit-large-patch14" + weight_name = "ip_adapter.safetensors" + ip_adapter_repo_id = "XLabs-AI/flux-ip-adapter" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + prompt_embeds = torch.load( + hf_hub_download(repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/prompt_embeds.pt") + ) + pooled_prompt_embeds = torch.load( + hf_hub_download( + repo_id="diffusers/test-slices", repo_type="dataset", filename="flux/pooled_prompt_embeds.pt" + ) + ) + negative_prompt_embeds = torch.zeros_like(prompt_embeds) + negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds) + ip_adapter_image = np.zeros((1024, 1024, 3), dtype=np.uint8) + return { + "prompt_embeds": prompt_embeds, + "pooled_prompt_embeds": pooled_prompt_embeds, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_pooled_prompt_embeds": negative_pooled_prompt_embeds, + "ip_adapter_image": ip_adapter_image, + "num_inference_steps": 2, + "guidance_scale": 3.5, + "true_cfg_scale": 4.0, + "max_sequence_length": 256, + "output_type": "np", + "generator": generator, + } + + def test_flux_ip_adapter_inference(self): + pipe = 
self.pipeline_class.from_pretrained( + self.repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None + ) + pipe.load_ip_adapter( + self.ip_adapter_repo_id, + weight_name=self.weight_name, + image_encoder_pretrained_model_name_or_path=self.image_encoder_pretrained_model_name_or_path, + ) + pipe.set_ip_adapter_scale(1.0) + pipe.enable_model_cpu_offload() + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10] + + # fmt: off + expected_slice = np.array( + [0.1855, 0.1680, 0.1406, 0.1953, 0.1699, 0.1465, 0.2012, 0.1738, 0.1484, 0.2051, 0.1797, 0.1523, 0.2012, 0.1719, 0.1445, 0.2070, 0.1777, 0.1465, 0.2090, 0.1836, 0.1484, 0.2129, 0.1875, 0.1523, 0.2090, 0.1816, 0.1484, 0.2110, 0.1836, 0.1543], + dtype=np.float32, + ) + # fmt: on + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + self.assertLess( + max_diff, 1e-4, f"Image slice is different from expected slice: {image_slice} != {expected_slice}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control.py new file mode 100644 index 0000000000000000000000000000000000000000..7e966470a33631b46f52c94035d0ee3867f97016 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control.py @@ -0,0 +1,175 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxTransformer2DModel + +from ...testing_utils import torch_device +from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist + + +class FluxControlPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = FluxControlPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=8, + out_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + 
scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + control_image = Image.new("RGB", (16, 16), 0) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "control_image": control_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_flux_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + self.assertTrue( + check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), + ("Something wrong with the fused attention layers. Expected all the attention projections to be fused."), + ) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..e56136f2e91bb1408beff1d76457f0cfe2138ff6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control_img2img.py @@ -0,0 +1,144 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxControlImg2ImgPipeline, + FluxTransformer2DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class FluxControlImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = FluxControlImg2ImgPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=8, + out_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = 
torch.Generator(device="cpu").manual_seed(seed) + + image = Image.new("RGB", (16, 16), 0) + control_image = Image.new("RGB", (16, 16), 0) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "control_image": control_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "max_sequence_length": 48, + "strength": 0.8, + "output_type": "np", + } + return inputs + + def test_flux_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..e42c5fc2aab538a8d33ba3c33667220c581d9e62 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_control_inpaint.py @@ -0,0 +1,169 @@ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxControlInpaintPipeline, + FluxTransformer2DModel, +) + +from ...testing_utils import ( + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, check_qkv_fused_layers_exist + + +class FluxControlInpaintPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = FluxControlInpaintPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=8, + out_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = 
CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + image = Image.new("RGB", (8, 8), 0) + control_image = Image.new("RGB", (8, 8), 0) + mask_image = Image.new("RGB", (8, 8), 255) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "control_image": control_image, + "generator": generator, + "image": image, + "mask_image": mask_image, + "strength": 0.8, + "num_inference_steps": 2, + "guidance_scale": 30.0, + "height": 8, + "width": 8, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + self.assertTrue( + check_qkv_fused_layers_exist(pipe.transformer, ["to_qkv"]), + ("Something wrong with the fused attention layers. Expected all the attention projections to be fused."), + ) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_fill.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_fill.py new file mode 100644 index 0000000000000000000000000000000000000000..25a4a3354820ca43cab1d21335925a0c233f52b9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_fill.py @@ -0,0 +1,146 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxFillPipeline, FluxTransformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class FluxFillPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = FluxFillPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=20, + out_channels=8, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=2, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = torch.ones((1, 1, 32, 
32)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 32, + "width": 32, + "max_sequence_length": 48, + "output_type": "np", + } + return inputs + + def test_flux_fill_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..6f435760aef59d2def7319984f700193d57ef03c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_img2img.py @@ -0,0 +1,141 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxImg2ImgPipeline, FluxTransformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class FluxImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin, FluxIPAdapterTesterMixin): + pipeline_class = FluxImg2ImgPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + 
hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "max_sequence_length": 48, + "strength": 0.8, + "output_type": "np", + } + return inputs + + def test_flux_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..6324ff236e10b87af71bca6f381df1674624bb3a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_inpaint.py @@ -0,0 +1,143 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxInpaintPipeline, FluxTransformer2DModel + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from 
..test_pipelines_common import FluxIPAdapterTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class FluxInpaintPipelineFastTests(unittest.TestCase, PipelineTesterMixin, FluxIPAdapterTesterMixin): + pipeline_class = FluxInpaintPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=8, + num_layers=1, + num_single_layers=1, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=2, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = torch.ones((1, 1, 32, 32)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 32, + "width": 32, + "max_sequence_length": 48, + "strength": 0.8, + "output_type": "np", + } + return inputs + + def test_flux_inpaint_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width 
- width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_kontext.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_kontext.py new file mode 100644 index 0000000000000000000000000000000000000000..5c78964ea54f24d4c57bb19616ef65e9b49d9b67 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_kontext.py @@ -0,0 +1,177 @@ +import unittest + +import numpy as np +import PIL.Image +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FasterCacheConfig, + FlowMatchEulerDiscreteScheduler, + FluxKontextPipeline, + FluxTransformer2DModel, +) + +from ...testing_utils import torch_device +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + FluxIPAdapterTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, +) + + +class FluxKontextPipelineFastTests( + unittest.TestCase, + PipelineTesterMixin, + FluxIPAdapterTesterMixin, + PyramidAttentionBroadcastTesterMixin, + FasterCacheTesterMixin, +): + pipeline_class = FluxKontextPipeline + params = frozenset( + ["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] + ) + batch_params = frozenset(["image", "prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + is_guidance_distilled=True, + ) + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=num_layers, + num_single_layers=num_single_layers, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": 
transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + image = PIL.Image.new("RGB", (32, 32), 0) + inputs = { + "image": image, + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "max_area": 8 * 8, + "max_sequence_length": 48, + "output_type": "np", + "_auto_resize": False, + } + return inputs + + def test_flux_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + + inputs.update({"height": height, "width": width, "max_area": height * width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + def test_flux_true_cfg(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("generator") + + no_true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] + inputs["negative_prompt"] = "bad quality" + inputs["true_cfg_scale"] = 2.0 + true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] + assert not np.allclose(no_true_cfg_out, true_cfg_out) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..9a2e32056dcb2fd4c20593329d72b4b20e5f6248 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_kontext_inpaint.py @@ -0,0 +1,190 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FasterCacheConfig, + FlowMatchEulerDiscreteScheduler, + FluxKontextInpaintPipeline, + FluxTransformer2DModel, +) + +from ...testing_utils import floats_tensor, torch_device +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + FluxIPAdapterTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, +) + + +class FluxKontextInpaintPipelineFastTests( + unittest.TestCase, + PipelineTesterMixin, + FluxIPAdapterTesterMixin, + PyramidAttentionBroadcastTesterMixin, + FasterCacheTesterMixin, +): + pipeline_class = FluxKontextInpaintPipeline + params = frozenset( + 
["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] + ) + batch_params = frozenset(["image", "prompt"]) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + is_guidance_distilled=True, + ) + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=4, + num_layers=num_layers, + num_single_layers=num_single_layers, + attention_head_dim=16, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[4, 4, 8], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = torch.ones((1, 1, 32, 32)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 32, + "width": 32, + "max_sequence_length": 48, + "strength": 0.8, + "output_type": "np", + "_auto_resize": False, + } + return inputs + + def test_flux_inpaint_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + # For some reasons, they don't show large differences + assert max_diff > 1e-6 + + def test_flux_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = 
self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 56)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.vae_scale_factor * 2) + # Because output shape is the same as the input shape, we need to create a dummy image and mask image + image = floats_tensor((1, 3, height, width), rng=random.Random(0)).to(torch_device) + mask_image = torch.ones((1, 1, height, width)).to(torch_device) + + inputs.update( + { + "height": height, + "width": width, + "max_area": height * width, + "image": image, + "mask_image": mask_image, + } + ) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + def test_flux_true_cfg(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("generator") + + no_true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] + inputs["negative_prompt"] = "bad quality" + inputs["true_cfg_scale"] = 2.0 + true_cfg_out = pipe(**inputs, generator=torch.manual_seed(0)).images[0] + assert not np.allclose(no_true_cfg_out, true_cfg_out) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_redux.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_redux.py new file mode 100644 index 0000000000000000000000000000000000000000..bbeee28e6a62a375310640e52221aaebf7feb25c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/flux/test_pipeline_flux_redux.py @@ -0,0 +1,150 @@ +import gc +import unittest + +import numpy as np +import torch + +from diffusers import FluxPipeline, FluxPriorReduxPipeline +from diffusers.utils import load_image + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + numpy_cosine_similarity_distance, + require_big_accelerator, + slow, + torch_device, +) + + +@slow +@require_big_accelerator +class FluxReduxSlowTests(unittest.TestCase): + pipeline_class = FluxPriorReduxPipeline + repo_id = "black-forest-labs/FLUX.1-Redux-dev" + base_pipeline_class = FluxPipeline + base_repo_id = "black-forest-labs/FLUX.1-schnell" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + init_image = load_image( + "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy/img5.png" + ) + return {"image": init_image} + + def get_base_pipeline_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + return { + "num_inference_steps": 2, + "guidance_scale": 2.0, + "output_type": "np", + "generator": generator, + } + + def test_flux_redux_inference(self): + pipe_redux = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16) + pipe_base = self.base_pipeline_class.from_pretrained( + self.base_repo_id, torch_dtype=torch.bfloat16, text_encoder=None, text_encoder_2=None + ) + pipe_redux.to(torch_device) + pipe_base.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + base_pipeline_inputs = self.get_base_pipeline_inputs(torch_device) + + redux_pipeline_output = pipe_redux(**inputs) + image = 
pipe_base(**base_pipeline_inputs, **redux_pipeline_output).images[0] + + image_slice = image[0, :10, :10] + expected_slices = Expectations( + { + ("cuda", 7): np.array( + [ + 0.30078125, + 0.37890625, + 0.46875, + 0.28125, + 0.36914062, + 0.47851562, + 0.28515625, + 0.375, + 0.4765625, + 0.28125, + 0.375, + 0.48046875, + 0.27929688, + 0.37695312, + 0.47851562, + 0.27734375, + 0.38085938, + 0.4765625, + 0.2734375, + 0.38085938, + 0.47265625, + 0.27539062, + 0.37890625, + 0.47265625, + 0.27734375, + 0.37695312, + 0.47070312, + 0.27929688, + 0.37890625, + 0.47460938, + ], + dtype=np.float32, + ), + ("xpu", 3): np.array( + [ + 0.20507812, + 0.30859375, + 0.3984375, + 0.18554688, + 0.30078125, + 0.41015625, + 0.19921875, + 0.3125, + 0.40625, + 0.19726562, + 0.3125, + 0.41601562, + 0.19335938, + 0.31445312, + 0.4140625, + 0.1953125, + 0.3203125, + 0.41796875, + 0.19726562, + 0.32421875, + 0.41992188, + 0.19726562, + 0.32421875, + 0.41992188, + 0.20117188, + 0.32421875, + 0.41796875, + 0.203125, + 0.32617188, + 0.41796875, + ], + dtype=np.float32, + ), + } + ) + expected_slice = expected_slices.get_expectation() + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hidream_image/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hidream_image/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hidream_image/test_pipeline_hidream.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hidream_image/test_pipeline_hidream.py new file mode 100644 index 0000000000000000000000000000000000000000..ec8d36e1d35519ba9588e9614dd15ddfb2e594af --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hidream_image/test_pipeline_hidream.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
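+# Fast tests for HiDreamImagePipeline: tiny randomly seeded components (two CLIP encoders, a T5 encoder, a Llama causal LM, and a small HiDream transformer) keep the text-to-image path cheap enough to run on CPU.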
+ +import unittest + +import numpy as np +import torch +from transformers import ( + AutoTokenizer, + CLIPTextConfig, + CLIPTextModelWithProjection, + CLIPTokenizer, + LlamaForCausalLM, + T5EncoderModel, +) + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + HiDreamImagePipeline, + HiDreamImageTransformer2DModel, +) + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class HiDreamImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = HiDreamImagePipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "prompt_embeds", "negative_prompt_embeds"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + required_optional_params = PipelineTesterMixin.required_optional_params + test_layerwise_casting = True + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = HiDreamImageTransformer2DModel( + patch_size=2, + in_channels=4, + out_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=8, + num_attention_heads=4, + caption_channels=[32, 16], + text_emb_dim=64, + num_routed_experts=4, + num_activated_experts=2, + axes_dims_rope=(4, 2, 2), + max_resolution=(32, 32), + llama_layers=(0, 1), + ).eval() + torch.manual_seed(0) + vae = AutoencoderKL(scaling_factor=0.3611, shift_factor=0.1159) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + max_position_embeddings=128, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + text_encoder_4 = LlamaForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") + text_encoder_4.generation_config.pad_token_id = 1 + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer_4 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") + + scheduler = FlowMatchEulerDiscreteScheduler() + + components = { + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "text_encoder_3": text_encoder_3, + "tokenizer_3": tokenizer_3, + "text_encoder_4": text_encoder_4, + "tokenizer_4": tokenizer_4, + "transformer": transformer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + 
} + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + self.assertEqual(generated_image.shape, (128, 128, 3)) + + # fmt: off + expected_slice = np.array([0.4507, 0.5256, 0.4205, 0.5791, 0.4848, 0.4831, 0.4443, 0.5107, 0.6586, 0.3163, 0.7318, 0.5933, 0.6252, 0.5512, 0.5357, 0.5983]) + # fmt: on + + generated_slice = generated_image.flatten() + generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(np.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-4) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py new file mode 100644 index 0000000000000000000000000000000000000000..27b5bde3105040f94e0ed14b5cb1a392a09932c0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_image2video.py @@ -0,0 +1,389 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
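+# Fast tests for HunyuanVideoImageToVideoPipeline: a tiny Llava-based text encoder, a CLIP text encoder, and a small HunyuanVideo transformer/VAE are wired together so image-to-video inference can be exercised on CPU.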
+ +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, + LlamaConfig, + LlamaTokenizerFast, + LlavaConfig, + LlavaForConditionalGeneration, +) +from transformers.models.clip import CLIPVisionConfig + +from diffusers import ( + AutoencoderKLHunyuanVideo, + FlowMatchEulerDiscreteScheduler, + HunyuanVideoImageToVideoPipeline, + HunyuanVideoTransformer3DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np + + +enable_full_determinism() + + +class HunyuanVideoImageToVideoPipelineFastTests( + PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase +): + pipeline_class = HunyuanVideoImageToVideoPipeline + params = frozenset( + ["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] + ) + batch_params = frozenset(["prompt", "image"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = HunyuanVideoTransformer3DModel( + in_channels=2 * 4 + 1, + out_channels=4, + num_attention_heads=2, + attention_head_dim=10, + num_layers=num_layers, + num_single_layers=num_single_layers, + num_refiner_layers=1, + patch_size=1, + patch_size_t=1, + guidance_embeds=False, + text_embed_dim=16, + pooled_projection_dim=8, + rope_axes_dim=(2, 4, 4), + image_condition_type="latent_concat", + ) + + torch.manual_seed(0) + vae = AutoencoderKLHunyuanVideo( + in_channels=3, + out_channels=3, + latent_channels=4, + down_block_types=( + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + ), + up_block_types=( + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + layers_per_block=1, + act_fn="silu", + norm_num_groups=4, + scaling_factor=0.476986, + spatial_compression_ratio=8, + temporal_compression_ratio=4, + mid_block_add_attention=True, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + text_config = LlamaConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=16, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=100, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + vision_config = CLIPVisionConfig( + hidden_size=8, + intermediate_size=37, + projection_dim=32, + num_attention_heads=4, + num_hidden_layers=2, + image_size=224, + ) + llava_text_encoder_config = LlavaConfig(vision_config, text_config, pad_token_id=100, image_token_index=101) + + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=8, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = LlavaForConditionalGeneration(llava_text_encoder_config) 
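+ # HunyuanVideo I2V prompts go through the Llava (vision + language) encoder built above; the matching Llama fast tokenizer is loaded from the shared dummy checkpoint below.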
+ tokenizer = LlamaTokenizerFast.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer") + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModel(clip_text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + image_processor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "image_processor": image_processor, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "prompt_template": { + "template": "{}", + "crop_start": 0, + "image_emb_len": 49, + "image_emb_start": 5, + "image_emb_end": 54, + "double_return_token_id": 0, + }, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 4.5, + "height": image_height, + "width": image_width, + "num_frames": 9, + "max_sequence_length": 64, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + # NOTE: The expected video has 4 lesser frames because they are dropped in the pipeline + self.assertEqual(generated_video.shape, (5, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.444, 0.479, 0.4485, 0.5752, 0.3539, 0.1548, 0.2706, 0.3593, 0.5323, 0.6635, 0.6795, 0.5255, 0.5091, 0.345, 0.4276, 0.4128]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), + "The generated video does not match the expected slice.", + ) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in 
pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + # Seems to require higher tolerance than the other tests + expected_diff_max = 0.6 + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. 
So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass + + @unittest.skip( + "Encode prompt currently does not work in isolation because of requiring image embeddings from image processor. The test does not handle this case, or we need to rewrite encode_prompt." + ) + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py new file mode 100644 index 0000000000000000000000000000000000000000..7ebe797febfa2b15e36e9cb80ff618d01b1ca2ad --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_skyreels_image2video.py @@ -0,0 +1,345 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
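+# Fast tests for HunyuanSkyreelsImageToVideoPipeline; the dummy setup mirrors the HunyuanVideo I2V tests but swaps in a plain LlamaModel text encoder and a transformer with guidance embeddings.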
+ +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, LlamaConfig, LlamaModel, LlamaTokenizer + +from diffusers import ( + AutoencoderKLHunyuanVideo, + FlowMatchEulerDiscreteScheduler, + HunyuanSkyreelsImageToVideoPipeline, + HunyuanVideoTransformer3DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np + + +enable_full_determinism() + + +class HunyuanSkyreelsImageToVideoPipelineFastTests( + PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase +): + pipeline_class = HunyuanSkyreelsImageToVideoPipeline + params = frozenset( + ["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] + ) + batch_params = frozenset(["prompt", "image"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = HunyuanVideoTransformer3DModel( + in_channels=8, + out_channels=4, + num_attention_heads=2, + attention_head_dim=10, + num_layers=num_layers, + num_single_layers=num_single_layers, + num_refiner_layers=1, + patch_size=1, + patch_size_t=1, + guidance_embeds=True, + text_embed_dim=16, + pooled_projection_dim=8, + rope_axes_dim=(2, 4, 4), + ) + + torch.manual_seed(0) + vae = AutoencoderKLHunyuanVideo( + in_channels=3, + out_channels=3, + latent_channels=4, + down_block_types=( + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + ), + up_block_types=( + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + layers_per_block=1, + act_fn="silu", + norm_num_groups=4, + scaling_factor=0.476986, + spatial_compression_ratio=8, + temporal_compression_ratio=4, + mid_block_add_attention=True, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + llama_text_encoder_config = LlamaConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=16, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=8, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = LlamaModel(llama_text_encoder_config) + tokenizer = LlamaTokenizer.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer") + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModel(clip_text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + 
"tokenizer_2": tokenizer_2, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "prompt_template": { + "template": "{}", + "crop_start": 0, + }, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 4.5, + "height": 16, + "width": 16, + # 4 * k + 1 is the recommendation + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.5832, 0.5498, 0.4839, 0.4744, 0.4515, 0.4832, 0.496, 0.563, 0.5918, 0.5979, 0.5101, 0.6168, 0.6613, 0.536, 0.55, 0.5775]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), + "The generated video does not match the expected slice.", + ) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + 
inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + # Seems to require higher tolerance than the other tests + expected_diff_max = 0.6 + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_video.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdf3ee20e1b4babffb2826994f6432464f16590 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_video.py @@ -0,0 +1,356 @@ +# Copyright 2025 The HuggingFace Team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, LlamaConfig, LlamaModel, LlamaTokenizer + +from diffusers import ( + AutoencoderKLHunyuanVideo, + FasterCacheConfig, + FlowMatchEulerDiscreteScheduler, + HunyuanVideoPipeline, + HunyuanVideoTransformer3DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + FirstBlockCacheTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + to_np, +) + + +enable_full_determinism() + + +class HunyuanVideoPipelineFastTests( + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + FasterCacheTesterMixin, + FirstBlockCacheTesterMixin, + unittest.TestCase, +): + pipeline_class = HunyuanVideoPipeline + params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"]) + batch_params = frozenset(["prompt"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + # there is no xformers processor for Flux + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + is_guidance_distilled=True, + ) + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = HunyuanVideoTransformer3DModel( + in_channels=4, + out_channels=4, + num_attention_heads=2, + attention_head_dim=10, + num_layers=num_layers, + num_single_layers=num_single_layers, + num_refiner_layers=1, + patch_size=1, + patch_size_t=1, + guidance_embeds=True, + text_embed_dim=16, + pooled_projection_dim=8, + rope_axes_dim=(2, 4, 4), + ) + + torch.manual_seed(0) + vae = AutoencoderKLHunyuanVideo( + in_channels=3, + out_channels=3, + latent_channels=4, + down_block_types=( + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + ), + up_block_types=( + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + layers_per_block=1, + act_fn="silu", + norm_num_groups=4, + scaling_factor=0.476986, + spatial_compression_ratio=8, + temporal_compression_ratio=4, + mid_block_add_attention=True, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + llama_text_encoder_config = LlamaConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=16, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + 
projection_dim=32, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=8, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = LlamaModel(llama_text_encoder_config) + tokenizer = LlamaTokenizer.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer") + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModel(clip_text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "prompt_template": { + "template": "{}", + "crop_start": 0, + }, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 4.5, + "height": 16, + "width": 16, + # 4 * k + 1 is the recommendation + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.3946, 0.4649, 0.3196, 0.4569, 0.3312, 0.3687, 0.3216, 0.3972, 0.4469, 0.3888, 0.3929, 0.3802, 0.3479, 0.3888, 0.3825, 0.3542]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), + "The generated video does not match the expected slice.", + ) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert 
tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + # Seems to require higher tolerance than the other tests + expected_diff_max = 0.6 + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. 
So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py new file mode 100644 index 0000000000000000000000000000000000000000..51c258b15c3843a587edc48936fc4553d7717596 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuan_video/test_hunyuan_video_framepack.py @@ -0,0 +1,404 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, + LlamaConfig, + LlamaModel, + LlamaTokenizer, + SiglipImageProcessor, + SiglipVisionModel, +) + +from diffusers import ( + AutoencoderKLHunyuanVideo, + FasterCacheConfig, + FlowMatchEulerDiscreteScheduler, + HunyuanVideoFramepackPipeline, + HunyuanVideoFramepackTransformer3DModel, +) + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + to_np, +) + + +enable_full_determinism() + + +class HunyuanVideoFramepackPipelineFastTests( + PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, FasterCacheTesterMixin, unittest.TestCase +): + pipeline_class = HunyuanVideoFramepackPipeline + params = frozenset( + ["image", "prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"] + ) + batch_params = frozenset(["image", "prompt"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + is_guidance_distilled=True, + ) + + def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1): + torch.manual_seed(0) + transformer = HunyuanVideoFramepackTransformer3DModel( + in_channels=4, + out_channels=4, + num_attention_heads=2, + attention_head_dim=10, + num_layers=num_layers, + num_single_layers=num_single_layers, + num_refiner_layers=1, + patch_size=2, + patch_size_t=1, + guidance_embeds=True, + text_embed_dim=16, + pooled_projection_dim=8, + rope_axes_dim=(2, 4, 4), + image_condition_type=None, + has_image_proj=True, + image_proj_dim=32, + has_clean_x_embedder=True, + ) + + torch.manual_seed(0) + vae = 
AutoencoderKLHunyuanVideo( + in_channels=3, + out_channels=3, + latent_channels=4, + down_block_types=( + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + "HunyuanVideoDownBlock3D", + ), + up_block_types=( + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + "HunyuanVideoUpBlock3D", + ), + block_out_channels=(8, 8, 8, 8), + layers_per_block=1, + act_fn="silu", + norm_num_groups=4, + scaling_factor=0.476986, + spatial_compression_ratio=8, + temporal_compression_ratio=4, + mid_block_add_attention=True, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + llama_text_encoder_config = LlamaConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=16, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=8, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = LlamaModel(llama_text_encoder_config) + tokenizer = LlamaTokenizer.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer") + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModel(clip_text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + feature_extractor = SiglipImageProcessor.from_pretrained( + "hf-internal-testing/tiny-random-SiglipVisionModel", size={"height": 30, "width": 30} + ) + image_encoder = SiglipVisionModel.from_pretrained("hf-internal-testing/tiny-random-SiglipVisionModel") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "feature_extractor": feature_extractor, + "image_encoder": image_encoder, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image_height = 32 + image_width = 32 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "prompt_template": { + "template": "{}", + "crop_start": 0, + }, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 4.5, + "height": image_height, + "width": image_width, + "num_frames": 9, + "latent_window_size": 3, + "max_sequence_length": 256, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (13, 3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([0.363, 0.3384, 0.3426, 0.3512, 0.3372, 0.3276, 0.417, 0.4061, 0.5221, 0.467, 0.4813, 0.4556, 0.4107, 0.3945, 0.4049, 0.4551]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), 
+ "The generated video does not match the expected slice.", + ) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def 
test_vae_tiling(self, expected_diff_max: float = 0.2): + # Seems to require higher tolerance than the other tests + expected_diff_max = 0.6 + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + def test_float16_inference(self, expected_max_diff=0.2): + # NOTE: this test needs a higher tolerance because of multiple forwards through + # the model, which compounds the overall fp32 vs fp16 numerical differences. It + # shouldn't be expected that the results are the same, so we bump the tolerance. + return super().test_float16_inference(expected_max_diff) + + @unittest.skip("The image_encoder uses SiglipVisionModel, which does not support sequential CPU offloading.") + def test_sequential_cpu_offload_forward_pass(self): + # https://github.com/huggingface/transformers/blob/21cb353b7b4f77c6f5f5c3341d660f86ff416d04/src/transformers/models/siglip/modeling_siglip.py#L803 + # This is because it instantiates its attention layer from torch.nn.MultiheadAttention, which calls to + # `torch.nn.functional.multi_head_attention_forward` with the weights and bias. Since the hook is never + # triggered with a forward pass call, the weights stay on the CPU. There are more examples where we skip + # this test because of MHA (example: HunyuanDiT because of AttentionPooling layer). + pass + + @unittest.skip("The image_encoder uses SiglipVisionModel, which does not support sequential CPU offloading.") + def test_sequential_offload_forward_pass_twice(self): + # https://github.com/huggingface/transformers/blob/21cb353b7b4f77c6f5f5c3341d660f86ff416d04/src/transformers/models/siglip/modeling_siglip.py#L803 + # This is because it instantiates its attention layer from torch.nn.MultiheadAttention, which calls to + # `torch.nn.functional.multi_head_attention_forward` with the weights and bias. Since the hook is never + # triggered with a forward pass call, the weights stay on the CPU. There are more examples where we skip + # this test because of MHA (example: HunyuanDiT because of AttentionPooling layer). + pass + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to an embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to an embedding lookup error. This test uses a long prompt that causes the error." 
+ ) + def test_inference_batch_single_identical(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuandit/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuandit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuandit/test_hunyuan_dit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuandit/test_hunyuan_dit.py new file mode 100644 index 0000000000000000000000000000000000000000..2a329f10bc80e3ba65ce354f9d853ea6a5348c2d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/hunyuandit/test_hunyuan_dit.py @@ -0,0 +1,348 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, BertModel, T5EncoderModel + +from diffusers import AutoencoderKL, DDPMScheduler, HunyuanDiT2DModel, HunyuanDiTPipeline + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, + to_np, +) + + +enable_full_determinism() + + +class HunyuanDiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = HunyuanDiTPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + required_optional_params = PipelineTesterMixin.required_optional_params + test_layerwise_casting = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = HunyuanDiT2DModel( + sample_size=16, + num_layers=2, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + in_channels=4, + cross_attention_dim=32, + cross_attention_dim_t5=32, + pooled_projection_dim=16, + hidden_size=24, + activation_fn="gelu-approximate", + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDPMScheduler() + text_encoder = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "safety_checker": None, + 
"feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "use_resolution_binning": False, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 16, 16, 3)) + expected_slice = np.array( + [0.56939435, 0.34541583, 0.35915792, 0.46489206, 0.38775963, 0.45004836, 0.5957267, 0.59481275, 0.33287364] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + @unittest.skip("The HunyuanDiT Attention pooling layer does not support sequential CPU offloading.") + def test_sequential_cpu_offload_forward_pass(self): + # TODO(YiYi) need to fix later + # This is because it instantiates it's attention layer from torch.nn.MultiheadAttention, which calls to + # `torch.nn.functional.multi_head_attention_forward` with the weights and bias. Since the hook is never + # triggered with a forward pass call, the weights stay on the CPU. There are more examples where we skip + # this test because of MHA (example: HunyuanVideo Framepack) + pass + + @unittest.skip("The HunyuanDiT Attention pooling layer does not support sequential CPU offloading.") + def test_sequential_offload_forward_pass_twice(self): + # TODO(YiYi) need to fix later + # This is because it instantiates it's attention layer from torch.nn.MultiheadAttention, which calls to + # `torch.nn.functional.multi_head_attention_forward` with the weights and bias. Since the hook is never + # triggered with a forward pass call, the weights stay on the CPU. 
There are more examples where we skip + # this test because of MHA (example: HunyuanVideo Framepack) + pass + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + expected_max_diff=1e-3, + ) + + def test_feed_forward_chunking(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_no_chunking = image[0, -3:, -3:, -1] + + pipe.transformer.enable_forward_chunking(chunk_size=1, dim=0) + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_chunking = image[0, -3:, -3:, -1] + + max_diff = np.abs(to_np(image_slice_no_chunking) - to_np(image_slice_chunking)).max() + self.assertLess(max_diff, 1e-4) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image = pipe(**inputs)[0] + original_image_slice = image[0, -3:, -3:, -1] + + pipe.transformer.fuse_qkv_projections() + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image_fused = pipe(**inputs)[0] + image_slice_fused = image_fused[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image_disabled = pipe(**inputs)[0] + image_slice_disabled = image_disabled[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + @unittest.skip( + "Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have." 
+ ) + def test_encode_prompt_works_in_isolation(self): + pass + + def test_save_load_optional_components(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = inputs["prompt"] + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + ( + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0) + + ( + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + ) = pipe.encode_prompt( + prompt, + device=torch_device, + dtype=torch.float32, + text_encoder_index=1, + ) + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attention_mask, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": negative_prompt_attention_mask, + "prompt_embeds_2": prompt_embeds_2, + "prompt_attention_mask_2": prompt_attention_mask_2, + "negative_prompt_embeds_2": negative_prompt_embeds_2, + "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "use_resolution_binning": False, + } + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attention_mask, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": negative_prompt_attention_mask, + "prompt_embeds_2": prompt_embeds_2, + "prompt_attention_mask_2": prompt_attention_mask_2, + "negative_prompt_embeds_2": negative_prompt_embeds_2, + "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "use_resolution_binning": False, + } + + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1e-4) + + +@slow +@require_torch_accelerator +class HunyuanDiTPipelineIntegrationTests(unittest.TestCase): + prompt = "一个宇航员在骑马" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_hunyuan_dit_1024(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = HunyuanDiTPipeline.from_pretrained( + "XCLiu/HunyuanDiT-0523", 
revision="refs/pr/2", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + image = pipe( + prompt=prompt, height=1024, width=1024, generator=generator, num_inference_steps=2, output_type="np" + ).images + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array( + [0.48388672, 0.33789062, 0.30737305, 0.47875977, 0.25097656, 0.30029297, 0.4440918, 0.26953125, 0.30078125] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) + assert max_diff < 1e-3, f"Max diff is too high. got {image_slice.flatten()}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ip_adapters/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ip_adapters/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..32590111cdf3371a121830e1d9fe90c9934b0154 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py @@ -0,0 +1,745 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
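+# NOTE: The classes below are slow integration tests rather than fast dummy-component tests: they download +# real Stable Diffusion / SDXL checkpoints and IP-Adapter weights from the Hub, run a handful of denoising +# steps, and compare a small slice of the output against hard-coded reference values (typically via +# numpy_cosine_similarity_distance with a 5e-4 tolerance).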
+ +import gc +import unittest + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionPipeline, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLPipeline, +) +from diffusers.image_processor import IPAdapterMaskProcessor +from diffusers.utils import load_image + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + enable_full_determinism, + is_flaky, + load_pt, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +class IPAdapterNightlyTestsMixin(unittest.TestCase): + dtype = torch.float16 + + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_image_encoder(self, repo_id, subfolder): + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + repo_id, subfolder=subfolder, torch_dtype=self.dtype + ).to(torch_device) + return image_encoder + + def get_image_processor(self, repo_id): + image_processor = CLIPImageProcessor.from_pretrained(repo_id) + return image_processor + + def get_dummy_inputs( + self, for_image_to_image=False, for_inpainting=False, for_sdxl=False, for_masks=False, for_instant_style=False + ): + image = load_image( + "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png" + ) + if for_sdxl: + image = image.resize((1024, 1024)) + + input_kwargs = { + "prompt": "best quality, high quality", + "negative_prompt": "monochrome, lowres, bad anatomy, worst quality, low quality", + "num_inference_steps": 5, + "generator": torch.Generator(device="cpu").manual_seed(33), + "ip_adapter_image": image, + "output_type": "np", + } + if for_image_to_image: + image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") + ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") + + if for_sdxl: + image = image.resize((1024, 1024)) + ip_image = ip_image.resize((1024, 1024)) + + input_kwargs.update({"image": image, "ip_adapter_image": ip_image}) + + elif for_inpainting: + image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") + mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") + ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") + + if for_sdxl: + image = image.resize((1024, 1024)) + mask = mask.resize((1024, 1024)) + ip_image = ip_image.resize((1024, 1024)) + + input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image}) + + elif for_masks: + face_image1 = load_image( + "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png" + ) + face_image2 = load_image( + "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png" + ) + mask1 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png") + mask2 = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png") + input_kwargs.update( + { + "ip_adapter_image": [[face_image1], 
[face_image2]], + "cross_attention_kwargs": {"ip_adapter_masks": [mask1, mask2]}, + } + ) + + elif for_instant_style: + composition_mask = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/1024_whole_mask.png" + ) + female_mask = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter_None_20240321125641_mask.png" + ) + male_mask = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter_None_20240321125344_mask.png" + ) + background_mask = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter_6_20240321130722_mask.png" + ) + ip_composition_image = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321125152.png" + ) + ip_female_style = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321125625.png" + ) + ip_male_style = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321125329.png" + ) + ip_background = load_image( + "https://huggingface.co/datasets/OzzyGT/testing-resources/resolve/main/ip_adapter__20240321130643.png" + ) + input_kwargs.update( + { + "ip_adapter_image": [ip_composition_image, [ip_female_style, ip_male_style, ip_background]], + "cross_attention_kwargs": { + "ip_adapter_masks": [[composition_mask], [female_mask, male_mask, background_mask]] + }, + } + ) + + return input_kwargs + + +@slow +@require_torch_accelerator +class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin): + def test_text_to_image(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") + + inputs = self.get_dummy_inputs() + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array([0.80810547, 0.88183594, 0.9296875, 0.9189453, 0.9848633, 1.0, 0.97021484, 1.0, 1.0]) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") + + inputs = self.get_dummy_inputs() + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [0.30444336, 0.26513672, 0.22436523, 0.2758789, 0.25585938, 0.20751953, 0.25390625, 0.24633789, 0.21923828] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_image_to_image(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionImg2ImgPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") + + inputs = self.get_dummy_inputs(for_image_to_image=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [0.22167969, 0.21875, 
0.21728516, 0.22607422, 0.21948242, 0.23925781, 0.22387695, 0.25268555, 0.2722168] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") + + inputs = self.get_dummy_inputs(for_image_to_image=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [0.35913086, 0.265625, 0.26367188, 0.24658203, 0.19750977, 0.39990234, 0.15258789, 0.20336914, 0.5517578] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_inpainting(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionInpaintPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") + + inputs = self.get_dummy_inputs(for_inpainting=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [0.27148438, 0.24047852, 0.22167969, 0.23217773, 0.21118164, 0.21142578, 0.21875, 0.20751953, 0.20019531] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") + + inputs = self.get_dummy_inputs(for_inpainting=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_text_to_image_model_cpu_offload(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") + pipeline.to(torch_device) + + inputs = self.get_dummy_inputs() + output_without_offload = pipeline(**inputs).images + + pipeline.enable_model_cpu_offload(device=torch_device) + inputs = self.get_dummy_inputs() + output_with_offload = pipeline(**inputs).images + max_diff = np.abs(output_with_offload - output_without_offload).max() + self.assertLess(max_diff, 1e-3, "CPU offloading should not affect the inference results") + + offloaded_modules = [ + v + for k, v in pipeline.components.items() + if isinstance(v, torch.nn.Module) and k not in pipeline._exclude_from_cpu_offload + ] + ( + self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), + f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", + ) + + def test_text_to_image_full_face(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin") + 
pipeline.set_ip_adapter_scale(0.7) + + inputs = self.get_dummy_inputs() + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + expected_slice = np.array([0.1704, 0.1296, 0.1272, 0.2212, 0.1514, 0.1479, 0.4172, 0.4263, 0.4360]) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_unload(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + before_processors = [attn_proc.__class__ for attn_proc in pipeline.unet.attn_processors.values()] + pipeline.to(torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") + pipeline.set_ip_adapter_scale(0.7) + + pipeline.unload_ip_adapter() + + assert getattr(pipeline, "image_encoder") is None + assert getattr(pipeline, "feature_extractor") is not None + after_processors = [attn_proc.__class__ for attn_proc in pipeline.unet.attn_processors.values()] + + assert before_processors == after_processors + + @is_flaky + def test_multi(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + image_encoder=image_encoder, + safety_checker=None, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", subfolder="models", weight_name=["ip-adapter_sd15.bin", "ip-adapter-plus_sd15.bin"] + ) + pipeline.set_ip_adapter_scale([0.7, 0.3]) + + inputs = self.get_dummy_inputs() + ip_adapter_image = inputs["ip_adapter_image"] + inputs["ip_adapter_image"] = [ip_adapter_image, [ip_adapter_image] * 2] + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + expected_slice = np.array([0.5234, 0.5352, 0.5625, 0.5713, 0.5947, 0.6206, 0.5786, 0.6187, 0.6494]) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_text_to_image_face_id(self): + pipeline = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, torch_dtype=self.dtype + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter-FaceID", + subfolder=None, + weight_name="ip-adapter-faceid_sd15.bin", + image_encoder_folder=None, + ) + pipeline.set_ip_adapter_scale(0.7) + + inputs = self.get_dummy_inputs() + id_embeds = load_pt( + "https://huggingface.co/datasets/fabiorigano/testing-images/resolve/main/ai_face2.ipadpt", + map_location=torch_device, + )[0] + id_embeds = id_embeds.reshape((2, 1, 1, 512)) + inputs["ip_adapter_image_embeds"] = [id_embeds] + inputs["ip_adapter_image"] = None + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array([0.3237, 0.3186, 0.3406, 0.3154, 0.2942, 0.3220, 0.3188, 0.3528, 0.3242]) + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + +@slow +@require_torch_accelerator +class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin): + def test_text_to_image_sdxl(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") + feature_extractor = 
self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") + + pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + feature_extractor=feature_extractor, + torch_dtype=self.dtype, + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") + + inputs = self.get_dummy_inputs() + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [ + 0.09630299, + 0.09551358, + 0.08480701, + 0.09070173, + 0.09437338, + 0.09264627, + 0.08883232, + 0.09287417, + 0.09197289, + ] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + + pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + feature_extractor=feature_extractor, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", + subfolder="sdxl_models", + weight_name="ip-adapter-plus_sdxl_vit-h.bin", + ) + + inputs = self.get_dummy_inputs() + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array([0.0596, 0.0539, 0.0459, 0.0580, 0.0560, 0.0548, 0.0501, 0.0563, 0.0500]) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_image_to_image_sdxl(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") + feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") + + pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + feature_extractor=feature_extractor, + torch_dtype=self.dtype, + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") + + inputs = self.get_dummy_inputs(for_image_to_image=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [ + 0.06513795, + 0.07009393, + 0.07234055, + 0.07426041, + 0.07002589, + 0.06415862, + 0.07827643, + 0.07962808, + 0.07411247, + ] + ) + + assert np.allclose(image_slice, expected_slice, atol=1e-3) + + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") + + pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + feature_extractor=feature_extractor, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", + subfolder="sdxl_models", + weight_name="ip-adapter-plus_sdxl_vit-h.bin", + ) + + inputs = self.get_dummy_inputs(for_image_to_image=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slice = np.array( + [ + 0.07126552, + 0.07025367, + 0.07348302, + 0.07580167, + 0.07467338, + 0.06918576, + 0.07480252, + 0.08279955, + 0.08547315, + ] + ) + + assert np.allclose(image_slice, expected_slice, atol=1e-3) + + def 
test_inpainting_sdxl(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") + feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") + + pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + feature_extractor=feature_extractor, + torch_dtype=self.dtype, + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") + + inputs = self.get_dummy_inputs(for_inpainting=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + image_slice.tolist() + + expected_slice = np.array( + [0.14181179, 0.1493012, 0.14283323, 0.14602411, 0.14915377, 0.15015268, 0.14725655, 0.15009224, 0.15164584] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") + + pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + feature_extractor=feature_extractor, + torch_dtype=self.dtype, + ) + pipeline.to(torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", + subfolder="sdxl_models", + weight_name="ip-adapter-plus_sdxl_vit-h.bin", + ) + + inputs = self.get_dummy_inputs(for_inpainting=True) + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + image_slice.tolist() + + expected_slice = np.array([0.1398, 0.1476, 0.1407, 0.1442, 0.1470, 0.1480, 0.1449, 0.1481, 0.1494]) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_ip_adapter_mask(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + torch_dtype=self.dtype, + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus-face_sdxl_vit-h.safetensors" + ) + pipeline.set_ip_adapter_scale(0.7) + + inputs = self.get_dummy_inputs(for_masks=True) + mask = inputs["cross_attention_kwargs"]["ip_adapter_masks"][0] + processor = IPAdapterMaskProcessor() + mask = processor.preprocess(mask) + inputs["cross_attention_kwargs"]["ip_adapter_masks"] = mask + inputs["ip_adapter_image"] = inputs["ip_adapter_image"][0] + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + expected_slice = np.array( + [0.7307304, 0.73450166, 0.73731124, 0.7377061, 0.7318013, 0.73720926, 0.74746597, 0.7409929, 0.74074936] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_ip_adapter_multiple_masks(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + torch_dtype=self.dtype, + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", subfolder="sdxl_models", 
weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] * 2 + ) + pipeline.set_ip_adapter_scale([0.7] * 2) + + inputs = self.get_dummy_inputs(for_masks=True) + masks = inputs["cross_attention_kwargs"]["ip_adapter_masks"] + processor = IPAdapterMaskProcessor() + masks = processor.preprocess(masks) + inputs["cross_attention_kwargs"]["ip_adapter_masks"] = masks + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + expected_slice = np.array( + [0.79474676, 0.7977683, 0.8013954, 0.7988008, 0.7970615, 0.8029355, 0.80614823, 0.8050743, 0.80627424] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_instant_style_multiple_masks(self): + image_encoder = CLIPVisionModelWithProjection.from_pretrained( + "h94/IP-Adapter", subfolder="models/image_encoder", torch_dtype=torch.float16 + ) + pipeline = StableDiffusionXLPipeline.from_pretrained( + "RunDiffusion/Juggernaut-XL-v9", torch_dtype=torch.float16, image_encoder=image_encoder, variant="fp16" + ) + pipeline.enable_model_cpu_offload(device=torch_device) + + pipeline.load_ip_adapter( + ["ostris/ip-composition-adapter", "h94/IP-Adapter"], + subfolder=["", "sdxl_models"], + weight_name=[ + "ip_plus_composition_sdxl.safetensors", + "ip-adapter_sdxl_vit-h.safetensors", + ], + image_encoder_folder=None, + ) + scale_1 = { + "down": [[0.0, 0.0, 1.0]], + "mid": [[0.0, 0.0, 1.0]], + "up": {"block_0": [[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [0.0, 0.0, 1.0]], "block_1": [[0.0, 0.0, 1.0]]}, + } + pipeline.set_ip_adapter_scale([1.0, scale_1]) + + inputs = self.get_dummy_inputs(for_instant_style=True) + processor = IPAdapterMaskProcessor() + masks1 = inputs["cross_attention_kwargs"]["ip_adapter_masks"][0] + masks2 = inputs["cross_attention_kwargs"]["ip_adapter_masks"][1] + masks1 = processor.preprocess(masks1, height=1024, width=1024) + masks2 = processor.preprocess(masks2, height=1024, width=1024) + masks2 = masks2.reshape(1, masks2.shape[0], masks2.shape[2], masks2.shape[3]) + inputs["cross_attention_kwargs"]["ip_adapter_masks"] = [masks1, masks2] + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.2520, + 0.1050, + 0.1510, + 0.0997, + 0.0893, + 0.0019, + 0.0000, + 0.0000, + 0.0210, + ] + ), + ("cuda", 7): np.array( + [ + 0.2323, + 0.1026, + 0.1338, + 0.0638, + 0.0662, + 0.0000, + 0.0000, + 0.0000, + 0.0199, + ] + ), + ("cuda", 8): np.array( + [ + 0.2518, + 0.1059, + 0.1553, + 0.0977, + 0.0852, + 0.0000, + 0.0000, + 0.0000, + 0.0220, + ] + ), + } + ) + expected_slice = expected_slices.get_expectation() + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 + + def test_ip_adapter_multiple_masks_one_adapter(self): + image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") + pipeline = StableDiffusionXLPipeline.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", + image_encoder=image_encoder, + torch_dtype=self.dtype, + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.load_ip_adapter( + "h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"] + ) + pipeline.set_ip_adapter_scale([[0.7, 0.7]]) + + inputs = self.get_dummy_inputs(for_masks=True) + masks = inputs["cross_attention_kwargs"]["ip_adapter_masks"] + processor = IPAdapterMaskProcessor() + masks = processor.preprocess(masks) + masks = 
masks.reshape(1, masks.shape[0], masks.shape[2], masks.shape[3]) + inputs["cross_attention_kwargs"]["ip_adapter_masks"] = [masks] + ip_images = inputs["ip_adapter_image"] + inputs["ip_adapter_image"] = [[image[0] for image in ip_images]] + images = pipeline(**inputs).images + image_slice = images[0, :3, :3, -1].flatten() + expected_slice = np.array( + [0.79474676, 0.7977683, 0.8013954, 0.7988008, 0.7970615, 0.8029355, 0.80614823, 0.8050743, 0.80627424] + ) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 5e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky.py new file mode 100644 index 0000000000000000000000000000000000000000..911c6e49aeba48495e05bbf9604de559b34faea5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky.py @@ -0,0 +1,332 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
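+# NOTE: This module pairs fast CPU tests built from tiny dummy components (a small MultilingualCLIP text +# encoder, a UNet2DConditionModel, and a VQModel "movq" decoder) with a slow integration test that generates +# a full 512x512 image using the real Kandinsky 2.1 prior and decoder pipelines and compares it against a +# reference image stored on the Hub.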
+ +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import XLMRobertaTokenizerFast + +from diffusers import DDIMScheduler, KandinskyPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel +from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_numpy, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 32 + + @property + def dummy_tokenizer(self): + tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = MCLIPConfig( + numDims=self.cross_attention_dim, + transformerDimensions=self.text_embedder_hidden_size, + hidden_size=self.text_embedder_hidden_size, + intermediate_size=37, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=1005, + ) + + text_encoder = MultilingualCLIP(config) + text_encoder = text_encoder.eval() + + return text_encoder + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 4, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "text_image", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "text_image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + unet = self.dummy_unet + movq = self.dummy_movq + + scheduler = DDIMScheduler( + num_train_timesteps=1000, + beta_schedule="linear", + beta_start=0.00085, + beta_end=0.012, + clip_sample=False, + set_alpha_to_one=False, + steps_offset=1, + prediction_type="epsilon", + thresholding=False, + ) + + components = { + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + return components + + def 
get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "generator": generator, + "height": 64, + "width": 64, + "guidance_scale": 4.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + +class KandinskyPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyPipeline + params = [ + "prompt", + "image_embeds", + "negative_image_embeds", + ] + batch_params = ["prompt", "negative_prompt", "image_embeds", "negative_image_embeds"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummy = Dummies() + return dummy.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummy = Dummies() + return dummy.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([1.0000, 1.0000, 0.2766, 1.0000, 0.5447, 0.1737, 1.0000, 0.4316, 0.9024]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + +@slow +@require_torch_accelerator +class KandinskyPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each 
test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_text2img(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinsky/kandinsky_text2img_cat_fp16.npy" + ) + + pipe_prior = KandinskyPriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ) + pipe_prior.to(torch_device) + + pipeline = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16) + pipeline.to(torch_device) + pipeline.set_progress_bar_config(disable=None) + + prompt = "red cat, 4k photo" + + generator = torch.Generator(device=torch_device).manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=5, + negative_prompt="", + ).to_tuple() + + generator = torch.Generator(device=torch_device).manual_seed(0) + output = pipeline( + prompt, + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=100, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + assert_mean_pixel_difference(image, expected_image) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_combined.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..d744d1082135774920e79dbfccba5b55f9786a9d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_combined.py @@ -0,0 +1,369 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
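+# NOTE: The combined-pipeline tests below assemble their components by merging the prior pipeline's dummy +# components (prefixed with "prior_") into those of the text2img, img2img, and inpaint test modules, so each +# Kandinsky combined pipeline can be exercised end-to-end on CPU with tiny models.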
+ +import unittest + +import numpy as np + +from diffusers import KandinskyCombinedPipeline, KandinskyImg2ImgCombinedPipeline, KandinskyInpaintCombinedPipeline + +from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device +from ..test_pipelines_common import PipelineTesterMixin +from .test_kandinsky import Dummies +from .test_kandinsky_img2img import Dummies as Img2ImgDummies +from .test_kandinsky_inpaint import Dummies as InpaintDummies +from .test_kandinsky_prior import Dummies as PriorDummies + + +enable_full_determinism() + + +class KandinskyPipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyCombinedPipeline + params = [ + "prompt", + ] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = True + + supports_dduf = False + + def get_dummy_components(self): + dummy = Dummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update( + { + "height": 64, + "width": 64, + } + ) + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.2893, 0.1464, 0.4603, 0.3529, 0.4612, 0.7701, 0.4027, 0.3051, 0.5155]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + def test_float16_inference(self): + 
super().test_float16_inference(expected_max_diff=2e-1) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + +class KandinskyPipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyImg2ImgCombinedPipeline + params = ["prompt", "image"] + batch_params = ["prompt", "negative_prompt", "image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummy = Img2ImgDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = Img2ImgDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.4852, 0.4136, 0.4539, 0.4781, 0.4680, 0.5217, 0.4973, 0.4089, 0.4977]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + def test_save_load_optional_components(self): + 
super().test_save_load_optional_components(expected_max_difference=5e-4) + + +class KandinskyPipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyInpaintCombinedPipeline + params = ["prompt", "image", "mask_image"] + batch_params = ["prompt", "negative_prompt", "image", "mask_image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummy = InpaintDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = InpaintDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.0320, 0.0860, 0.4013, 0.0518, 0.2484, 0.5847, 0.4411, 0.2321, 0.4593]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + @unittest.skip("Difference between FP16 and FP32 too large on CI") + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + def test_save_load_optional_components(self): + 
super().test_save_load_optional_components(expected_max_difference=5e-4) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=5e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..4074c8db22a0d0b19f5657b4b126e90c7a9f1598 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_img2img.py @@ -0,0 +1,432 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import XLMRobertaTokenizerFast + +from diffusers import ( + DDIMScheduler, + DDPMScheduler, + KandinskyImg2ImgPipeline, + KandinskyPriorPipeline, + UNet2DConditionModel, + VQModel, +) +from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 32 + + @property + def dummy_tokenizer(self): + tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = MCLIPConfig( + numDims=self.cross_attention_dim, + transformerDimensions=self.text_embedder_hidden_size, + hidden_size=self.text_embedder_hidden_size, + intermediate_size=37, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=1005, + ) + + text_encoder = MultilingualCLIP(config) + text_encoder = text_encoder.eval() + + return text_encoder + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 4, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "text_image", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "text_image_proj", + "cross_attention_dim": self.cross_attention_dim, + 
"attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + unet = self.dummy_unet + movq = self.dummy_movq + + ddim_config = { + "num_train_timesteps": 1000, + "beta_schedule": "linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "clip_sample": False, + "set_alpha_to_one": False, + "steps_offset": 0, + "prediction_type": "epsilon", + "thresholding": False, + } + + scheduler = DDIMScheduler(**ddim_config) + + components = { + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) + # create init_image + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "image": init_image, + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "generator": generator, + "height": 64, + "width": 64, + "num_inference_steps": 10, + "guidance_scale": 7.0, + "strength": 0.2, + "output_type": "np", + } + return inputs + + +class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyImg2ImgPipeline + params = ["prompt", "image_embeds", "negative_image_embeds", "image"] + batch_params = [ + "prompt", + "negative_prompt", + "image_embeds", + "negative_image_embeds", + "image", + ] + required_optional_params = [ + "generator", + "height", + "width", + "strength", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky_img2img(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, 
-3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.5816, 0.5872, 0.4634, 0.5982, 0.4767, 0.4710, 0.4669, 0.4717, 0.4966]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload() + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload() + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + +@slow +@require_torch_accelerator +class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_img2img(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinsky/kandinsky_img2img_frog.npy" + ) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png" + ) + prompt = "A red cartoon frog, 4k" + + pipe_prior = KandinskyPriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ) + pipe_prior.to(torch_device) + + pipeline = KandinskyImg2ImgPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16 + ) + pipeline = pipeline.to(torch_device) + + pipeline.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=5, + negative_prompt="", + ).to_tuple() + + output = pipeline( + prompt, + image=init_image, + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=100, + height=768, + width=768, + strength=0.2, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + assert_mean_pixel_difference(image, expected_image) + + +@nightly +@require_torch_accelerator +class KandinskyImg2ImgPipelineNightlyTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def 
test_kandinsky_img2img_ddpm(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinsky/kandinsky_img2img_ddpm_frog.npy" + ) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/frog.png" + ) + prompt = "A red cartoon frog, 4k" + + pipe_prior = KandinskyPriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ) + pipe_prior.to(torch_device) + + scheduler = DDPMScheduler.from_pretrained("kandinsky-community/kandinsky-2-1", subfolder="ddpm_scheduler") + pipeline = KandinskyImg2ImgPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1", scheduler=scheduler, torch_dtype=torch.float16 + ) + pipeline = pipeline.to(torch_device) + + pipeline.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=5, + negative_prompt="", + ).to_tuple() + + output = pipeline( + prompt, + image=init_image, + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=100, + height=768, + width=768, + strength=0.2, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + assert_mean_pixel_difference(image, expected_image) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..b789a63cdd03f3c2d6be68ca4ade503eac1fd344 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_inpaint.py @@ -0,0 +1,365 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
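+# Tests for KandinskyInpaintPipeline: fast tests built on tiny dummy components, plus a
+# nightly integration test against the kandinsky-community/kandinsky-2-1-inpaint checkpoint.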
+ +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import XLMRobertaTokenizerFast + +from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel +from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + require_torch_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 32 + + @property + def dummy_tokenizer(self): + tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = MCLIPConfig( + numDims=self.cross_attention_dim, + transformerDimensions=self.text_embedder_hidden_size, + hidden_size=self.text_embedder_hidden_size, + intermediate_size=37, + num_attention_heads=4, + num_hidden_layers=5, + vocab_size=1005, + ) + + text_encoder = MultilingualCLIP(config) + text_encoder = text_encoder.eval() + + return text_encoder + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 9, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "text_image", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "text_image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + unet = self.dummy_unet + movq = self.dummy_movq + + scheduler = DDIMScheduler( + num_train_timesteps=1000, + beta_schedule="linear", + beta_start=0.00085, + beta_end=0.012, + clip_sample=False, + set_alpha_to_one=False, + steps_offset=1, + prediction_type="epsilon", + thresholding=False, + ) + + components = { + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "unet": unet, + "scheduler": scheduler, + "movq": movq, 
+ } + + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device) + # create init_image + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) + # create mask + mask = np.zeros((64, 64), dtype=np.float32) + mask[:32, :32] = 1 + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "image": init_image, + "mask_image": mask, + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "generator": generator, + "height": 64, + "width": 64, + "num_inference_steps": 2, + "guidance_scale": 4.0, + "output_type": "np", + } + return inputs + + +class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyInpaintPipeline + params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] + batch_params = [ + "prompt", + "negative_prompt", + "image_embeds", + "negative_image_embeds", + "image", + "mask_image", + ] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky_inpaint(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.8222, 0.8896, 0.4373, 0.8088, 0.4905, 0.2609, 0.6816, 0.4291, 0.5129]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + 
pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + +@nightly +@require_torch_accelerator +class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_inpaint(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" + ) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png" + ) + mask = np.zeros((768, 768), dtype=np.float32) + mask[:250, 250:-250] = 1 + + prompt = "a hat" + + pipe_prior = KandinskyPriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16 + ) + pipe_prior.to(torch_device) + + pipeline = KandinskyInpaintPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16 + ) + pipeline = pipeline.to(torch_device) + pipeline.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=5, + negative_prompt="", + ).to_tuple() + + output = pipeline( + prompt, + image=init_image, + mask_image=mask, + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=100, + height=768, + width=768, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + assert_mean_pixel_difference(image, expected_image) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_prior.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..903a1e5decfae9249058f23e42013380f13499ac --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky/test_kandinsky_prior.py @@ -0,0 +1,240 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
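+# Fast tests for KandinskyPriorPipeline, using tiny CLIP text/image encoders and a small
+# PriorTransformer as dummy components so the pipeline can be exercised on CPU.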
+ +import unittest + +import numpy as np +import torch +from torch import nn +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import KandinskyPriorPipeline, PriorTransformer, UnCLIPScheduler + +from ...testing_utils import enable_full_determinism, skip_mps, torch_device +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 100 + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=self.text_embedder_hidden_size, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config) + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "num_attention_heads": 2, + "attention_head_dim": 12, + "embedding_dim": self.text_embedder_hidden_size, + "num_layers": 1, + } + + model = PriorTransformer(**model_kwargs) + # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 + model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) + return model + + @property + def dummy_image_encoder(self): + torch.manual_seed(0) + config = CLIPVisionConfig( + hidden_size=self.text_embedder_hidden_size, + image_size=224, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + model = CLIPVisionModelWithProjection(config) + return model + + @property + def dummy_image_processor(self): + image_processor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + return image_processor + + def get_dummy_components(self): + prior = self.dummy_prior + image_encoder = self.dummy_image_encoder + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + image_processor = self.dummy_image_processor + + scheduler = UnCLIPScheduler( + variance_type="fixed_small_log", + prediction_type="sample", + num_train_timesteps=1000, + clip_sample=True, + clip_sample_range=10.0, + ) + + components = { + "prior": prior, + "image_encoder": image_encoder, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "scheduler": scheduler, + "image_processor": image_processor, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "generator": generator, + "guidance_scale": 4.0, + "num_inference_steps": 2, + 
"output_type": "np", + } + return inputs + + +class KandinskyPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyPriorPipeline + params = ["prompt"] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "num_images_per_prompt", + "generator", + "num_inference_steps", + "latents", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummy = Dummies() + return dummy.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummy = Dummies() + return dummy.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky_prior(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.image_embeds + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -10:] + + image_from_tuple_slice = image_from_tuple[0, -10:] + + assert image.shape == (1, 32) + + expected_slice = np.array( + [-0.5948, 0.1875, -0.1523, -1.1995, -1.4061, -0.6367, -1.4607, -0.6406, 0.8793, -0.3891] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + @skip_mps + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-2) + + @skip_mps + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device == "cpu" + test_mean_pixel_difference = False + + self._test_attention_slicing_forward_pass( + test_max_difference=test_max_difference, + test_mean_pixel_difference=test_mean_pixel_difference, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky.py new file mode 100644 index 0000000000000000000000000000000000000000..38294aa4c11195344f6683d7cc96261be97922fb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import random +import unittest + +import numpy as np +import torch + +from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 32 + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 4, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "image", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + unet = self.dummy_unet + movq = self.dummy_movq + + scheduler = DDIMScheduler( + num_train_timesteps=1000, + beta_schedule="linear", + beta_start=0.00085, + beta_end=0.012, + clip_sample=False, + set_alpha_to_one=False, + steps_offset=1, + prediction_type="epsilon", + thresholding=False, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( + device + ) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "generator": generator, + "height": 64, + "width": 64, + "guidance_scale": 4.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + +class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22Pipeline + params = [ + "image_embeds", + "negative_image_embeds", + ] + batch_params = 
["image_embeds", "negative_image_embeds"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + callback_cfg_params = ["image_embds"] + test_xformers_attention = False + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=1e-1) + + +@slow +@require_torch_accelerator +class KandinskyV22PipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_text2img(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" + ) + + pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ) + pipe_prior.enable_model_cpu_offload(device=torch_device) + + pipeline = KandinskyV22Pipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + prompt = "red cat, 4k photo" + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=3, + negative_prompt="", + ).to_tuple() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipeline( + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=3, + output_type="np", + ) + image = output.images[0] + assert image.shape == (512, 512, 3) + + max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py new file mode 100644 index 
0000000000000000000000000000000000000000..476fc584cc56a4a5f2cb310b2f63783ea8ce82b6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py @@ -0,0 +1,412 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np + +from diffusers import ( + KandinskyV22CombinedPipeline, + KandinskyV22Img2ImgCombinedPipeline, + KandinskyV22InpaintCombinedPipeline, +) + +from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device +from ..test_pipelines_common import PipelineTesterMixin +from .test_kandinsky import Dummies +from .test_kandinsky_img2img import Dummies as Img2ImgDummies +from .test_kandinsky_inpaint import Dummies as InpaintDummies +from .test_kandinsky_prior import Dummies as PriorDummies + + +enable_full_determinism() + + +class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22CombinedPipeline + params = [ + "prompt", + ] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = True + callback_cfg_params = ["image_embds"] + + supports_dduf = False + + def get_dummy_components(self): + dummy = Dummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update( + { + "height": 64, + "width": 64, + } + ) + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.3076, 0.2729, 0.5668, 0.0522, 0.3384, 0.7028, 0.4908, 0.3659, 0.6243]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = 
self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + def test_model_cpu_offload_forward_pass(self): + super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=5e-3) + + def test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=5e-3) + + def test_callback_inputs(self): + pass + + def test_callback_cfg(self): + pass + + +class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22Img2ImgCombinedPipeline + params = ["prompt", "image"] + batch_params = ["prompt", "negative_prompt", "image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + callback_cfg_params = ["image_embds"] + + supports_dduf = False + + def get_dummy_components(self): + dummy = Img2ImgDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = Img2ImgDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.4445, 0.4287, 0.4596, 0.3919, 0.3730, 0.5039, 0.4834, 0.4269, 0.5521]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice 
{expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=2e-1) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + def test_model_cpu_offload_forward_pass(self): + super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) + + def test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=5e-4) + + def save_load_local(self): + super().test_save_load_local(expected_max_difference=5e-3) + + def test_callback_inputs(self): + pass + + def test_callback_cfg(self): + pass + + +class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22InpaintCombinedPipeline + params = ["prompt", "image", "mask_image"] + batch_params = ["prompt", "negative_prompt", "image", "mask_image"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummy = InpaintDummies() + prior_dummy = PriorDummies() + components = dummy.get_dummy_components() + + components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) + return components + + def get_dummy_inputs(self, device, seed=0): + prior_dummy = PriorDummies() + dummy = InpaintDummies() + inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) + inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) + inputs.pop("image_embeds") + inputs.pop("negative_image_embeds") + return inputs + + def test_kandinsky(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" 
expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=8e-1) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) + + def test_model_cpu_offload_forward_pass(self): + super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=5e-3) + + def test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=5e-4) + + def test_sequential_cpu_offload_forward_pass(self): + super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) + + def test_callback_inputs(self): + pass + + def test_callback_cfg(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..4054e38c569121e0695c8d0ef890bf59cdfdcd9e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py @@ -0,0 +1,293 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
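+# Tests for KandinskyV22ControlnetPipeline: fast tests with dummy components and a nightly
+# integration test that conditions generation on a depth hint image using the
+# kandinsky-community/kandinsky-2-2-controlnet-depth checkpoint.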
+ +import gc +import random +import unittest + +import numpy as np +import torch + +from diffusers import ( + DDIMScheduler, + KandinskyV22ControlnetPipeline, + KandinskyV22PriorPipeline, + UNet2DConditionModel, + VQModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + numpy_cosine_similarity_distance, + require_torch_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22ControlnetPipeline + params = ["image_embeds", "negative_image_embeds", "hint"] + batch_params = ["image_embeds", "negative_image_embeds", "hint"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 100 + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 8, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "image_hint", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 32, 64, 64], + "down_block_types": [ + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "AttnDownEncoderBlock2D", + ], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + unet = self.dummy_unet + movq = self.dummy_movq + + scheduler = DDIMScheduler( + num_train_timesteps=1000, + beta_schedule="linear", + beta_start=0.00085, + beta_end=0.012, + clip_sample=False, + set_alpha_to_one=False, + steps_offset=1, + prediction_type="epsilon", + thresholding=False, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, 
self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( + device + ) + + # create hint + hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "hint": hint, + "generator": generator, + "height": 64, + "width": 64, + "guidance_scale": 4.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_kandinsky_controlnet(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=1e-1) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=5e-4) + + +@nightly +@require_torch_accelerator +class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_controlnet(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" + ) + + hint = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/hint_image_cat.png" + ) + hint = torch.from_numpy(np.array(hint)).float() / 255.0 + hint = hint.permute(2, 0, 1).unsqueeze(0) + + pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ) + pipe_prior.enable_model_cpu_offload() + + pipeline = KandinskyV22ControlnetPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 + ) + pipeline.enable_model_cpu_offload() + pipeline.set_progress_bar_config(disable=None) + + prompt = "A robot, 4k photo" + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=2, + negative_prompt="", + ).to_tuple() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipeline( + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + hint=hint, + generator=generator, + num_inference_steps=2, + output_type="np", + ) + + image = output.images[0] + + 
assert image.shape == (512, 512, 3) + max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) + assert max_diff < 2e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..a4346605929b6e7b8774355b2cdfa0335276aed1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py @@ -0,0 +1,314 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image + +from diffusers import ( + DDIMScheduler, + KandinskyV22ControlnetImg2ImgPipeline, + KandinskyV22PriorEmb2EmbPipeline, + UNet2DConditionModel, + VQModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + numpy_cosine_similarity_distance, + require_torch_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22ControlnetImg2ImgPipeline + params = ["image_embeds", "negative_image_embeds", "image", "hint"] + batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"] + required_optional_params = [ + "generator", + "height", + "width", + "strength", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 100 + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 8, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "image_hint", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + 
return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 32, 64, 64], + "down_block_types": [ + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "AttnDownEncoderBlock2D", + ], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + unet = self.dummy_unet + movq = self.dummy_movq + + ddim_config = { + "num_train_timesteps": 1000, + "beta_schedule": "linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "clip_sample": False, + "set_alpha_to_one": False, + "steps_offset": 0, + "prediction_type": "epsilon", + "thresholding": False, + } + + scheduler = DDIMScheduler(**ddim_config) + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( + device + ) + # create init_image + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) + # create hint + hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": init_image, + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "hint": hint, + "generator": generator, + "height": 64, + "width": 64, + "num_inference_steps": 10, + "guidance_scale": 7.0, + "strength": 0.2, + "output_type": "np", + } + return inputs + + def test_kandinsky_controlnet_img2img(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1.75e-3) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=2e-1) + + +@nightly +@require_torch_accelerator +class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase): + def 
setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_controlnet_img2img(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" + ) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png" + ) + init_image = init_image.resize((512, 512)) + + hint = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/hint_image_cat.png" + ) + hint = torch.from_numpy(np.array(hint)).float() / 255.0 + hint = hint.permute(2, 0, 1).unsqueeze(0) + + prompt = "A robot, 4k photo" + + pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ) + pipe_prior.enable_model_cpu_offload() + + pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 + ) + pipeline.enable_model_cpu_offload() + + pipeline.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + + image_emb, zero_image_emb = pipe_prior( + prompt, + image=init_image, + strength=0.85, + generator=generator, + negative_prompt="", + num_inference_steps=5, + ).to_tuple() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipeline( + image=init_image, + image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + hint=hint, + generator=generator, + num_inference_steps=5, + height=512, + width=512, + strength=0.5, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (512, 512, 3) + + max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) + assert max_diff < 5e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..99f3fe0f40f1f77a04779e7ef9409efd2a3d0b99 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py @@ -0,0 +1,305 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image + +from diffusers import ( + DDIMScheduler, + KandinskyV22Img2ImgPipeline, + KandinskyV22PriorPipeline, + UNet2DConditionModel, + VQModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 32 + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 4, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "image", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + unet = self.dummy_unet + movq = self.dummy_movq + + ddim_config = { + "num_train_timesteps": 1000, + "beta_schedule": "linear", + "beta_start": 0.00085, + "beta_end": 0.012, + "clip_sample": False, + "set_alpha_to_one": False, + "steps_offset": 0, + "prediction_type": "epsilon", + "thresholding": False, + } + + scheduler = DDIMScheduler(**ddim_config) + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( + device + ) + # create init_image + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": init_image, + "image_embeds": image_embeds, + "negative_image_embeds": 
negative_image_embeds, + "generator": generator, + "height": 64, + "width": 64, + "num_inference_steps": 10, + "guidance_scale": 7.0, + "strength": 0.2, + "output_type": "np", + } + return inputs + + +class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22Img2ImgPipeline + params = ["image_embeds", "negative_image_embeds", "image"] + batch_params = [ + "image_embeds", + "negative_image_embeds", + "image", + ] + required_optional_params = [ + "generator", + "height", + "width", + "strength", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + callback_cfg_params = ["image_embeds"] + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky_img2img(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=2e-1) + + +@slow +@require_torch_accelerator +class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_img2img(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/kandinskyv22_img2img_frog.npy" + ) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png" + ) + prompt = "A red cartoon frog, 4k" + + pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ) + pipe_prior.enable_model_cpu_offload(device=torch_device) + + pipeline = KandinskyV22Img2ImgPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=5, + negative_prompt="", + ).to_tuple() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipeline( + image=init_image, + 
image_embeds=image_emb, + negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=5, + height=768, + width=768, + strength=0.2, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..d4eb650263af36dfba564cc95afca5bc51c15a78 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py @@ -0,0 +1,361 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image + +from diffusers import ( + DDIMScheduler, + KandinskyV22InpaintPipeline, + KandinskyV22PriorPipeline, + UNet2DConditionModel, + VQModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + is_flaky, + load_image, + load_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 32 + + @property + def dummy_unet(self): + torch.manual_seed(0) + + model_kwargs = { + "in_channels": 9, + # Out channels is double in channels because predicts mean and variance + "out_channels": 8, + "addition_embed_type": "image", + "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), + "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), + "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", + "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), + "layers_per_block": 1, + "encoder_hid_dim": self.text_embedder_hidden_size, + "encoder_hid_dim_type": "image_proj", + "cross_attention_dim": self.cross_attention_dim, + "attention_head_dim": 4, + "resnet_time_scale_shift": "scale_shift", + "class_embed_type": None, + } + + model = UNet2DConditionModel(**model_kwargs) + return model + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + 
"AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self): + unet = self.dummy_unet + movq = self.dummy_movq + + scheduler = DDIMScheduler( + num_train_timesteps=1000, + beta_schedule="linear", + beta_start=0.00085, + beta_end=0.012, + clip_sample=False, + set_alpha_to_one=False, + steps_offset=1, + prediction_type="epsilon", + thresholding=False, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) + negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( + device + ) + # create init_image + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) + # create mask + mask = np.zeros((64, 64), dtype=np.float32) + mask[:32, :32] = 1 + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": init_image, + "mask_image": mask, + "image_embeds": image_embeds, + "negative_image_embeds": negative_image_embeds, + "generator": generator, + "height": 64, + "width": 64, + "num_inference_steps": 2, + "guidance_scale": 4.0, + "output_type": "np", + } + return inputs + + +class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22InpaintPipeline + params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] + batch_params = [ + "image_embeds", + "negative_image_embeds", + "image", + "mask_image", + ] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "guidance_scale", + "num_inference_steps", + "return_dict", + "guidance_scale", + "num_images_per_prompt", + "output_type", + "return_dict", + ] + test_xformers_attention = False + callback_cfg_params = ["image_embeds", "masked_image", "mask_image"] + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky_inpaint(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + def 
test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + @is_flaky() + def test_model_cpu_offload_forward_pass(self): + super().test_inference_batch_single_identical(expected_max_diff=8e-4) + + def test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=5e-4) + + def test_sequential_cpu_offload_forward_pass(self): + super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) + + # override default test because we need to zero out mask too in order to make sure final latent is all zero + def test_callback_inputs(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_test(pipe, i, t, callback_kwargs): + missing_callback_inputs = set() + for v in pipe._callback_tensor_inputs: + if v not in callback_kwargs: + missing_callback_inputs.add(v) + self.assertTrue( + len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" + ) + last_i = pipe.num_timesteps - 1 + if i == last_i: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"]) + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + inputs["callback_on_step_end"] = callback_inputs_test + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + inputs["output_type"] = "latent" + + output = pipe(**inputs)[0] + assert output.abs().sum() == 0 + + +@slow +@require_torch_accelerator +class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinsky_inpaint(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" + ) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png" + ) + mask = np.zeros((768, 768), dtype=np.float32) + mask[:250, 250:-250] = 1 + + prompt = "a hat" + + pipe_prior = KandinskyV22PriorPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 + ) + pipe_prior.to(torch_device) + + pipeline = KandinskyV22InpaintPipeline.from_pretrained( + "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 + ) + pipeline = pipeline.to(torch_device) + pipeline.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + image_emb, zero_image_emb = pipe_prior( + prompt, + generator=generator, + num_inference_steps=2, + negative_prompt="", + ).to_tuple() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipeline( + image=init_image, + mask_image=mask, + image_embeds=image_emb, + 
negative_image_embeds=zero_image_emb, + generator=generator, + num_inference_steps=2, + height=768, + width=768, + output_type="np", + ) + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..adcc6cc2167c847e1bc971eeb54382c69529d6d8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py @@ -0,0 +1,281 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from torch import nn +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler + +from ...testing_utils import enable_full_determinism, skip_mps, torch_device +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Dummies: + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 100 + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=self.text_embedder_hidden_size, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config) + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "num_attention_heads": 2, + "attention_head_dim": 12, + "embedding_dim": self.text_embedder_hidden_size, + "num_layers": 1, + } + + model = PriorTransformer(**model_kwargs) + # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 + model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) + return model + + @property + def dummy_image_encoder(self): + torch.manual_seed(0) + config = CLIPVisionConfig( + hidden_size=self.text_embedder_hidden_size, + image_size=224, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + num_attention_heads=4, + 
num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + model = CLIPVisionModelWithProjection(config) + return model + + @property + def dummy_image_processor(self): + image_processor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + return image_processor + + def get_dummy_components(self): + prior = self.dummy_prior + image_encoder = self.dummy_image_encoder + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + image_processor = self.dummy_image_processor + + scheduler = UnCLIPScheduler( + variance_type="fixed_small_log", + prediction_type="sample", + num_train_timesteps=1000, + clip_sample=True, + clip_sample_range=10.0, + ) + + components = { + "prior": prior, + "image_encoder": image_encoder, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "scheduler": scheduler, + "image_processor": image_processor, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "generator": generator, + "guidance_scale": 4.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + +class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22PriorPipeline + params = ["prompt"] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "num_images_per_prompt", + "generator", + "num_inference_steps", + "latents", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"] + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + dummies = Dummies() + return dummies.get_dummy_components() + + def get_dummy_inputs(self, device, seed=0): + dummies = Dummies() + return dummies.get_dummy_inputs(device=device, seed=seed) + + def test_kandinsky_prior(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.image_embeds + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -10:] + + image_from_tuple_slice = image_from_tuple[0, -10:] + + assert image.shape == (1, 32) + + expected_slice = np.array( + [-0.5948, 0.1875, -0.1523, -1.1995, -1.4061, -0.6367, -1.4607, -0.6406, 0.8793, -0.3891] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + @skip_mps + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + @skip_mps + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device == "cpu" + test_mean_pixel_difference = False + + self._test_attention_slicing_forward_pass( + test_max_difference=test_max_difference, + test_mean_pixel_difference=test_mean_pixel_difference, + ) + + # override default test because no output_type "latent", use "pt" instead + def test_callback_inputs(self): + sig = 
inspect.signature(self.pipeline_class.__call__) + + if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_test(pipe, i, t, callback_kwargs): + missing_callback_inputs = set() + for v in pipe._callback_tensor_inputs: + if v not in callback_kwargs: + missing_callback_inputs.add(v) + self.assertTrue( + len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" + ) + last_i = pipe.num_timesteps - 1 + if i == last_i: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + inputs["callback_on_step_end"] = callback_inputs_test + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + inputs["num_inference_steps"] = 2 + inputs["output_type"] = "pt" + + output = pipe(**inputs)[0] + assert output.abs().sum() == 0 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py new file mode 100644 index 0000000000000000000000000000000000000000..5377d917791ad92cca3ebcb2c5651106cb4b03c0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py @@ -0,0 +1,244 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from torch import nn +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + skip_mps, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class KandinskyV22PriorEmb2EmbPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KandinskyV22PriorEmb2EmbPipeline + params = ["prompt", "image"] + batch_params = ["prompt", "image"] + required_optional_params = [ + "num_images_per_prompt", + "strength", + "generator", + "num_inference_steps", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def cross_attention_dim(self): + return 100 + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=self.text_embedder_hidden_size, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config) + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "num_attention_heads": 2, + "attention_head_dim": 12, + "embedding_dim": self.text_embedder_hidden_size, + "num_layers": 1, + } + + model = PriorTransformer(**model_kwargs) + # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 + model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) + return model + + @property + def dummy_image_encoder(self): + torch.manual_seed(0) + config = CLIPVisionConfig( + hidden_size=self.text_embedder_hidden_size, + image_size=224, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + model = CLIPVisionModelWithProjection(config) + return model + + @property + def dummy_image_processor(self): + image_processor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + return image_processor + + def get_dummy_components(self): + prior = self.dummy_prior + image_encoder = self.dummy_image_encoder + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + image_processor = self.dummy_image_processor + + scheduler = UnCLIPScheduler( + variance_type="fixed_small_log", + prediction_type="sample", + num_train_timesteps=1000, + clip_sample=True, + clip_sample_range=10.0, + ) + + components 
= { + "prior": prior, + "image_encoder": image_encoder, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "scheduler": scheduler, + "image_processor": image_processor, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) + + inputs = { + "prompt": "horse", + "image": init_image, + "strength": 0.5, + "generator": generator, + "guidance_scale": 4.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_kandinsky_prior_emb2emb(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.image_embeds + + image_from_tuple = pipe( + **self.get_dummy_inputs(device), + return_dict=False, + )[0] + + image_slice = image[0, -10:] + + image_from_tuple_slice = image_from_tuple[0, -10:] + + assert image.shape == (1, 32) + + expected_slice = np.array( + [-0.8947, 0.7225, -0.2400, -1.4224, -1.9268, -1.1454, -1.8220, -0.7972, 1.0465, -0.5207] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + @skip_mps + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-2) + + @skip_mps + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device == "cpu" + test_mean_pixel_difference = False + + self._test_attention_slicing_forward_pass( + test_max_difference=test_max_difference, + test_mean_pixel_difference=test_mean_pixel_difference, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/test_kandinsky3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/test_kandinsky3.py new file mode 100644 index 0000000000000000000000000000000000000000..55500f729bbb3ffc4f493a00c2f4470d45651f62 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/test_kandinsky3.py @@ -0,0 +1,241 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoPipelineForImage2Image, + AutoPipelineForText2Image, + Kandinsky3Pipeline, + Kandinsky3UNet, + VQModel, +) +from diffusers.image_processor import VaeImageProcessor +from diffusers.schedulers.scheduling_ddpm import DDPMScheduler + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Kandinsky3PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Kandinsky3Pipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + test_xformers_attention = False + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = Kandinsky3UNet( + in_channels=4, + time_embedding_dim=4, + groups=2, + attention_head_dim=4, + layers_per_block=3, + block_out_channels=(32, 64), + cross_attention_dim=4, + encoder_hid_dim=32, + ) + scheduler = DDPMScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="squaredcos_cap_v2", + clip_sample=True, + thresholding=False, + ) + torch.manual_seed(0) + movq = self.dummy_movq + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + "width": 16, + "height": 16, + } + return inputs + + def test_kandinsky3(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 16, 16, 3) + + expected_slice = np.array([0.3768, 0.4373, 0.4865, 0.4890, 0.4299, 0.5122, 0.4921, 0.4924, 0.5599]) + + assert 
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=1e-1) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + +@slow +@require_torch_accelerator +class Kandinsky3PipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinskyV3(self): + pipe = AutoPipelineForText2Image.from_pretrained( + "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." + + generator = torch.Generator(device="cpu").manual_seed(0) + + image = pipe(prompt, num_inference_steps=5, generator=generator).images[0] + + assert image.size == (1024, 1024) + + expected_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" + ) + + image_processor = VaeImageProcessor() + + image_np = image_processor.pil_to_numpy(image) + expected_image_np = image_processor.pil_to_numpy(expected_image) + + self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) + + def test_kandinskyV3_img2img(self): + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" + ) + w, h = 512, 512 + image = image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + prompt = "A painting of the inside of a subway train with tiny raccoons." + + image = pipe(prompt, image=image, strength=0.75, num_inference_steps=5, generator=generator).images[0] + + assert image.size == (512, 512) + + expected_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/i2i.png" + ) + + image_processor = VaeImageProcessor() + + image_np = image_processor.pil_to_numpy(image) + expected_image_np = image_processor.pil_to_numpy(expected_image) + + self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..503fdb242dff822268c30ddf5f04fda0903248c5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kandinsky3/test_kandinsky3_img2img.py @@ -0,0 +1,239 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoPipelineForImage2Image, + Kandinsky3Img2ImgPipeline, + Kandinsky3UNet, + VQModel, +) +from diffusers.image_processor import VaeImageProcessor +from diffusers.schedulers.scheduling_ddpm import DDPMScheduler + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Kandinsky3Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = Kandinsky3Img2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + test_xformers_attention = False + required_optional_params = frozenset( + [ + "num_inference_steps", + "num_images_per_prompt", + "generator", + "output_type", + "return_dict", + ] + ) + + @property + def dummy_movq_kwargs(self): + return { + "block_out_channels": [32, 64], + "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], + "in_channels": 3, + "latent_channels": 4, + "layers_per_block": 1, + "norm_num_groups": 8, + "norm_type": "spatial", + "num_vq_embeddings": 12, + "out_channels": 3, + "up_block_types": [ + "AttnUpDecoderBlock2D", + "UpDecoderBlock2D", + ], + "vq_embed_dim": 4, + } + + @property + def dummy_movq(self): + torch.manual_seed(0) + model = VQModel(**self.dummy_movq_kwargs) + return model + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = Kandinsky3UNet( + in_channels=4, + time_embedding_dim=4, + groups=2, + attention_head_dim=4, + layers_per_block=3, + block_out_channels=(32, 64), + cross_attention_dim=4, + encoder_hid_dim=32, + ) + scheduler = DDPMScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="squaredcos_cap_v2", + clip_sample=True, + thresholding=False, + ) + torch.manual_seed(0) + movq = self.dummy_movq + torch.manual_seed(0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "unet": unet, + "scheduler": scheduler, + "movq": movq, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # create init_image + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = 
Image.fromarray(np.uint8(image)).convert("RGB") + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "generator": generator, + "strength": 0.75, + "num_inference_steps": 10, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_dict_tuple_outputs_equivalent(self): + expected_slice = None + if torch_device == "cpu": + expected_slice = np.array([0.5762, 0.6112, 0.4150, 0.6018, 0.6167, 0.4626, 0.5426, 0.5641, 0.6536]) + super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) + + def test_kandinsky3_img2img(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [0.576259, 0.6132097, 0.41703486, 0.603196, 0.62062526, 0.4655338, 0.5434324, 0.5660727, 0.65433365] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=1e-1) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=1e-2) + + +@slow +@require_torch_accelerator +class Kandinsky3Img2ImgPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_kandinskyV3_img2img(self): + pipe = AutoPipelineForImage2Image.from_pretrained( + "kandinsky-community/kandinsky-3", variant="fp16", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device="cpu").manual_seed(0) + + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/t2i.png" + ) + w, h = 512, 512 + image = image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1) + prompt = "A painting of the inside of a subway train with tiny raccoons." 
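+ # img2img on the resized 512x512 init image: strength=0.75 repaints most detail while keeping the layout, and 5 steps keeps this slow test cheap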
+ + image = pipe(prompt, image=image, strength=0.75, num_inference_steps=5, generator=generator).images[0] + + assert image.size == (512, 512) + + expected_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky3/i2i.png" + ) + + image_processor = VaeImageProcessor() + + image_np = image_processor.pil_to_numpy(image) + expected_image_np = image_processor.pil_to_numpy(expected_image) + + self.assertTrue(np.allclose(image_np, expected_image_np, atol=5e-2)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/test_kolors.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/test_kolors.py new file mode 100644 index 0000000000000000000000000000000000000000..f1d4982d4d74ed94b9e847b3fac9ae88c5b84cc9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/test_kolors.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch + +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + KolorsPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class KolorsPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KolorsPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + supports_dduf = False + test_layerwise_casting = True + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(2, 4), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=56, + cross_attention_dim=8, + norm_num_groups=1, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder = ChatGLMModel.from_pretrained( + "hf-internal-testing/tiny-random-chatglm3-6b", torch_dtype=torch.float32 + ) + tokenizer = ChatGLMTokenizer.from_pretrained("hf-internal-testing/tiny-random-chatglm3-6b") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 64, 64, 3)) + expected_slice = np.array( + [0.26413745, 0.4425478, 0.4102801, 0.42693347, 0.52529025, 0.3867405, 0.47512037, 0.41538602, 0.43855375] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def 
test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=2e-4) + + def test_save_load_float16(self): + super().test_save_load_float16(expected_max_diff=2e-1) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=5e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/test_kolors_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/test_kolors_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..5a5d31a464567a99db6c64e0f6c334862d972143 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/kolors/test_kolors_img2img.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest + +import numpy as np +import torch + +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + KolorsImg2ImgPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class KolorsPipelineImg2ImgFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = KolorsImg2ImgPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + supports_dduf = False + + # Copied from tests.pipelines.kolors.test_kolors.KolorsPipelineFastTests.get_dummy_components + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(2, 4), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=56, + cross_attention_dim=8, + norm_num_groups=1, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + 
sample_size=128, + ) + torch.manual_seed(0) + text_encoder = ChatGLMModel.from_pretrained( + "hf-internal-testing/tiny-random-chatglm3-6b", torch_dtype=torch.float32 + ) + tokenizer = ChatGLMTokenizer.from_pretrained("hf-internal-testing/tiny-random-chatglm3-6b") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "strength": 0.8, + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 64, 64, 3)) + expected_slice = np.array( + [0.54823864, 0.43654007, 0.4886489, 0.63072854, 0.53641886, 0.4896852, 0.62123513, 0.5621531, 0.42809626] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=7e-2) + + @unittest.skip("Test not supported because kolors img2img doesn't take pooled embeds as inputs unlike kolors t2i.") + def test_encode_prompt_works_in_isolation(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py new file mode 100644 index 0000000000000000000000000000000000000000..c7666244b35f59e3a101df60d09adc9ed9a9cb2d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py @@ -0,0 +1,273 @@ +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + LatentConsistencyModelPipeline, + LCMScheduler, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class LatentConsistencyModelPipelineFastTests( + 
IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = LatentConsistencyModelPipeline + params = TEXT_TO_IMAGE_PARAMS - {"negative_prompt", "negative_prompt_embeds"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"} + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=1, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=2, + time_cond_proj_dim=32, + ) + scheduler = LCMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=64, + layer_norm_eps=1e-05, + num_attention_heads=8, + num_hidden_layers=3, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + "requires_safety_checker": False, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_lcm_onestep(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = LatentConsistencyModelPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 1 + output = pipe(**inputs) + image = output.images + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.1441, 0.5304, 0.5452, 0.1361, 0.4011, 0.4370, 0.5326, 0.3492, 0.3637]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_lcm_multistep(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = LatentConsistencyModelPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs) + image = 
output.images + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = LatentConsistencyModelPipeline(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = pipe(**inputs) + image = output.images + assert image.shape == (1, 64, 64, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=5e-4) + + # skip because lcm pipeline apply cfg differently + def test_callback_cfg(self): + pass + + # override default test because the final latent variable is "denoised" instead of "latents" + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + + if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_test(pipe, i, t, callback_kwargs): + missing_callback_inputs = set() + for v in pipe._callback_tensor_inputs: + if v not in callback_kwargs: + missing_callback_inputs.add(v) + self.assertTrue( + len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" + ) + last_i = pipe.num_timesteps - 1 + if i == last_i: + callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + inputs["callback_on_step_end"] = callback_inputs_test + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + inputs["output_type"] = "latent" + + output = pipe(**inputs)[0] + assert output.abs().sum() == 0 + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class LatentConsistencyModelPipelineSlowTests(unittest.TestCase): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "latents": latents, + 
"generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_lcm_onestep(self): + pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 1 + image = pipe(**inputs).images + assert image.shape == (1, 512, 512, 3) + + image_slice = image[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730]) + assert np.abs(image_slice - expected_slice).max() < 1e-3 + + def test_lcm_multistep(self): + pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + assert image.shape == (1, 512, 512, 3) + + image_slice = image[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0]) + assert np.abs(image_slice - expected_slice).max() < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e7745b78055e826db6909f1af3f08d19ebcf0c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py @@ -0,0 +1,291 @@ +import gc +import inspect +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + LatentConsistencyModelImg2ImgPipeline, + LCMScheduler, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class LatentConsistencyModelImg2ImgPipelineFastTests( + IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = LatentConsistencyModelImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "negative_prompt", "negative_prompt_embeds"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents", "negative_prompt"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=1, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", 
"UpBlock2D"), + cross_attention_dim=32, + norm_num_groups=2, + time_cond_proj_dim=32, + ) + scheduler = LCMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=64, + layer_norm_eps=1e-05, + num_attention_heads=8, + num_hidden_layers=3, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + "requires_safety_checker": False, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.4003, 0.3718, 0.2863, 0.5500, 0.5587, 0.3772, 0.4617, 0.4961, 0.4417]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_lcm_onestep(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 1 + output = pipe(**inputs) + image = output.images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.4388, 0.3717, 0.2202, 0.7213, 0.6370, 0.3664, 0.5815, 0.6080, 0.4977]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_lcm_multistep(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = pipe(**inputs) + image = output.images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.4150, 0.3719, 0.2479, 0.6333, 0.6024, 0.3778, 0.5036, 0.5420, 0.4678]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = 
self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = pipe(**inputs) + image = output.images + assert image.shape == (1, 32, 32, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.3994, 0.3471, 0.2540, 0.7030, 0.6193, 0.3645, 0.5777, 0.5850, 0.4965]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=5e-4) + + # override default test because the final latent variable is "denoised" instead of "latents" + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + + if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_test(pipe, i, t, callback_kwargs): + missing_callback_inputs = set() + for v in pipe._callback_tensor_inputs: + if v not in callback_kwargs: + missing_callback_inputs.add(v) + self.assertTrue( + len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" + ) + last_i = pipe.num_timesteps - 1 + if i == last_i: + callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + inputs["callback_on_step_end"] = callback_inputs_test + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + inputs["output_type"] = "latent" + + output = pipe(**inputs)[0] + assert output.abs().sum() == 0 + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class LatentConsistencyModelImg2ImgPipelineSlowTests(unittest.TestCase): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + init_image = init_image.resize((512, 512)) + + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + "image": init_image, + } + return inputs + + def test_lcm_onestep(self): + pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( + "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None + ) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = 
self.get_inputs(torch_device) + inputs["num_inference_steps"] = 1 + image = pipe(**inputs).images + assert image.shape == (1, 512, 512, 3) + + image_slice = image[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.3479, 0.3314, 0.3555, 0.3430, 0.3649, 0.3423, 0.3239, 0.3117, 0.3240]) + assert np.abs(image_slice - expected_slice).max() < 1e-3 + + def test_lcm_multistep(self): + pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( + "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None + ) + pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + assert image.shape == (1, 512, 512, 3) + + image_slice = image[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.1442, 0.1201, 0.1598, 0.1281, 0.1412, 0.1502, 0.1455, 0.1544, 0.1231]) + assert np.abs(image_slice - expected_slice).max() < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/test_latent_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/test_latent_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..21c5bcf5a5b91853e4c64e51ef2d545eda4cb2fc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/test_latent_diffusion.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
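+# Fast tests for LDMTextToImagePipeline with tiny dummy components, plus nightly integration tests against CompVis/ldm-text2im-large-256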
+ +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_numpy, + nightly, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = LDMTextToImagePipeline + params = TEXT_TO_IMAGE_PARAMS - { + "negative_prompt", + "negative_prompt_embeds", + "cross_attention_kwargs", + "prompt_embeds", + } + required_optional_params = PipelineTesterMixin.required_optional_params - { + "num_images_per_prompt", + "callback", + "callback_steps", + } + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=(32, 64), + in_channels=3, + out_channels=3, + down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), + up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vqvae": vae, + "bert": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_inference_text2img(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + pipe = LDMTextToImagePipeline(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 16, 16, 3) + expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + +@nightly +@require_torch_accelerator +class LDMTextToImagePipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + 
backend_empty_cache(torch_device) + + def get_inputs(self, device, dtype=torch.float32, seed=0): + generator = torch.manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_ldm_default_ddim(self): + pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 256, 256, 3) + expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878]) + max_diff = np.abs(expected_slice - image_slice).max() + assert max_diff < 1e-3 + + +@nightly +@require_torch_accelerator +class LDMTextToImagePipelineNightlyTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, dtype=torch.float32, seed=0): + generator = torch.manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "latents": latents, + "generator": generator, + "num_inference_steps": 50, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_ldm_default_ddim(self): + pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py new file mode 100644 index 0000000000000000000000000000000000000000..b2cbdb9f5b45bda172309e36132308598d75161f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
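+# Tests for LDMSuperResolutionPipeline: fast tests use a tiny UNet2DModel/VQModel pair, and the nightly test runs duongna/ldm-super-resolution on a real image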
+ +import random +import unittest + +import numpy as np +import torch + +from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel +from diffusers.utils import PIL_INTERPOLATION + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + load_image, + nightly, + require_accelerator, + require_torch, + torch_device, +) + + +enable_full_determinism() + + +class LDMSuperResolutionPipelineFastTests(unittest.TestCase): + @property + def dummy_image(self): + batch_size = 1 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) + return image + + @property + def dummy_uncond_unet(self): + torch.manual_seed(0) + model = UNet2DModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=6, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + return model + + @property + def dummy_vq_model(self): + torch.manual_seed(0) + model = VQModel( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=3, + ) + return model + + def test_inference_superresolution(self): + device = "cpu" + unet = self.dummy_uncond_unet + scheduler = DDIMScheduler() + vqvae = self.dummy_vq_model + + ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) + ldm.to(device) + ldm.set_progress_bar_config(disable=None) + + init_image = self.dummy_image.to(device) + + generator = torch.Generator(device=device).manual_seed(0) + image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + @require_accelerator + def test_inference_superresolution_fp16(self): + unet = self.dummy_uncond_unet + scheduler = DDIMScheduler() + vqvae = self.dummy_vq_model + + # put models in fp16 + unet = unet.half() + vqvae = vqvae.half() + + ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler) + ldm.to(torch_device) + ldm.set_progress_bar_config(disable=None) + + init_image = self.dummy_image.to(torch_device) + + image = ldm(init_image, num_inference_steps=2, output_type="np").images + + assert image.shape == (1, 64, 64, 3) + + +@nightly +@require_torch +class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase): + def test_inference_superresolution(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/vq_diffusion/teddy_bear_pool.png" + ) + init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"]) + + ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution") + ldm.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 256, 256, 3) + expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latte/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latte/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latte/test_latte.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latte/test_latte.py new file mode 100644 index 0000000000000000000000000000000000000000..a40d4bf8eede4e98f5ae162e4d26282cf1f6538e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/latte/test_latte.py @@ -0,0 +1,341 @@ +# coding=utf-8 +# Copyright 2025 Latte Team and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + FasterCacheConfig, + LattePipeline, + LatteTransformer3DModel, + PyramidAttentionBroadcastConfig, +) +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + FasterCacheTesterMixin, + PipelineTesterMixin, + PyramidAttentionBroadcastTesterMixin, + to_np, +) + + +enable_full_determinism() + + +class LattePipelineFastTests( + PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, FasterCacheTesterMixin, unittest.TestCase +): + pipeline_class = LattePipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + required_optional_params = PipelineTesterMixin.required_optional_params + test_layerwise_casting = True + test_group_offloading = True + + pab_config = PyramidAttentionBroadcastConfig( + spatial_attention_block_skip_range=2, + temporal_attention_block_skip_range=2, + cross_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(100, 700), + temporal_attention_timestep_skip_range=(100, 800), + cross_attention_timestep_skip_range=(100, 800), + spatial_attention_block_identifiers=["transformer_blocks"], + temporal_attention_block_identifiers=["temporal_transformer_blocks"], + cross_attention_block_identifiers=["transformer_blocks"], + ) + + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + temporal_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + temporal_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + ) + + def get_dummy_components(self, num_layers: int = 1): + torch.manual_seed(0) + transformer = 
LatteTransformer3DModel( + sample_size=8, + num_layers=num_layers, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + caption_channels=32, + in_channels=4, + cross_attention_dim=24, + out_channels=8, + attention_bias=True, + activation_fn="gelu-approximate", + num_embeds_ada_norm=1000, + norm_type="ada_norm_single", + norm_elementwise_affine=False, + norm_eps=1e-6, + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder.eval(), + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "negative_prompt": "low quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "video_length": 1, + "output_type": "pt", + "clean_caption": False, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (1, 3, 8, 8)) + expected_video = torch.randn(1, 3, 8, 8) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + 
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + @unittest.skip("Not supported.") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) + + @unittest.skip("Test not supported because `encode_prompt()` has multiple returns.") + def test_encode_prompt_works_in_isolation(self): + pass + + def test_save_load_optional_components(self): + if not hasattr(self.pipeline_class, "_optional_components"): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = inputs["prompt"] + generator = inputs["generator"] + + ( + prompt_embeds, + negative_prompt_embeds, + ) = pipe.encode_prompt(prompt) + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "negative_prompt": None, + "negative_prompt_embeds": negative_prompt_embeds, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 8, + "width": 8, + "video_length": 1, + "mask_feature": False, + "output_type": "pt", + "clean_caption": False, + } + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1.0) + + +@slow +@require_torch_accelerator +class LattePipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." 
+ + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_latte(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + videos = pipe( + prompt=prompt, + height=512, + width=512, + generator=generator, + num_inference_steps=2, + clean_caption=False, + ).frames + + video = videos[0] + expected_video = torch.randn(1, 512, 512, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video.flatten(), expected_video) + assert max_diff < 1e-3, f"Max diff is too high. got {video.flatten()}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..6db20a464f19517f781ef99204889ab2d27dc964 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion.py @@ -0,0 +1,280 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
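+
+# Tests for LEditsPPPipelineStableDiffusion: fast checks with tiny dummy components
+# (single-image and batched inversion, edit warm-up steps) and a slow editing run on the
+# full Stable Diffusion 1.5 checkpoint that compares an output slice against per-backend
+# expected values.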
+ +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + DPMSolverMultistepScheduler, + LEditsPPPipelineStableDiffusion, + UNet2DConditionModel, +) + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) + + +enable_full_determinism() + + +@skip_mps +class LEditsPPPipelineStableDiffusionFastTests(unittest.TestCase): + pipeline_class = LEditsPPPipelineStableDiffusion + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = DPMSolverMultistepScheduler(algorithm_type="sde-dpmsolver++", solver_order=2) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "generator": generator, + "editing_prompt": ["wearing glasses", "sunshine"], + "reverse_editing_direction": [False, True], + "edit_guidance_scale": [10.0, 5.0], + } + return inputs + + def get_dummy_inversion_inputs(self, device, seed=0): + images = floats_tensor((2, 3, 32, 32), rng=random.Random(0)).cpu().permute(0, 2, 3, 1) + images = 255 * images + image_1 = Image.fromarray(np.uint8(images[0])).convert("RGB") + image_2 = Image.fromarray(np.uint8(images[1])).convert("RGB") + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "image": [image_1, image_2], + "source_prompt": "", + "source_guidance_scale": 3.5, + "num_inversion_steps": 20, + "skip": 0.15, + "generator": generator, + } + return inputs + + def test_ledits_pp_inversion(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = LEditsPPPipelineStableDiffusion(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inversion_inputs(device) + inputs["image"] = inputs["image"][0] + sd_pipe.invert(**inputs) + assert sd_pipe.init_latents.shape == ( + 1, + 4, + int(32 / 
sd_pipe.vae_scale_factor), + int(32 / sd_pipe.vae_scale_factor), + ) + + latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) + + expected_slice = np.array([-0.9084, -0.0367, 0.2940, 0.0839, 0.6890, 0.2651, -0.7104, 2.1090, -0.7822]) + assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 + + def test_ledits_pp_inversion_batch(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = LEditsPPPipelineStableDiffusion(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inversion_inputs(device) + sd_pipe.invert(**inputs) + assert sd_pipe.init_latents.shape == ( + 2, + 4, + int(32 / sd_pipe.vae_scale_factor), + int(32 / sd_pipe.vae_scale_factor), + ) + + latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) + + expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5657, -1.0286, -0.9961, 0.5933, 1.1173]) + assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 + + latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device) + + expected_slice = np.array([-0.0796, 2.0583, 0.5501, 0.5358, 0.0282, -0.2803, -1.0470, 0.7023, -0.0072]) + assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 + + def test_ledits_pp_warmup_steps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = LEditsPPPipelineStableDiffusion(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inversion_inputs = self.get_dummy_inversion_inputs(device) + pipe.invert(**inversion_inputs) + + inputs = self.get_dummy_inputs(device) + + inputs["edit_warmup_steps"] = [0, 5] + pipe(**inputs).images + + inputs["edit_warmup_steps"] = [5, 0] + pipe(**inputs).images + + inputs["edit_warmup_steps"] = [5, 10] + pipe(**inputs).images + + inputs["edit_warmup_steps"] = [10, 5] + pipe(**inputs).images + + +@slow +@require_torch_accelerator +class LEditsPPPipelineStableDiffusionSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @classmethod + def setUpClass(cls): + raw_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png" + ) + raw_image = raw_image.convert("RGB").resize((512, 512)) + cls.raw_image = raw_image + + def test_ledits_pp_editing(self): + pipe = LEditsPPPipelineStableDiffusion.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None, torch_dtype=torch.float16 + ) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + _ = pipe.invert(image=self.raw_image, generator=generator) + generator = torch.manual_seed(0) + inputs = { + "generator": generator, + "editing_prompt": ["cat", "dog"], + "reverse_editing_direction": [True, False], + "edit_guidance_scale": [5.0, 5.0], + "edit_threshold": [0.8, 0.8], + } + reconstruction = pipe(**inputs, output_type="np").images[0] + + output_slice = reconstruction[150:153, 140:143, -1] + output_slice = output_slice.flatten() + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.9511719, + 0.94140625, + 0.87597656, + 0.9472656, + 0.9296875, + 0.8378906, + 0.94433594, + 0.91503906, + 0.8491211, + ] + ), + ("cuda", 7): 
np.array( + [ + 0.9453125, + 0.93310547, + 0.84521484, + 0.94628906, + 0.9111328, + 0.80859375, + 0.93847656, + 0.9042969, + 0.8144531, + ] + ), + } + ) + expected_slice = expected_slices.get_expectation() + assert np.abs(output_slice - expected_slice).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..06c1ceb0cf5a224102ea216c3a37421e6a152d8e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ledits_pp/test_ledits_pp_stable_diffusion_xl.py @@ -0,0 +1,289 @@ +# coding=utf-8 +# Copyright 2023 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + AutoencoderKL, + DPMSolverMultistepScheduler, + LEditsPPPipelineStableDiffusionXL, + UNet2DConditionModel, +) + +# from diffusers.image_processor import VaeImageProcessor +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) + + +enable_full_determinism() + + +@skip_mps +class LEditsPPPipelineStableDiffusionXLFastTests(unittest.TestCase): + pipeline_class = LEditsPPPipelineStableDiffusionXL + + def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + time_cond_proj_dim=time_cond_proj_dim, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + scheduler = DPMSolverMultistepScheduler(algorithm_type="sde-dpmsolver++", solver_order=2) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + image_size=224, + projection_dim=32, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + 
feature_extractor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "generator": generator, + "editing_prompt": ["wearing glasses", "sunshine"], + "reverse_editing_direction": [False, True], + "edit_guidance_scale": [10.0, 5.0], + } + return inputs + + def get_dummy_inversion_inputs(self, device, seed=0): + images = floats_tensor((2, 3, 32, 32), rng=random.Random(0)).cpu().permute(0, 2, 3, 1) + images = 255 * images + image_1 = Image.fromarray(np.uint8(images[0])).convert("RGB") + image_2 = Image.fromarray(np.uint8(images[1])).convert("RGB") + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "image": [image_1, image_2], + "source_prompt": "", + "source_guidance_scale": 3.5, + "num_inversion_steps": 20, + "skip": 0.15, + "generator": generator, + } + return inputs + + def test_ledits_pp_inversion(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = LEditsPPPipelineStableDiffusionXL(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inversion_inputs(device) + inputs["image"] = inputs["image"][0] + sd_pipe.invert(**inputs) + assert sd_pipe.init_latents.shape == ( + 1, + 4, + int(32 / sd_pipe.vae_scale_factor), + int(32 / sd_pipe.vae_scale_factor), + ) + + latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) + expected_slice = np.array([-0.9084, -0.0367, 0.2940, 0.0839, 0.6890, 0.2651, -0.7103, 2.1090, -0.7821]) + assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 + + def test_ledits_pp_inversion_batch(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = LEditsPPPipelineStableDiffusionXL(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inversion_inputs(device) + sd_pipe.invert(**inputs) + assert sd_pipe.init_latents.shape == ( + 2, + 4, + int(32 / 
sd_pipe.vae_scale_factor), + int(32 / sd_pipe.vae_scale_factor), + ) + + latent_slice = sd_pipe.init_latents[0, -1, -3:, -3:].to(device) + + expected_slice = np.array([0.2528, 0.1458, -0.2166, 0.4565, -0.5656, -1.0286, -0.9961, 0.5933, 1.1172]) + assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 + + latent_slice = sd_pipe.init_latents[1, -1, -3:, -3:].to(device) + + expected_slice = np.array([-0.0796, 2.0583, 0.5500, 0.5358, 0.0282, -0.2803, -1.0470, 0.7024, -0.0072]) + + assert np.abs(latent_slice.flatten() - expected_slice).max() < 1e-3 + + def test_ledits_pp_warmup_steps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = LEditsPPPipelineStableDiffusionXL(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inversion_inputs = self.get_dummy_inversion_inputs(device) + inversion_inputs["image"] = inversion_inputs["image"][0] + pipe.invert(**inversion_inputs) + + inputs = self.get_dummy_inputs(device) + + inputs["edit_warmup_steps"] = [0, 5] + pipe(**inputs).images + + inputs["edit_warmup_steps"] = [5, 0] + pipe(**inputs).images + + inputs["edit_warmup_steps"] = [5, 10] + pipe(**inputs).images + + inputs["edit_warmup_steps"] = [10, 5] + pipe(**inputs).images + + +@slow +@require_torch_accelerator +class LEditsPPPipelineStableDiffusionXLSlowTests(unittest.TestCase): + @classmethod + def setUpClass(cls): + raw_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png" + ) + raw_image = raw_image.convert("RGB").resize((512, 512)) + cls.raw_image = raw_image + + def test_ledits_pp_edit(self): + pipe = LEditsPPPipelineStableDiffusionXL.from_pretrained( + "stabilityai/stable-diffusion-xl-base-1.0", safety_checker=None, add_watermarker=None + ) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + _ = pipe.invert(image=self.raw_image, generator=generator, num_zero_noise_steps=0) + inputs = { + "generator": generator, + "editing_prompt": ["cat", "dog"], + "reverse_editing_direction": [True, False], + "edit_guidance_scale": [2.0, 4.0], + "edit_threshold": [0.8, 0.8], + } + reconstruction = pipe(**inputs, output_type="np").images[0] + + output_slice = reconstruction[150:153, 140:143, -1] + output_slice = output_slice.flatten() + expected_slice = np.array( + [0.56419, 0.44121838, 0.2765603, 0.5708484, 0.42763475, 0.30945742, 0.5387106, 0.4735807, 0.3547244] + ) + assert np.abs(output_slice - expected_slice).max() < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx.py new file mode 100644 index 0000000000000000000000000000000000000000..aaf4161b51fb46d6b8bd00e82a2cf104ffef2db6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx.py @@ -0,0 +1,267 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLLTXVideo, FlowMatchEulerDiscreteScheduler, LTXPipeline, LTXVideoTransformer3DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import FirstBlockCacheTesterMixin, PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class LTXPipelineFastTests(PipelineTesterMixin, FirstBlockCacheTesterMixin, unittest.TestCase): + pipeline_class = LTXPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 1): + torch.manual_seed(0) + transformer = LTXVideoTransformer3DModel( + in_channels=8, + out_channels=8, + patch_size=1, + patch_size_t=1, + num_attention_heads=4, + attention_head_dim=8, + cross_attention_dim=32, + num_layers=num_layers, + caption_channels=32, + ) + + torch.manual_seed(0) + vae = AutoencoderKLLTXVideo( + in_channels=3, + out_channels=3, + latent_channels=8, + block_out_channels=(8, 8, 8, 8), + decoder_block_out_channels=(8, 8, 8, 8), + layers_per_block=(1, 1, 1, 1, 1), + decoder_layers_per_block=(1, 1, 1, 1, 1), + spatio_temporal_scaling=(True, True, False, False), + decoder_spatio_temporal_scaling=(True, True, False, False), + decoder_inject_noise=(False, False, False, False, False), + upsample_residual=(False, False, False, False), + upsample_factor=(1, 1, 1, 1), + timestep_conditioning=False, + patch_size=1, + patch_size_t=1, + encoder_causal=True, + decoder_causal=False, + ) + vae.use_framewise_encoding = False + vae.use_framewise_decoding = False + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 32, + "width": 32, + # 8 * k + 1 is the recommendation + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def 
test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + expected_video = torch.randn(9, 3, 32, 32) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = 
self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_condition.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_condition.py new file mode 100644 index 0000000000000000000000000000000000000000..f5dfb018620902617d9c527b9620c1fb8a5503e3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_condition.py @@ -0,0 +1,284 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
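+
+# Tests for LTXConditionPipeline. test_inference verifies that conditioning on a raw image
+# and on an LTXVideoCondition wrapper produce matching videos; the remaining tests cover
+# callbacks, batching, attention slicing, and VAE tiling.
+#
+# A minimal sketch of the two call styles exercised here (argument names as used in this file):
+#     pipe(image=image, prompt="dance monkey", ...)
+#     pipe(conditions=LTXVideoCondition(image=image), prompt="dance monkey", ...)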
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLLTXVideo, + FlowMatchEulerDiscreteScheduler, + LTXConditionPipeline, + LTXVideoTransformer3DModel, +) +from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXVideoCondition + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class LTXConditionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = LTXConditionPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = LTXVideoTransformer3DModel( + in_channels=8, + out_channels=8, + patch_size=1, + patch_size_t=1, + num_attention_heads=4, + attention_head_dim=8, + cross_attention_dim=32, + num_layers=1, + caption_channels=32, + ) + + torch.manual_seed(0) + vae = AutoencoderKLLTXVideo( + in_channels=3, + out_channels=3, + latent_channels=8, + block_out_channels=(8, 8, 8, 8), + decoder_block_out_channels=(8, 8, 8, 8), + layers_per_block=(1, 1, 1, 1, 1), + decoder_layers_per_block=(1, 1, 1, 1, 1), + spatio_temporal_scaling=(True, True, False, False), + decoder_spatio_temporal_scaling=(True, True, False, False), + decoder_inject_noise=(False, False, False, False, False), + upsample_residual=(False, False, False, False), + upsample_factor=(1, 1, 1, 1), + timestep_conditioning=False, + patch_size=1, + patch_size_t=1, + encoder_causal=True, + decoder_causal=False, + ) + vae.use_framewise_encoding = False + vae.use_framewise_decoding = False + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0, use_conditions=False): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = torch.randn((1, 3, 32, 32), generator=generator, device=device) + if use_conditions: + conditions = LTXVideoCondition( + image=image, + ) + else: + conditions = None + + inputs = { + "conditions": conditions, + "image": None if use_conditions else image, + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 32, + "width": 32, + # 8 * k + 1 is the recommendation + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + 
pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs2 = self.get_dummy_inputs(device, use_conditions=True) + video = pipe(**inputs).frames + generated_video = video[0] + video2 = pipe(**inputs2).frames + generated_video2 = video2[0] + + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + + max_diff = np.abs(generated_video - generated_video2).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = 
pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_image2video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_image2video.py new file mode 100644 index 0000000000000000000000000000000000000000..2702993d4a593328df353f2aa1c5892a0f28664e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_image2video.py @@ -0,0 +1,273 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
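+
+# Tests for LTXImageToVideoPipeline with tiny dummy components: output shape, callback
+# inputs, single-vs-batched consistency, attention slicing, and VAE tiling.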
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLLTXVideo, + FlowMatchEulerDiscreteScheduler, + LTXImageToVideoPipeline, + LTXVideoTransformer3DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class LTXImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = LTXImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"image"}) + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = LTXVideoTransformer3DModel( + in_channels=8, + out_channels=8, + patch_size=1, + patch_size_t=1, + num_attention_heads=4, + attention_head_dim=8, + cross_attention_dim=32, + num_layers=1, + caption_channels=32, + ) + + torch.manual_seed(0) + vae = AutoencoderKLLTXVideo( + in_channels=3, + out_channels=3, + latent_channels=8, + block_out_channels=(8, 8, 8, 8), + decoder_block_out_channels=(8, 8, 8, 8), + layers_per_block=(1, 1, 1, 1, 1), + decoder_layers_per_block=(1, 1, 1, 1, 1), + spatio_temporal_scaling=(True, True, False, False), + decoder_spatio_temporal_scaling=(True, True, False, False), + decoder_inject_noise=(False, False, False, False, False), + upsample_residual=(False, False, False, False), + upsample_factor=(1, 1, 1, 1), + timestep_conditioning=False, + patch_size=1, + patch_size_t=1, + encoder_causal=True, + decoder_causal=False, + ) + vae.use_framewise_encoding = False + vae.use_framewise_decoding = False + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + image = torch.rand((1, 3, 32, 32), generator=generator, device=device) + + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "height": 32, + "width": 32, + # 8 * k + 1 is the recommendation + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + expected_video = torch.randn(9, 3, 32, 32) + max_diff = 
torch.amax(torch.abs(generated_video - expected_video)) + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - 
to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_latent_upsample.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_latent_upsample.py new file mode 100644 index 0000000000000000000000000000000000000000..0044a85c644bd2fffe862737ec8ec959449a3c8b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/ltx/test_ltx_latent_upsample.py @@ -0,0 +1,159 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
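+
+# Tests for LTXLatentUpsamplePipeline (tiny AutoencoderKLLTXVideo + LTXLatentUpsamplerModel):
+# output shape and VAE tiling; the common callback, batching, and attention-slicing tests
+# are skipped as not applicable.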
+ +import unittest + +import numpy as np +import torch + +from diffusers import AutoencoderKLLTXVideo, LTXLatentUpsamplePipeline +from diffusers.pipelines.ltx.modeling_latent_upsampler import LTXLatentUpsamplerModel + +from ...testing_utils import enable_full_determinism +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class LTXLatentUpsamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = LTXLatentUpsamplePipeline + params = {"video", "generator"} + batch_params = {"video", "generator"} + required_optional_params = frozenset(["generator", "latents", "return_dict"]) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLLTXVideo( + in_channels=3, + out_channels=3, + latent_channels=8, + block_out_channels=(8, 8, 8, 8), + decoder_block_out_channels=(8, 8, 8, 8), + layers_per_block=(1, 1, 1, 1, 1), + decoder_layers_per_block=(1, 1, 1, 1, 1), + spatio_temporal_scaling=(True, True, False, False), + decoder_spatio_temporal_scaling=(True, True, False, False), + decoder_inject_noise=(False, False, False, False, False), + upsample_residual=(False, False, False, False), + upsample_factor=(1, 1, 1, 1), + timestep_conditioning=False, + patch_size=1, + patch_size_t=1, + encoder_causal=True, + decoder_causal=False, + ) + vae.use_framewise_encoding = False + vae.use_framewise_decoding = False + + torch.manual_seed(0) + latent_upsampler = LTXLatentUpsamplerModel( + in_channels=8, + mid_channels=32, + num_blocks_per_stage=1, + dims=3, + spatial_upsample=True, + temporal_upsample=False, + ) + + components = { + "vae": vae, + "latent_upsampler": latent_upsampler, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video = torch.randn((5, 3, 32, 32), generator=generator, device=device) + + inputs = { + "video": video, + "generator": generator, + "height": 16, + "width": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (5, 3, 32, 32)) + expected_video = torch.randn(5, 3, 32, 32) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_vae_tiling(self, expected_diff_max: float = 0.25): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + 
@unittest.skip("Test is not applicable.") + def test_callback_inputs(self): + pass + + @unittest.skip("Test is not applicable.") + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + pass + + @unittest.skip("Test is not applicable.") + def test_inference_batch_consistent(self): + pass + + @unittest.skip("Test is not applicable.") + def test_inference_batch_single_identical(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina/test_lumina_nextdit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina/test_lumina_nextdit.py new file mode 100644 index 0000000000000000000000000000000000000000..d2c114825d34868ba6e15380e80fcde33572c83e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina/test_lumina_nextdit.py @@ -0,0 +1,164 @@ +import gc +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, GemmaConfig, GemmaForCausalLM + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + LuminaNextDiT2DModel, + LuminaPipeline, +) + +from ...testing_utils import ( + backend_empty_cache, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +class LuminaPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = LuminaPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + + supports_dduf = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = LuminaNextDiT2DModel( + sample_size=4, + patch_size=2, + in_channels=4, + hidden_size=4, + num_layers=2, + num_attention_heads=1, + num_kv_heads=1, + multiple_of=16, + ffn_dim_multiplier=None, + norm_eps=1e-5, + learn_sigma=True, + qk_norm=True, + cross_attention_dim=8, + scaling_factor=1.0, + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = FlowMatchEulerDiscreteScheduler() + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + torch.manual_seed(0) + config = GemmaConfig( + head_dim=2, + hidden_size=8, + intermediate_size=37, + num_attention_heads=4, + num_hidden_layers=2, + num_key_value_heads=4, + ) + text_encoder = GemmaForCausalLM(config) + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder.eval(), + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + } + return inputs + + @unittest.skip("xformers attention processor does not exist for Lumina") + def test_xformers_attention_forwardGenerator_pass(self): + pass + + +@slow 
+@require_torch_accelerator +class LuminaPipelineSlowTests(unittest.TestCase): + pipeline_class = LuminaPipeline + repo_id = "Alpha-VLLM/Lumina-Next-SFT-diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + return { + "prompt": "A photo of a cat", + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "generator": generator, + } + + def test_lumina_inference(self): + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16) + pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10] + expected_slice = np.array( + [ + [0.17773438, 0.18554688, 0.22070312], + [0.046875, 0.06640625, 0.10351562], + [0.0, 0.0, 0.02148438], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + ], + dtype=np.float32, + ) + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina2/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina2/test_pipeline_lumina2.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina2/test_pipeline_lumina2.py new file mode 100644 index 0000000000000000000000000000000000000000..d6d21b72a4ce726c07af3d976e765093837d5257 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/lumina2/test_pipeline_lumina2.py @@ -0,0 +1,117 @@ +import unittest + +import torch +from transformers import AutoTokenizer, Gemma2Config, Gemma2Model + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + Lumina2Pipeline, + Lumina2Transformer2DModel, +) + +from ..test_pipelines_common import PipelineTesterMixin + + +class Lumina2PipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = Lumina2Pipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = Lumina2Transformer2DModel( + sample_size=4, + patch_size=2, + in_channels=4, + hidden_size=8, + num_layers=2, + num_attention_heads=1, + num_kv_heads=1, + multiple_of=16, + ffn_dim_multiplier=None, + norm_eps=1e-5, + scaling_factor=1.0, + axes_dim_rope=[4, 2, 2], + cap_feat_dim=8, + ) + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + 
use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + torch.manual_seed(0) + config = Gemma2Config( + head_dim=4, + hidden_size=8, + intermediate_size=8, + num_attention_heads=2, + num_hidden_layers=2, + num_key_value_heads=2, + sliding_window=2, + ) + text_encoder = Gemma2Model(config) + + components = { + "transformer": transformer, + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "height": 32, + "width": 32, + "output_type": "np", + } + return inputs diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_depth.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8ccbf5c07e7015886eba0c29083ec4e9c9a541 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_depth.py @@ -0,0 +1,463 @@ +# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. +# Copyright 2024-2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# -------------------------------------------------------------------------- +# More information and citation instructions are available on the +# Marigold project website: https://marigoldcomputervision.github.io +# -------------------------------------------------------------------------- +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoencoderTiny, + LCMScheduler, + MarigoldDepthPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + is_flaky, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class MarigoldDepthPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = MarigoldDepthPipeline + params = frozenset(["image"]) + batch_params = frozenset(["image"]) + image_params = frozenset(["image"]) + image_latents_params = frozenset(["latents"]) + callback_cfg_params = frozenset([]) + test_xformers_attention = False + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "output_type", + ] + ) + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=8, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = LCMScheduler( + beta_start=0.00085, + beta_end=0.012, + prediction_type="v_prediction", + set_alpha_to_one=False, + steps_offset=1, + beta_schedule="scaled_linear", + clip_sample=False, + thresholding=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "prediction_type": "depth", + "scale_invariant": True, + "shift_invariant": True, + } + return components + + def get_dummy_tiny_autoencoder(self): + return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": image, + "num_inference_steps": 1, + "processing_resolution": 0, + "generator": generator, + "output_type": "np", + } + return inputs + + def _test_marigold_depth( + self, + generator_seed: int = 0, + expected_slice: np.ndarray = None, + atol: float = 1e-4, + **pipe_kwargs, + ): + device 
= "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + pipe_inputs = self.get_dummy_inputs(device, seed=generator_seed) + pipe_inputs.update(**pipe_kwargs) + + prediction = pipe(**pipe_inputs).prediction + + prediction_slice = prediction[0, -3:, -3:, -1].flatten() + + if pipe_inputs.get("match_input_resolution", True): + self.assertEqual(prediction.shape, (1, 32, 32, 1), "Unexpected output resolution") + else: + self.assertTrue(prediction.shape[0] == 1 and prediction.shape[3] == 1, "Unexpected output dimensions") + self.assertEqual( + max(prediction.shape[1:3]), + pipe_inputs.get("processing_resolution", 768), + "Unexpected output resolution", + ) + + self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) + + def test_marigold_depth_dummy_defaults(self): + self._test_marigold_depth( + expected_slice=np.array([0.4529, 0.5184, 0.4985, 0.4355, 0.4273, 0.4153, 0.5229, 0.4818, 0.4627]), + ) + + def test_marigold_depth_dummy_G0_S1_P32_E1_B1_M1(self): + self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.4529, 0.5184, 0.4985, 0.4355, 0.4273, 0.4153, 0.5229, 0.4818, 0.4627]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M1(self): + self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.4511, 0.4531, 0.4542, 0.5024, 0.4987, 0.4969, 0.5281, 0.5215, 0.5182]), + num_inference_steps=1, + processing_resolution=16, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G2024_S1_P32_E1_B1_M1(self): + self._test_marigold_depth( + generator_seed=2024, + expected_slice=np.array([0.4671, 0.4739, 0.5130, 0.4308, 0.4411, 0.4720, 0.5064, 0.4796, 0.4795]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S2_P32_E1_B1_M1(self): + self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.4165, 0.4485, 0.4647, 0.4003, 0.4577, 0.5074, 0.5106, 0.5077, 0.5042]), + num_inference_steps=2, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P64_E1_B1_M1(self): + self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.4817, 0.5425, 0.5146, 0.5367, 0.5034, 0.4743, 0.4395, 0.4734, 0.4399]), + num_inference_steps=1, + processing_resolution=64, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + @is_flaky + def test_marigold_depth_dummy_G0_S1_P32_E3_B1_M1(self): + self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.3260, 0.3591, 0.2837, 0.2971, 0.2750, 0.2426, 0.4200, 0.3588, 0.3254]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=3, + ensembling_kwargs={"reduction": "mean"}, + batch_size=1, + match_input_resolution=True, + ) + + @is_flaky + def test_marigold_depth_dummy_G0_S1_P32_E4_B2_M1(self): + self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.3180, 0.4194, 0.3013, 0.2902, 0.3245, 0.2897, 0.4718, 0.4174, 0.3705]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=4, + ensembling_kwargs={"reduction": "mean"}, + batch_size=2, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M0(self): + 
self._test_marigold_depth( + generator_seed=0, + expected_slice=np.array([0.5515, 0.4588, 0.4197, 0.4741, 0.4229, 0.4328, 0.5333, 0.5314, 0.5182]), + num_inference_steps=1, + processing_resolution=16, + ensemble_size=1, + batch_size=1, + match_input_resolution=False, + ) + + def test_marigold_depth_dummy_no_num_inference_steps(self): + with self.assertRaises(ValueError) as e: + self._test_marigold_depth( + num_inference_steps=None, + expected_slice=np.array([0.0]), + ) + self.assertIn("num_inference_steps", str(e)) + + def test_marigold_depth_dummy_no_processing_resolution(self): + with self.assertRaises(ValueError) as e: + self._test_marigold_depth( + processing_resolution=None, + expected_slice=np.array([0.0]), + ) + self.assertIn("processing_resolution", str(e)) + + +@slow +@require_torch_accelerator +class MarigoldDepthPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def _test_marigold_depth( + self, + is_fp16: bool = True, + device: str = "cuda", + generator_seed: int = 0, + expected_slice: np.ndarray = None, + model_id: str = "prs-eth/marigold-lcm-v1-0", + image_url: str = "https://marigoldmonodepth.github.io/images/einstein.jpg", + atol: float = 1e-4, + **pipe_kwargs, + ): + from_pretrained_kwargs = {} + if is_fp16: + from_pretrained_kwargs["variant"] = "fp16" + from_pretrained_kwargs["torch_dtype"] = torch.float16 + + pipe = MarigoldDepthPipeline.from_pretrained(model_id, **from_pretrained_kwargs) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=device).manual_seed(generator_seed) + + image = load_image(image_url) + width, height = image.size + + prediction = pipe(image, generator=generator, **pipe_kwargs).prediction + + prediction_slice = prediction[0, -3:, -3:, -1].flatten() + + if pipe_kwargs.get("match_input_resolution", True): + self.assertEqual(prediction.shape, (1, height, width, 1), "Unexpected output resolution") + else: + self.assertTrue(prediction.shape[0] == 1 and prediction.shape[3] == 1, "Unexpected output dimensions") + self.assertEqual( + max(prediction.shape[1:3]), + pipe_kwargs.get("processing_resolution", 768), + "Unexpected output resolution", + ) + + self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) + + def test_marigold_depth_einstein_f32_cpu_G0_S1_P32_E1_B1_M1(self): + self._test_marigold_depth( + is_fp16=False, + device="cpu", + generator_seed=0, + expected_slice=np.array([0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323, 0.4323]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f32_cuda_G0_S1_P768_E1_B1_M1(self): + self._test_marigold_depth( + is_fp16=False, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.1244, 0.1265, 0.1292, 0.1240, 0.1252, 0.1266, 0.1246, 0.1226, 0.1180]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E1_B1_M1(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.1241, 0.1262, 0.1290, 0.1238, 0.1250, 0.1265, 0.1244, 0.1225, 0.1179]), + num_inference_steps=1, + processing_resolution=768, + 
ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G2024_S1_P768_E1_B1_M1(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=2024, + expected_slice=np.array([0.1710, 0.1725, 0.1738, 0.1700, 0.1700, 0.1696, 0.1698, 0.1663, 0.1592]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G0_S2_P768_E1_B1_M1(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.1085, 0.1098, 0.1110, 0.1081, 0.1085, 0.1082, 0.1085, 0.1057, 0.0996]), + num_inference_steps=2, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M1(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.2683, 0.2693, 0.2698, 0.2666, 0.2632, 0.2615, 0.2656, 0.2603, 0.2573]), + num_inference_steps=1, + processing_resolution=512, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E3_B1_M1(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.1200, 0.1215, 0.1237, 0.1193, 0.1197, 0.1202, 0.1196, 0.1166, 0.1109]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=3, + ensembling_kwargs={"reduction": "mean"}, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G0_S1_P768_E4_B2_M1(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.1121, 0.1135, 0.1155, 0.1111, 0.1115, 0.1118, 0.1111, 0.1079, 0.1019]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=4, + ensembling_kwargs={"reduction": "mean"}, + batch_size=2, + match_input_resolution=True, + ) + + def test_marigold_depth_einstein_f16_cuda_G0_S1_P512_E1_B1_M0(self): + self._test_marigold_depth( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.2671, 0.2690, 0.2720, 0.2659, 0.2676, 0.2739, 0.2664, 0.2686, 0.2573]), + num_inference_steps=1, + processing_resolution=512, + ensemble_size=1, + batch_size=1, + match_input_resolution=False, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_intrinsics.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_intrinsics.py new file mode 100644 index 0000000000000000000000000000000000000000..3f7ab9bf6e1780077cc2cf6c3d0862dc2010302c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_intrinsics.py @@ -0,0 +1,572 @@ +# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. +# Copyright 2024-2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# -------------------------------------------------------------------------- +# More information and citation instructions are available on the +# Marigold project website: https://marigoldcomputervision.github.io +# -------------------------------------------------------------------------- +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AutoencoderKL, + AutoencoderTiny, + DDIMScheduler, + MarigoldIntrinsicsPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class MarigoldIntrinsicsPipelineTesterMixin(PipelineTesterMixin): + def _test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = diffusers.logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size * output[0].shape[0] # only changed here + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + def _test_inference_batch_consistent( + self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"], batch_generator=True + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["generator"] = self.get_generator(0) + + logger = diffusers.logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # prepare batched inputs + batched_inputs = [] + for batch_size in batch_sizes: + batched_input = {} + batched_input.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + # 
make unequal batch sizes + batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + + # make last batch super long + batched_input[name][-1] = 100 * "very long" + + else: + batched_input[name] = batch_size * [value] + + if batch_generator and "generator" in inputs: + batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_input["batch_size"] = batch_size + + batched_inputs.append(batched_input) + + logger.setLevel(level=diffusers.logging.WARNING) + for batch_size, batched_input in zip(batch_sizes, batched_inputs): + output = pipe(**batched_input) + assert len(output[0]) == batch_size * pipe.n_targets # only changed here + + +class MarigoldIntrinsicsPipelineFastTests(MarigoldIntrinsicsPipelineTesterMixin, unittest.TestCase): + pipeline_class = MarigoldIntrinsicsPipeline + params = frozenset(["image"]) + batch_params = frozenset(["image"]) + image_params = frozenset(["image"]) + image_latents_params = frozenset(["latents"]) + callback_cfg_params = frozenset([]) + test_xformers_attention = False + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "output_type", + ] + ) + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=12, + out_channels=8, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + prediction_type="v_prediction", + set_alpha_to_one=False, + steps_offset=1, + beta_schedule="scaled_linear", + clip_sample=False, + thresholding=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "prediction_type": "intrinsics", + } + return components + + def get_dummy_tiny_autoencoder(self): + return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": image, + "num_inference_steps": 1, + "processing_resolution": 0, + "generator": generator, + "output_type": "np", + } + return inputs + + def _test_marigold_intrinsics( + self, + generator_seed: int = 0, + expected_slice: np.ndarray = None, + atol: float = 1e-4, + **pipe_kwargs, + ): + device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to(device) 
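+ # Each caller overrides these dummy defaults via pipe_kwargs; the bottom-right 3x3 patch of the + # prediction's last channel is then compared against a hard-coded expected_slice.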
+ pipe.set_progress_bar_config(disable=None) + + pipe_inputs = self.get_dummy_inputs(device, seed=generator_seed) + pipe_inputs.update(**pipe_kwargs) + + prediction = pipe(**pipe_inputs).prediction + + prediction_slice = prediction[0, -3:, -3:, -1].flatten() + + if pipe_inputs.get("match_input_resolution", True): + self.assertEqual(prediction.shape, (2, 32, 32, 3), "Unexpected output resolution") + else: + self.assertTrue(prediction.shape[0] == 2 and prediction.shape[3] == 3, "Unexpected output dimensions") + self.assertEqual( + max(prediction.shape[1:3]), + pipe_inputs.get("processing_resolution", 768), + "Unexpected output resolution", + ) + + np.set_printoptions(precision=5, suppress=True) + msg = f"{prediction_slice}" + self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol), msg) + # self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) + + def test_marigold_depth_dummy_defaults(self): + self._test_marigold_intrinsics( + expected_slice=np.array([0.6423, 0.40664, 0.41185, 0.65832, 0.63935, 0.43971, 0.51786, 0.55216, 0.47683]), + ) + + def test_marigold_depth_dummy_G0_S1_P32_E1_B1_M1(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.6423, 0.40664, 0.41185, 0.65832, 0.63935, 0.43971, 0.51786, 0.55216, 0.47683]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M1(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.53132, 0.44487, 0.40164, 0.5326, 0.49073, 0.46979, 0.53324, 0.51366, 0.50387]), + num_inference_steps=1, + processing_resolution=16, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G2024_S1_P32_E1_B1_M1(self): + self._test_marigold_intrinsics( + generator_seed=2024, + expected_slice=np.array([0.40257, 0.39468, 0.51373, 0.4161, 0.40162, 0.58535, 0.43581, 0.47834, 0.48951]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S2_P32_E1_B1_M1(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.49636, 0.4518, 0.42722, 0.59044, 0.6362, 0.39011, 0.53522, 0.55153, 0.48699]), + num_inference_steps=2, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P64_E1_B1_M1(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.55547, 0.43511, 0.4887, 0.56399, 0.63867, 0.56337, 0.47889, 0.52925, 0.49235]), + num_inference_steps=1, + processing_resolution=64, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P32_E3_B1_M1(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.57249, 0.49824, 0.54438, 0.57733, 0.52404, 0.5255, 0.56493, 0.56336, 0.48579]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=3, + ensembling_kwargs={"reduction": "mean"}, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P32_E4_B2_M1(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.6294, 0.5575, 0.53414, 0.61077, 0.57156, 0.53974, 0.52956, 0.55467, 0.48751]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=4, + ensembling_kwargs={"reduction": "mean"}, + 
batch_size=2, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M0(self): + self._test_marigold_intrinsics( + generator_seed=0, + expected_slice=np.array([0.63511, 0.68137, 0.48783, 0.46689, 0.58505, 0.36757, 0.58465, 0.54302, 0.50387]), + num_inference_steps=1, + processing_resolution=16, + ensemble_size=1, + batch_size=1, + match_input_resolution=False, + ) + + def test_marigold_depth_dummy_no_num_inference_steps(self): + with self.assertRaises(ValueError) as e: + self._test_marigold_intrinsics( + num_inference_steps=None, + expected_slice=np.array([0.0]), + ) + self.assertIn("num_inference_steps", str(e)) + + def test_marigold_depth_dummy_no_processing_resolution(self): + with self.assertRaises(ValueError) as e: + self._test_marigold_intrinsics( + processing_resolution=None, + expected_slice=np.array([0.0]), + ) + self.assertIn("processing_resolution", str(e)) + + +@slow +@require_torch_accelerator +class MarigoldIntrinsicsPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def _test_marigold_intrinsics( + self, + is_fp16: bool = True, + device: str = "cuda", + generator_seed: int = 0, + expected_slice: np.ndarray = None, + model_id: str = "prs-eth/marigold-iid-appearance-v1-1", + image_url: str = "https://marigoldmonodepth.github.io/images/einstein.jpg", + atol: float = 1e-4, + **pipe_kwargs, + ): + from_pretrained_kwargs = {} + if is_fp16: + from_pretrained_kwargs["variant"] = "fp16" + from_pretrained_kwargs["torch_dtype"] = torch.float16 + + pipe = MarigoldIntrinsicsPipeline.from_pretrained(model_id, **from_pretrained_kwargs) + if device in ["cuda", "xpu"]: + pipe.enable_model_cpu_offload() + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=device).manual_seed(generator_seed) + + image = load_image(image_url) + width, height = image.size + + prediction = pipe(image, generator=generator, **pipe_kwargs).prediction + + prediction_slice = prediction[0, -3:, -3:, -1].flatten() + + if pipe_kwargs.get("match_input_resolution", True): + self.assertEqual(prediction.shape, (2, height, width, 3), "Unexpected output resolution") + else: + self.assertTrue(prediction.shape[0] == 2 and prediction.shape[3] == 3, "Unexpected output dimensions") + self.assertEqual( + max(prediction.shape[1:3]), + pipe_kwargs.get("processing_resolution", 768), + "Unexpected output resolution", + ) + + msg = f"{prediction_slice}" + self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol), msg) + # self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) + + def test_marigold_intrinsics_einstein_f32_cpu_G0_S1_P32_E1_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=False, + device="cpu", + generator_seed=0, + expected_slice=np.array([0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162, 0.9162]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f32_accelerator_G0_S1_P768_E1_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=False, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.62127, 0.61906, 0.61687, 0.61946, 0.61903, 0.61961, 0.61808, 0.62099, 0.62894]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + 
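# The remaining einstein integration tests follow the same naming scheme: the f16/f32 prefix selects the + # checkpoint precision (is_fp16), cpu/accelerator selects the generator device, and the trailing G/S/P/E/B/M + # fields mirror generator_seed, num_inference_steps, processing_resolution, ensemble_size, batch_size, and + # match_input_resolution passed to _test_marigold_intrinsics. + 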
def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E1_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.62109, 0.61914, 0.61719, 0.61963, 0.61914, 0.61963, 0.61816, 0.62109, 0.62891]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f16_accelerator_G2024_S1_P768_E1_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=2024, + expected_slice=np.array([0.64111, 0.63916, 0.63623, 0.63965, 0.63916, 0.63965, 0.6377, 0.64062, 0.64941]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f16_accelerator_G0_S2_P768_E1_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.60254, 0.60059, 0.59961, 0.60156, 0.60107, 0.60205, 0.60254, 0.60449, 0.61133]), + num_inference_steps=2, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P512_E1_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.64551, 0.64453, 0.64404, 0.64502, 0.64844, 0.65039, 0.64502, 0.65039, 0.65332]), + num_inference_steps=1, + processing_resolution=512, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E3_B1_M1(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.61572, 0.61377, 0.61182, 0.61426, 0.61377, 0.61426, 0.61279, 0.61572, 0.62354]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=3, + ensembling_kwargs={"reduction": "mean"}, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P768_E4_B2_M1(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.61914, 0.6167, 0.61475, 0.61719, 0.61719, 0.61768, 0.61572, 0.61914, 0.62695]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=4, + ensembling_kwargs={"reduction": "mean"}, + batch_size=2, + match_input_resolution=True, + ) + + def test_marigold_intrinsics_einstein_f16_accelerator_G0_S1_P512_E1_B1_M0(self): + self._test_marigold_intrinsics( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.65332, 0.64697, 0.64648, 0.64844, 0.64697, 0.64111, 0.64941, 0.64209, 0.65332]), + num_inference_steps=1, + processing_resolution=512, + ensemble_size=1, + batch_size=1, + match_input_resolution=False, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_normals.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_normals.py new file mode 100644 index 0000000000000000000000000000000000000000..108163bf22ec3deccae21bde8b6ee04ffb66330b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/marigold/test_marigold_normals.py @@ -0,0 +1,460 @@ +# Copyright 2023-2025 Marigold Team, ETH Zürich. All rights reserved. +# Copyright 2024-2025 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# -------------------------------------------------------------------------- +# More information and citation instructions are available on the +# Marigold project website: https://marigoldcomputervision.github.io +# -------------------------------------------------------------------------- +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoencoderTiny, + LCMScheduler, + MarigoldNormalsPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class MarigoldNormalsPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = MarigoldNormalsPipeline + params = frozenset(["image"]) + batch_params = frozenset(["image"]) + image_params = frozenset(["image"]) + image_latents_params = frozenset(["latents"]) + callback_cfg_params = frozenset([]) + test_xformers_attention = False + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "output_type", + ] + ) + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=8, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + torch.manual_seed(0) + scheduler = LCMScheduler( + beta_start=0.00085, + beta_end=0.012, + prediction_type="v_prediction", + set_alpha_to_one=False, + steps_offset=1, + beta_schedule="scaled_linear", + clip_sample=False, + thresholding=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "prediction_type": "normals", + "use_full_z_range": True, + } + return components + + def get_dummy_tiny_autoencoder(self): + return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) + + def get_dummy_inputs(self, device, seed=0): + image = 
floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": image, + "num_inference_steps": 1, + "processing_resolution": 0, + "generator": generator, + "output_type": "np", + } + return inputs + + def _test_marigold_normals( + self, + generator_seed: int = 0, + expected_slice: np.ndarray = None, + atol: float = 1e-4, + **pipe_kwargs, + ): + device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + pipe_inputs = self.get_dummy_inputs(device, seed=generator_seed) + pipe_inputs.update(**pipe_kwargs) + + prediction = pipe(**pipe_inputs).prediction + + prediction_slice = prediction[0, -3:, -3:, -1].flatten() + + if pipe_inputs.get("match_input_resolution", True): + self.assertEqual(prediction.shape, (1, 32, 32, 3), "Unexpected output resolution") + else: + self.assertTrue(prediction.shape[0] == 1 and prediction.shape[3] == 3, "Unexpected output dimensions") + self.assertEqual( + max(prediction.shape[1:3]), + pipe_inputs.get("processing_resolution", 768), + "Unexpected output resolution", + ) + + self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) + + def test_marigold_depth_dummy_defaults(self): + self._test_marigold_normals( + expected_slice=np.array([0.0967, 0.5234, 0.1448, -0.3155, -0.2550, -0.5578, 0.6854, 0.5657, -0.1263]), + ) + + def test_marigold_depth_dummy_G0_S1_P32_E1_B1_M1(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([0.0967, 0.5234, 0.1448, -0.3155, -0.2550, -0.5578, 0.6854, 0.5657, -0.1263]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M1(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([-0.4128, -0.5918, -0.6540, 0.2446, -0.2687, -0.4607, 0.2935, -0.0483, -0.2086]), + num_inference_steps=1, + processing_resolution=16, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G2024_S1_P32_E1_B1_M1(self): + self._test_marigold_normals( + generator_seed=2024, + expected_slice=np.array([0.5731, -0.7631, -0.0199, 0.1609, -0.4628, -0.7044, 0.5761, -0.3471, -0.4498]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S2_P32_E1_B1_M1(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([0.1017, -0.6823, -0.2533, 0.1988, 0.3389, 0.8478, 0.7757, 0.5220, 0.8668]), + num_inference_steps=2, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P64_E1_B1_M1(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([-0.2391, 0.7969, 0.6224, 0.0698, 0.5669, -0.2167, -0.1362, -0.8945, -0.5501]), + num_inference_steps=1, + processing_resolution=64, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P32_E3_B1_M1(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([0.3826, -0.9634, -0.3835, 0.3514, 0.0691, -0.6182, 0.8709, 0.1590, -0.2181]), + num_inference_steps=1, + 
processing_resolution=32, + ensemble_size=3, + ensembling_kwargs={"reduction": "mean"}, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P32_E4_B2_M1(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([0.2500, -0.3928, -0.2415, 0.1133, 0.2357, -0.4223, 0.9967, 0.4859, -0.1282]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=4, + ensembling_kwargs={"reduction": "mean"}, + batch_size=2, + match_input_resolution=True, + ) + + def test_marigold_depth_dummy_G0_S1_P16_E1_B1_M0(self): + self._test_marigold_normals( + generator_seed=0, + expected_slice=np.array([0.9588, 0.3326, -0.0825, -0.0994, -0.3534, -0.4302, 0.3562, 0.4421, -0.2086]), + num_inference_steps=1, + processing_resolution=16, + ensemble_size=1, + batch_size=1, + match_input_resolution=False, + ) + + def test_marigold_depth_dummy_no_num_inference_steps(self): + with self.assertRaises(ValueError) as e: + self._test_marigold_normals( + num_inference_steps=None, + expected_slice=np.array([0.0]), + ) + self.assertIn("num_inference_steps", str(e)) + + def test_marigold_depth_dummy_no_processing_resolution(self): + with self.assertRaises(ValueError) as e: + self._test_marigold_normals( + processing_resolution=None, + expected_slice=np.array([0.0]), + ) + self.assertIn("processing_resolution", str(e)) + + +@slow +@require_torch_accelerator +class MarigoldNormalsPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def _test_marigold_normals( + self, + is_fp16: bool = True, + device: str = "cuda", + generator_seed: int = 0, + expected_slice: np.ndarray = None, + model_id: str = "prs-eth/marigold-normals-lcm-v0-1", + image_url: str = "https://marigoldmonodepth.github.io/images/einstein.jpg", + atol: float = 1e-4, + **pipe_kwargs, + ): + from_pretrained_kwargs = {} + if is_fp16: + from_pretrained_kwargs["variant"] = "fp16" + from_pretrained_kwargs["torch_dtype"] = torch.float16 + + pipe = MarigoldNormalsPipeline.from_pretrained(model_id, **from_pretrained_kwargs) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=device).manual_seed(generator_seed) + + image = load_image(image_url) + width, height = image.size + + prediction = pipe(image, generator=generator, **pipe_kwargs).prediction + + prediction_slice = prediction[0, -3:, -3:, -1].flatten() + + if pipe_kwargs.get("match_input_resolution", True): + self.assertEqual(prediction.shape, (1, height, width, 3), "Unexpected output resolution") + else: + self.assertTrue(prediction.shape[0] == 1 and prediction.shape[3] == 3, "Unexpected output dimensions") + self.assertEqual( + max(prediction.shape[1:3]), + pipe_kwargs.get("processing_resolution", 768), + "Unexpected output resolution", + ) + + self.assertTrue(np.allclose(prediction_slice, expected_slice, atol=atol)) + + def test_marigold_normals_einstein_f32_cpu_G0_S1_P32_E1_B1_M1(self): + self._test_marigold_normals( + is_fp16=False, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.8971, 0.8971, 0.8971, 0.8971, 0.8971, 0.8971, 0.8971, 0.8971, 0.8971]), + num_inference_steps=1, + processing_resolution=32, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f32_cuda_G0_S1_P768_E1_B1_M1(self): + 
self._test_marigold_normals( + is_fp16=False, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7980, 0.7952, 0.7914, 0.7931, 0.7871, 0.7816, 0.7844, 0.7710, 0.7601]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G0_S1_P768_E1_B1_M1(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7979, 0.7949, 0.7915, 0.7930, 0.7871, 0.7817, 0.7842, 0.7710, 0.7603]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G2024_S1_P768_E1_B1_M1(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=2024, + expected_slice=np.array([0.8428, 0.8428, 0.8433, 0.8369, 0.8325, 0.8315, 0.8271, 0.8135, 0.8057]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G0_S2_P768_E1_B1_M1(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7095, 0.7095, 0.7104, 0.7070, 0.7051, 0.7061, 0.7017, 0.6938, 0.6914]), + num_inference_steps=2, + processing_resolution=768, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G0_S1_P512_E1_B1_M1(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7168, 0.7163, 0.7163, 0.7080, 0.7061, 0.7046, 0.7031, 0.7007, 0.6987]), + num_inference_steps=1, + processing_resolution=512, + ensemble_size=1, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G0_S1_P768_E3_B1_M1(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7114, 0.7124, 0.7144, 0.7085, 0.7070, 0.7080, 0.7051, 0.6958, 0.6924]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=3, + ensembling_kwargs={"reduction": "mean"}, + batch_size=1, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G0_S1_P768_E4_B2_M1(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7412, 0.7441, 0.7490, 0.7383, 0.7388, 0.7437, 0.7329, 0.7271, 0.7300]), + num_inference_steps=1, + processing_resolution=768, + ensemble_size=4, + ensembling_kwargs={"reduction": "mean"}, + batch_size=2, + match_input_resolution=True, + ) + + def test_marigold_normals_einstein_f16_cuda_G0_S1_P512_E1_B1_M0(self): + self._test_marigold_normals( + is_fp16=True, + device=torch_device, + generator_seed=0, + expected_slice=np.array([0.7188, 0.7144, 0.7134, 0.7178, 0.7207, 0.7222, 0.7231, 0.7041, 0.6987]), + num_inference_steps=1, + processing_resolution=512, + ensemble_size=1, + batch_size=1, + match_input_resolution=False, + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/mochi/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/mochi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/mochi/test_mochi.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/mochi/test_mochi.py new file mode 100644 index 0000000000000000000000000000000000000000..5615720a9343b649239b900349ea0e2a985ac169 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/mochi/test_mochi.py @@ -0,0 +1,306 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + nightly, + numpy_cosine_similarity_distance, + require_big_accelerator, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import FasterCacheTesterMixin, FirstBlockCacheTesterMixin, PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class MochiPipelineFastTests( + PipelineTesterMixin, FasterCacheTesterMixin, FirstBlockCacheTesterMixin, unittest.TestCase +): + pipeline_class = MochiPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, num_layers: int = 2): + torch.manual_seed(0) + transformer = MochiTransformer3DModel( + patch_size=2, + num_attention_heads=2, + attention_head_dim=8, + num_layers=num_layers, + pooled_projection_dim=16, + in_channels=12, + out_channels=None, + qk_norm="rms_norm", + text_embed_dim=32, + time_embed_dim=4, + activation_fn="swiglu", + max_sequence_length=16, + ) + transformer.pos_frequencies.data = transformer.pos_frequencies.new_full(transformer.pos_frequencies.shape, 0) + + torch.manual_seed(0) + vae = AutoencoderKLMochi( + latent_channels=12, + out_channels=3, + encoder_block_out_channels=(32, 32, 32, 32), + decoder_block_out_channels=(32, 32, 32, 32), + layers_per_block=(1, 1, 1, 1, 1), + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = 
torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 4.5, + "height": 16, + "width": 16, + # 6 * k + 1 is the recommendation + "num_frames": 7, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (7, 3, 16, 16)) + expected_video = torch.randn(7, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if 
hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + +@nightly +@require_torch_accelerator +@require_big_accelerator +class MochiPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_mochi(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = MochiPipeline.from_pretrained("genmo/mochi-1-preview", torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + videos = pipe( + prompt=prompt, + height=480, + width=848, + num_frames=19, + generator=generator, + num_inference_steps=2, + output_type="pt", + ).frames + + video = videos[0] + expected_video = torch.randn(1, 19, 480, 848, 3).numpy() + + max_diff = numpy_cosine_similarity_distance(video.cpu(), expected_video) + assert max_diff < 1e-3, f"Max diff is too high. 
got {video}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/omnigen/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/omnigen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/omnigen/test_pipeline_omnigen.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/omnigen/test_pipeline_omnigen.py new file mode 100644 index 0000000000000000000000000000000000000000..28648aa76f00ca8d33a8b876cfc2372ed8e14677 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/omnigen/test_pipeline_omnigen.py @@ -0,0 +1,183 @@ +import gc +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer + +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, OmniGenPipeline, OmniGenTransformer2DModel + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +class OmniGenPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = OmniGenPipeline + params = frozenset(["prompt", "guidance_scale"]) + batch_params = frozenset(["prompt"]) + + test_layerwise_casting = True + + def get_dummy_components(self): + torch.manual_seed(0) + + transformer = OmniGenTransformer2DModel( + hidden_size=16, + num_attention_heads=4, + num_key_value_heads=4, + intermediate_size=32, + num_layers=1, + in_channels=4, + time_step_dim=4, + rope_scaling={"long_factor": list(range(1, 3)), "short_factor": list(range(1, 3))}, + ) + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4, 4, 4, 4), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + ) + + scheduler = FlowMatchEulerDiscreteScheduler(invert_sigmas=True, num_train_timesteps=1) + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/llama-tokenizer") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 1, + "guidance_scale": 3.0, + "output_type": "np", + "height": 16, + "width": 16, + } + return inputs + + def test_inference(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + generated_image = pipe(**inputs).images[0] + + self.assertEqual(generated_image.shape, (16, 16, 3)) + + +@slow +@require_torch_accelerator +class OmniGenPipelineSlowTests(unittest.TestCase): + pipeline_class = OmniGenPipeline + repo_id = "shitao/OmniGen-v1-diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = 
torch.Generator(device="cpu").manual_seed(seed) + + return { + "prompt": "A photo of a cat", + "num_inference_steps": 2, + "guidance_scale": 2.5, + "output_type": "np", + "generator": generator, + } + + def test_omnigen_inference(self): + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.bfloat16) + pipe.enable_model_cpu_offload() + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10] + + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + [0.05859375, 0.05859375, 0.04492188], + [0.04882812, 0.04101562, 0.03320312], + [0.04882812, 0.04296875, 0.03125], + [0.04296875, 0.0390625, 0.03320312], + [0.04296875, 0.03710938, 0.03125], + [0.04492188, 0.0390625, 0.03320312], + [0.04296875, 0.03710938, 0.03125], + [0.04101562, 0.03710938, 0.02734375], + [0.04101562, 0.03515625, 0.02734375], + [0.04101562, 0.03515625, 0.02929688], + ], + dtype=np.float32, + ), + ("cuda", 7): np.array( + [ + [0.1783447, 0.16772744, 0.14339337], + [0.17066911, 0.15521264, 0.13757327], + [0.17072496, 0.15531206, 0.13524258], + [0.16746324, 0.1564025, 0.13794944], + [0.16490817, 0.15258026, 0.13697758], + [0.16971767, 0.15826806, 0.13928896], + [0.16782972, 0.15547255, 0.13783783], + [0.16464645, 0.15281534, 0.13522372], + [0.16535294, 0.15301755, 0.13526791], + [0.16365296, 0.15092957, 0.13443318], + ], + dtype=np.float32, + ), + ("cuda", 8): np.array( + [ + [0.0546875, 0.05664062, 0.04296875], + [0.046875, 0.04101562, 0.03320312], + [0.05078125, 0.04296875, 0.03125], + [0.04296875, 0.04101562, 0.03320312], + [0.0390625, 0.03710938, 0.02929688], + [0.04296875, 0.03710938, 0.03125], + [0.0390625, 0.03710938, 0.02929688], + [0.0390625, 0.03710938, 0.02734375], + [0.0390625, 0.03320312, 0.02734375], + [0.0390625, 0.03320312, 0.02734375], + ], + dtype=np.float32, + ), + } + ) + expected_slice = expected_slices.get_expectation() + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_animatediff.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_animatediff.py new file mode 100644 index 0000000000000000000000000000000000000000..b1cbd82d76799a8080278ddddc45a1b2a618636f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_animatediff.py @@ -0,0 +1,563 @@ +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AnimateDiffPAGPipeline, + AnimateDiffPipeline, + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + LCMScheduler, + MotionAdapter, + StableDiffusionPipeline, + UNet2DConditionModel, + UNetMotionModel, +) +from diffusers.models.attention import FreeNoiseTransformerBlock +from diffusers.utils import is_xformers_available + +from ...testing_utils import require_accelerator, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +def to_np(tensor): + if 
isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class AnimateDiffPAGPipelineFastTests( + IPAdapterTesterMixin, SDFunctionTesterMixin, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + pipeline_class = AnimateDiffPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + def get_dummy_components(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "pag_scale": 3.0, + "output_type": "pt", + } + return inputs + + def test_from_pipe_consistent_config(self): + assert self.original_pipeline_class == StableDiffusionPipeline + original_repo = "hf-internal-testing/tinier-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + + # create original_pipeline_class(sd) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd) + original_pipe_additional_components = {} + for name, component in 
pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_motion_unet_loading(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + assert isinstance(pipe.unet, UNetMotionModel) + + @unittest.skip("Attention slicing is not enabled in this pipeline") + def test_attention_slicing_forward_pass(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + + if torch_device == "cpu": + expected_pipe_slice = np.array( + [ + 0.5068, + 0.5294, + 0.4926, + 0.4810, + 0.4188, + 0.5935, + 0.5295, + 0.3947, + 0.5300, + 0.4706, + 0.3950, + 0.4737, + 0.4072, + 0.3227, + 0.5481, + 0.4864, + 0.4518, + 0.5315, + 0.5979, + 0.5374, + 0.3503, + 0.5275, + 0.6067, + 0.4914, + 0.5440, + 0.4775, + 0.5538, + ] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_dict_tuple_outputs_equivalent(self): + expected_slice = None + if torch_device == "cpu": + expected_slice = np.array([0.5295, 0.3947, 0.5300, 0.4864, 0.4518, 0.5315, 0.5440, 0.4775, 0.5538]) + return super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice) + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_prompt_embeds(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + inputs.pop("prompt") + inputs["prompt_embeds"] = torch.randn((1, 4, pipe.text_encoder.config.hidden_size), device=torch_device) + pipe(**inputs) + + def test_free_init(self): + components = self.get_dummy_components() + pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + pipe.enable_free_init( + num_iters=2, + use_fast_sampling=True, + method="butterworth", + order=4, + spatial_stop_frequency=0.25, + temporal_stop_frequency=0.25, + ) + inputs_enable_free_init = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] + + pipe.disable_free_init() + inputs_disable_free_init = self.get_dummy_inputs(torch_device) + frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() + self.assertGreater( + sum_enabled, 1e1, "Enabling of FreeInit should lead to results different from the default pipeline results" + ) + self.assertLess( + max_diff_disabled, + 1e-3, + "Disabling of FreeInit should lead to results similar to the default pipeline results", + ) + + def test_free_init_with_schedulers(self): + components = self.get_dummy_components() + pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + schedulers_to_test = [ + DPMSolverMultistepScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + algorithm_type="dpmsolver++", + steps_offset=1, + clip_sample=False, + ), + LCMScheduler.from_config( + components["scheduler"].config, + timestep_spacing="linspace", + beta_schedule="linear", + steps_offset=1, + clip_sample=False, + ), + ] + components.pop("scheduler") + + for scheduler in schedulers_to_test: + components["scheduler"] = scheduler + pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_init(num_iters=2, use_fast_sampling=False) + + inputs = self.get_dummy_inputs(torch_device) + frames_enable_free_init = pipe(**inputs).frames[0] + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() + + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeInit should lead to results different from the default pipeline results", + ) + + def test_free_noise_blocks(self): + components = self.get_dummy_components() + pipe: 
AnimateDiffPAGPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + pipe.enable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertTrue( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must be an instance of `FreeNoiseTransformerBlock` after enabling FreeNoise.", + ) + + pipe.disable_free_noise() + for block in pipe.unet.down_blocks: + for motion_module in block.motion_modules: + for transformer_block in motion_module.transformer_blocks: + self.assertFalse( + isinstance(transformer_block, FreeNoiseTransformerBlock), + "Motion module transformer blocks must not be an instance of `FreeNoiseTransformerBlock` after disabling FreeNoise.", + ) + + def test_free_noise(self): + components = self.get_dummy_components() + pipe: AnimateDiffPAGPipeline = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + pipe.to(torch_device) + + inputs_normal = self.get_dummy_inputs(torch_device) + frames_normal = pipe(**inputs_normal).frames[0] + + for context_length in [8, 9]: + for context_stride in [4, 6]: + pipe.enable_free_noise(context_length, context_stride) + + inputs_enable_free_noise = self.get_dummy_inputs(torch_device) + frames_enable_free_noise = pipe(**inputs_enable_free_noise).frames[0] + + pipe.disable_free_noise() + + inputs_disable_free_noise = self.get_dummy_inputs(torch_device) + frames_disable_free_noise = pipe(**inputs_disable_free_noise).frames[0] + + sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_noise)).sum() + max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_noise)).max() + self.assertGreater( + sum_enabled, + 1e1, + "Enabling of FreeNoise should lead to results different from the default pipeline results", + ) + self.assertLess( + max_diff_disabled, + 1e-4, + "Disabling of FreeNoise should lead to results similar to the default pipeline results", + ) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs).frames[0] + output_without_offload = ( + output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload + ) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs).frames[0] + output_with_offload = ( + output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload + ) + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") + + def test_vae_slicing(self): + return super().test_vae_slicing(image_count=2) + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + 
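# NOTE: this test compares three runs: the plain AnimateDiffPipeline as a baseline, the PAG pipeline with pag_scale=0.0 (expected to match the baseline), and the PAG pipeline with the default pag_scale from get_dummy_inputs (expected to differ). + 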
# base pipeline (expect same output when pag is disabled) + components.pop("pag_applied_layers", None) + pipe_sd = AnimateDiffPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) + out = pipe_sd(**inputs).frames[0, -3:, -3:, -1] + + components = self.get_dummy_components() + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).frames[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).frames[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + components.pop("pag_applied_layers", None) + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers + # Note that for motion modules in AnimateDiff, both attn1 and attn2 are self-attention + all_self_attn_layers = [ + k for k in pipe.unet.attn_processors.keys() if "attn1" in k or ("motion_modules" in k and "attn2" in k) + ] + original_attn_procs = pipe.unet.attn_processors + pag_layers = [ + "down", + "mid", + "up", + ] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + # pag_applied_layers = ["mid"], or ["mid_block.0"] should apply to all self-attention layers in mid_block, i.e. 
+ # mid_block.motion_modules.0.transformer_blocks.0.attn1.processor + # mid_block.attentions.0.transformer_blocks.0.attn1.processor + all_self_attn_mid_layers = [ + "mid_block.attentions.0.transformer_blocks.0.attn1.processor", + "mid_block.motion_modules.0.transformer_blocks.0.attn1.processor", + "mid_block.motion_modules.0.transformer_blocks.0.attn2.processor", + ] + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.(attentions|motion_modules)"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.1"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + # pag_applied_layers = "down" should apply to all self-attention layers in down_blocks + # down_blocks.1.(attentions|motion_modules).0.transformer_blocks.0.attn1.processor + # down_blocks.1.(attentions|motion_modules).0.transformer_blocks.1.attn1.processor + # down_blocks.1.(attentions|motion_modules).0.transformer_blocks.0.attn1.processor + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 10 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.0"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert (len(pipe.pag_attn_processors)) == 6 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 10 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["motion_modules.42"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "num_images_per_prompt": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sd.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sd.py new file mode 100644 index 0000000000000000000000000000000000000000..36d5ae100a58e8bf505fdf800bdd32a947f28057 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sd.py @@ -0,0 +1,253 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + StableDiffusionControlNetPAGPipeline, + StableDiffusionControlNetPipeline, + UNet2DConditionModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionControlNetPAGPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionControlNetPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + def get_dummy_components(self, time_cond_proj_dim=None): + # Copied from tests.pipelines.controlnet.test_controlnet_sdxl.StableDiffusionXLControlNetPipelineFastTests.get_dummy_components + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=8, + time_cond_proj_dim=time_cond_proj_dim, + norm_num_groups=2, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(4, 8), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(2, 4), + cross_attention_dim=8, + norm_num_groups=2, + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=8, + intermediate_size=16, + layer_norm_eps=1e-05, + num_attention_heads=2, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = 
CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "pag_scale": 3.0, + "output_type": "np", + "image": image, + } + + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusionControlNetPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_cfg(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array( + [0.45505235, 0.2785938, 0.16334778, 0.79689944, 0.53095645, 0.40135607, 0.7052706, 0.69065094, 0.41548574] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_pag_uncond(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + 
pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guidance_scale"] = 0.0 + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array( + [0.45127502, 0.2797252, 0.15970308, 0.7993157, 0.5414344, 0.40160775, 0.7114598, 0.69803864, 0.4217583] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..948381f9769ef505cdd17f0ce38e8655e123c7b3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sd_inpaint.py @@ -0,0 +1,249 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# This model implementation is heavily based on: + +import inspect +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + DDIMScheduler, + StableDiffusionControlNetInpaintPipeline, + StableDiffusionControlNetPAGInpaintPipeline, + UNet2DConditionModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import enable_full_determinism, floats_tensor, torch_device +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusionControlNetPAGInpaintPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionControlNetPAGInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + # Copied from tests.pipelines.controlnet.test_controlnet_inpaint.ControlNetInpaintPipelineFastTests.get_dummy_components + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=9, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + cross_attention_dim=32, + conditioning_embedding_out_channels=(16, 32), + ) + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + control_image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + 
generator=generator, + device=torch.device(device), + ) + init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + init_image = init_image.cpu().permute(0, 2, 3, 1)[0] + + image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) + mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "pag_scale": 3.0, + "output_type": "np", + "image": image, + "mask_image": mask_image, + "control_image": control_image, + } + + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusionControlNetInpaintPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_cfg(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array( + [0.7488756, 0.61194265, 0.53382546, 0.5993959, 0.6193306, 0.56880975, 0.41277143, 0.5050145, 0.49376273] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_pag_uncond(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guidance_scale"] = 0.0 + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" 
+ expected_slice = np.array( + [0.7410303, 0.5989337, 0.530866, 0.60571927, 0.6162597, 0.5719856, 0.4187478, 0.5101238, 0.4978468] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sdxl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..51b00f6932bcce483aab4347d06a9affae7d1555 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sdxl.py @@ -0,0 +1,264 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + EulerDiscreteScheduler, + StableDiffusionXLControlNetPAGPipeline, + StableDiffusionXLControlNetPipeline, + UNet2DConditionModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLControlNetPAGPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLControlNetPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + def get_dummy_components(self, time_cond_proj_dim=None): + # Copied from tests.pipelines.controlnet.test_controlnet_sdxl.StableDiffusionXLControlNetPipelineFastTests.get_dummy_components + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + 
attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + time_cond_proj_dim=time_cond_proj_dim, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + controlnet_embedder_scale_factor = 2 + image = randn_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + generator=generator, + device=torch.device(device), + ) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "pag_scale": 3.0, + "output_type": "np", + "image": image, + } + + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusionXLControlNetPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_cfg(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array([0.7036, 0.5613, 0.5526, 0.6129, 0.5610, 0.5842, 0.4228, 0.4612, 0.5017]) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_pag_uncond(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guidance_scale"] = 0.0 + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array([0.6888, 0.5398, 0.5603, 0.6086, 0.5541, 0.5957, 0.4332, 0.4643, 0.5154]) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..3c1088adbcf2cb87e1bc80bee87a23c92f83fa11 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_controlnet_sdxl_img2img.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ControlNetModel, + EulerDiscreteScheduler, + StableDiffusionXLControlNetImg2ImgPipeline, + StableDiffusionXLControlNetPAGImg2ImgPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import enable_full_determinism, floats_tensor +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLControlNetPAGImg2ImgPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLControlNetPAGImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} + ) + + # Copied from tests.pipelines.controlnet.test_controlnet_sdxl_img2img.ControlNetPipelineSDXLImg2ImgFastTests.get_dummy_components + def get_dummy_components(self, skip_first_text_encoder=False): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + torch.manual_seed(0) + controlnet = ControlNetModel( + block_out_channels=(32, 64), + layers_per_block=2, + in_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + conditioning_embedding_out_channels=(16, 32), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + torch.manual_seed(0) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + 
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "controlnet": controlnet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": None, + "feature_extractor": None, + } + return components + + # based on tests.pipelines.controlnet.test_controlnet_sdxl_img2img.ControlNetPipelineSDXLImg2ImgFastTests.get_dummy_inputs + # add `pag_scale` to the inputs + def get_dummy_inputs(self, device, seed=0): + controlnet_embedder_scale_factor = 2 + image = floats_tensor( + (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), + rng=random.Random(seed), + ).to(device) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "pag_scale": 3.0, + "output_type": "np", + "image": image, + "control_image": image, + } + + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe_sd = StableDiffusionXLControlNetImg2ImgPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enable + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_save_load_optional_components(self): + pass + + def test_pag_cfg(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array( + [0.5562928, 0.44882968, 0.4588066, 0.63200223, 0.5694165, 0.4955688, 0.6126959, 0.57588536, 0.43827885] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_pag_uncond(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guidance_scale"] = 0.0 + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array( + [0.5543988, 0.45614323, 0.4665692, 0.6202247, 0.5598917, 0.49621183, 0.6084159, 0.5722314, 0.43945464] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_hunyuan_dit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_hunyuan_dit.py new file mode 100644 index 0000000000000000000000000000000000000000..f268a614f85caf1fb58d684c86d1cb3a370cf064 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_hunyuan_dit.py @@ -0,0 +1,363 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, BertModel, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + DDPMScheduler, + HunyuanDiT2DModel, + HunyuanDiTPAGPipeline, + HunyuanDiTPipeline, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class HunyuanDiTPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = HunyuanDiTPAGPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + required_optional_params = PipelineTesterMixin.required_optional_params + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = HunyuanDiT2DModel( + sample_size=16, + num_layers=2, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + in_channels=4, + cross_attention_dim=32, + cross_attention_dim_t5=32, + pooled_projection_dim=16, + hidden_size=24, + activation_fn="gelu-approximate", + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDPMScheduler() + text_encoder = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "use_resolution_binning": False, + "pag_scale": 0.0, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 16, 16, 3)) + expected_slice = np.array( + [0.56939435, 0.34541583, 0.35915792, 0.46489206, 0.38775963, 0.45004836, 0.5957267, 0.59481275, 0.33287364] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + @unittest.skip("Not supported.") + def test_sequential_cpu_offload_forward_pass(self): + # TODO(YiYi) need to fix later + pass + + @unittest.skip("Not supported.") + def test_sequential_offload_forward_pass_twice(self): + # TODO(YiYi) need to fix later + pass + + def test_inference_batch_single_identical(self): + 
self._test_inference_batch_single_identical( + expected_max_diff=1e-3, + ) + + def test_feed_forward_chunking(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_no_chunking = image[0, -3:, -3:, -1] + + pipe.transformer.enable_forward_chunking(chunk_size=1, dim=0) + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_chunking = image[0, -3:, -3:, -1] + + max_diff = np.abs(to_np(image_slice_no_chunking) - to_np(image_slice_chunking)).max() + self.assertLess(max_diff, 1e-4) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image = pipe(**inputs)[0] + original_image_slice = image[0, -3:, -3:, -1] + + pipe.transformer.fuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image_fused = pipe(**inputs)[0] + image_slice_fused = image_fused[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image_disabled = pipe(**inputs)[0] + image_slice_disabled = image_disabled[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = HunyuanDiTPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+        )
+        out = pipe_sd(**inputs).images[0, -3:, -3:, -1]
+
+        components = self.get_dummy_components()
+
+        # pag disabled with pag_scale=0.0
+        pipe_pag = self.pipeline_class(**components)
+        pipe_pag = pipe_pag.to(device)
+        pipe_pag.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(device)
+        inputs["pag_scale"] = 0.0
+        out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]
+
+        # pag enabled
+        pipe_pag = self.pipeline_class(**components)
+        pipe_pag = pipe_pag.to(device)
+        pipe_pag.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(device)
+        inputs["pag_scale"] = 3.0
+        out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]
+
+        assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3
+        assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3
+
+    def test_pag_applied_layers(self):
+        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
+        components = self.get_dummy_components()
+
+        # base pipeline
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(device)
+        pipe.set_progress_bar_config(disable=None)
+
+        all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn1" in k]
+        original_attn_procs = pipe.transformer.attn_processors
+        pag_layers = ["blocks.0", "blocks.1"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert set(pipe.pag_attn_processors) == set(all_self_attn_layers)
+
+        # blocks.0
+        block_0_self_attn = ["blocks.0.attn1.processor"]
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.0"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)
+
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.0.attn1"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)
+
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.(0|1)"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert (len(pipe.pag_attn_processors)) == 2
+
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.0", r"blocks\.1"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert len(pipe.pag_attn_processors) == 2
+
+    @unittest.skip(
+        "Test not supported as `encode_prompt` is called two times separately which deviates from about 99% of the pipelines we have."
+ ) + def test_encode_prompt_works_in_isolation(self): + pass + + def test_save_load_optional_components(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = inputs["prompt"] + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + ( + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) = pipe.encode_prompt(prompt, device=torch_device, dtype=torch.float32, text_encoder_index=0) + + ( + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_attention_mask_2, + negative_prompt_attention_mask_2, + ) = pipe.encode_prompt( + prompt, + device=torch_device, + dtype=torch.float32, + text_encoder_index=1, + ) + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attention_mask, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": negative_prompt_attention_mask, + "prompt_embeds_2": prompt_embeds_2, + "prompt_attention_mask_2": prompt_attention_mask_2, + "negative_prompt_embeds_2": negative_prompt_embeds_2, + "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "use_resolution_binning": False, + } + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attention_mask, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": negative_prompt_attention_mask, + "prompt_embeds_2": prompt_embeds_2, + "prompt_attention_mask_2": prompt_attention_mask_2, + "negative_prompt_embeds_2": negative_prompt_embeds_2, + "negative_prompt_attention_mask_2": negative_prompt_attention_mask_2, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "use_resolution_binning": False, + } + + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1e-4) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_kolors.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_kolors.py new file mode 100644 index 0000000000000000000000000000000000000000..1bbb4e79e4bcee3d6aac7ad9aec7fe363e37e460 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_kolors.py @@ -0,0 +1,259 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch + +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + KolorsPAGPipeline, + KolorsPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + PipelineFromPipeTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class KolorsPAGPipelineFastTests( + PipelineTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = KolorsPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + supports_dduf = False + + # Copied from tests.pipelines.kolors.test_kolors.KolorsPipelineFastTests.get_dummy_components + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(2, 4), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=56, + cross_attention_dim=8, + norm_num_groups=1, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder = ChatGLMModel.from_pretrained( + "hf-internal-testing/tiny-random-chatglm3-6b", torch_dtype=torch.float32 + ) + tokenizer = ChatGLMTokenizer.from_pretrained("hf-internal-testing/tiny-random-chatglm3-6b") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a 
squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "pag_scale": 0.9, + "output_type": "np", + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = KolorsPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers + all_self_attn_layers = [k for k in pipe.unet.attn_processors.keys() if "attn1" in k] + original_attn_procs = pipe.unet.attn_processors + pag_layers = ["mid", "down", "up"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + all_self_attn_mid_layers = [ + "mid_block.attentions.0.transformer_blocks.0.attn1.processor", + "mid_block.attentions.0.transformer_blocks.1.attn1.processor", + ] + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.0"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + # pag_applied_layers = ["mid.block_0.attentions_1"] does not exist in the model + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.1"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + # 
pag_applied_layers = "down" should apply to all self-attention layers in down_blocks + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 4 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.0"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 4 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1.attentions.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array( + [0.26030684, 0.43192005, 0.4042826, 0.4189067, 0.5181305, 0.3832534, 0.472135, 0.4145031, 0.43726248] + ) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_encode_prompt_works_in_isolation(self): + return super().test_encode_prompt_works_in_isolation(atol=1e-3, rtol=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_pixart_sigma.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_pixart_sigma.py new file mode 100644 index 0000000000000000000000000000000000000000..c04ebad08fdc4e9be2f3852d95bc9dd45116587b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_pixart_sigma.py @@ -0,0 +1,349 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +import diffusers +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + PixArtSigmaPAGPipeline, + PixArtSigmaPipeline, + PixArtTransformer2DModel, +) +from diffusers.utils import logging + +from ...testing_utils import ( + CaptureLogger, + enable_full_determinism, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference, to_np + + +enable_full_determinism() + + +class PixArtSigmaPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = PixArtSigmaPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + params = set(params) + params.remove("cross_attention_kwargs") + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = PixArtTransformer2DModel( + sample_size=8, + num_layers=2, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + caption_channels=32, + in_channels=4, + cross_attention_dim=24, + out_channels=8, + attention_bias=True, + activation_fn="gelu-approximate", + num_embeds_ada_norm=1000, + norm_type="ada_norm_single", + norm_elementwise_affine=False, + norm_eps=1e-6, + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 1.0, + "pag_scale": 3.0, + "use_resolution_binning": False, + "output_type": "np", + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe = PixArtSigmaPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe.__class__.__name__}." 
+ ) + out = pipe(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + components["pag_applied_layers"] = ["blocks.1"] + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # "attn1" should apply to all self-attention layers. + all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn1" in k] + pag_layers = ["blocks.0", "blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 8, + 8, + 3, + ), f"the shape of the output image should be (1, 8, 8, 3) but got {image.shape}" + expected_slice = np.array([0.6499, 0.3250, 0.3572, 0.6780, 0.4453, 0.4582, 0.2770, 0.5168, 0.4594]) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + # Because the PAG PixArt Sigma has `pag_applied_layers`. + # Also, we shouldn't be doing `set_default_attn_processor()` after loading + # the pipeline with `pag_applied_layers`. + def test_save_load_local(self, expected_max_difference=1e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(diffusers.logging.INFO) + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + + with CaptureLogger(logger) as cap_logger: + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, pag_applied_layers=["blocks.1"]) + + for name in pipe_loaded.components.keys(): + if name not in pipe_loaded._optional_components: + assert name in str(cap_logger) + + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + # We shouldn't be setting `set_default_attn_processor` here. 
+    def test_attention_slicing_forward_pass(
+        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
+    ):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        generator_device = "cpu"
+        inputs = self.get_dummy_inputs(generator_device)
+        output_without_slicing = pipe(**inputs)[0]
+
+        pipe.enable_attention_slicing(slice_size=1)
+        inputs = self.get_dummy_inputs(generator_device)
+        output_with_slicing1 = pipe(**inputs)[0]
+
+        pipe.enable_attention_slicing(slice_size=2)
+        inputs = self.get_dummy_inputs(generator_device)
+        output_with_slicing2 = pipe(**inputs)[0]
+
+        if test_max_difference:
+            max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
+            max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
+            self.assertLess(
+                max(max_diff1, max_diff2),
+                expected_max_diff,
+                "Attention slicing should not affect the inference results",
+            )
+
+        if test_mean_pixel_difference:
+            assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0]))
+            assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0]))
+
+    # Because we have `pag_applied_layers` we cannot directly apply
+    # `set_default_attn_processor`
+    def test_dict_tuple_outputs_equivalent(self, expected_slice=None, expected_max_difference=1e-4):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        generator_device = "cpu"
+        if expected_slice is None:
+            output = pipe(**self.get_dummy_inputs(generator_device))[0]
+        else:
+            output = expected_slice
+
+        output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]
+
+        if expected_slice is None:
+            max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()
+        else:
+            if output_tuple.ndim != 5:
+                max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1].flatten()).max()
+            else:
+                max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1, -1].flatten()).max()
+
+        self.assertLess(max_diff, expected_max_difference)
+
+    # Same reason as above
+    def test_inference_batch_single_identical(
+        self,
+        batch_size=2,
+        expected_max_diff=1e-4,
+        additional_params_copy_to_batched_inputs=["num_inference_steps"],
+    ):
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+        inputs = self.get_dummy_inputs(torch_device)
+        # Reset generator in case it has been used in self.get_dummy_inputs
+        inputs["generator"] = self.get_generator(0)
+
+        logger = logging.get_logger(pipe.__module__)
+        logger.setLevel(level=diffusers.logging.FATAL)
+
+        # batchify inputs
+        batched_inputs = {}
+        batched_inputs.update(inputs)
+
+        for name in self.batch_params:
+            if name not in inputs:
+                continue
+
+            value = inputs[name]
+            if name == "prompt":
+                len_prompt = len(value)
+                batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]
+                batched_inputs[name][-1] = 100 * "very long"
+
+            else:
+                batched_inputs[name] = batch_size * [value]
+
+        if "generator" in inputs:
+            batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)]
+
+        if "batch_size" in inputs:
+            batched_inputs["batch_size"] = batch_size
+
+        for arg in additional_params_copy_to_batched_inputs:
+            batched_inputs[arg] =
inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + # Because we're passing `pag_applied_layers` (type of List) in the components as well. + def test_components_function(self): + init_components = self.get_dummy_components() + init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float, list))} + + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + @unittest.skip("Test is already covered through encode_prompt isolation.") + def test_save_load_optional_components(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sana.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sana.py new file mode 100644 index 0000000000000000000000000000000000000000..5408595c729dbc21113b651e0d1f80ad402b85d5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sana.py @@ -0,0 +1,341 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import Gemma2Config, Gemma2ForCausalLM, GemmaTokenizer + +from diffusers import ( + AutoencoderDC, + FlowMatchEulerDiscreteScheduler, + SanaPAGPipeline, + SanaPipeline, + SanaTransformer2DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class SanaPAGPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SanaPAGPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SanaTransformer2DModel( + patch_size=1, + in_channels=4, + out_channels=4, + num_layers=2, + num_attention_heads=2, + attention_head_dim=4, + num_cross_attention_heads=2, + cross_attention_head_dim=4, + cross_attention_dim=8, + caption_channels=8, + sample_size=32, + ) + + torch.manual_seed(0) + vae = AutoencoderDC( + in_channels=3, + latent_channels=4, + attention_head_dim=2, + encoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + decoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + encoder_block_out_channels=(8, 8), + decoder_block_out_channels=(8, 8), + encoder_qkv_multiscales=((), (5,)), + decoder_qkv_multiscales=((), (5,)), + encoder_layers_per_block=(1, 1), + decoder_layers_per_block=[1, 1], + downsample_block_type="conv", + upsample_block_type="interpolate", + decoder_norm_types="rms_norm", + decoder_act_fns="silu", + scaling_factor=0.41407, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + torch.manual_seed(0) + text_encoder_config = Gemma2Config( + head_dim=16, + hidden_size=32, + initializer_range=0.02, + intermediate_size=64, + max_position_embeddings=8192, + model_type="gemma2", + num_attention_heads=2, + num_hidden_layers=1, + num_key_value_heads=2, + vocab_size=8, + attn_implementation="eager", + ) + text_encoder = Gemma2ForCausalLM(text_encoder_config) + tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "pag_scale": 3.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + "complex_human_instruction": None, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = 
pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 32, 32)) + expected_image = torch.randn(3, 32, 32) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - 
to_np(output_without_slicing)).max()
+            self.assertLess(
+                max(max_diff1, max_diff2),
+                expected_max_diff,
+                "Attention slicing should not affect the inference results",
+            )
+
+    def test_pag_disable_enable(self):
+        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
+        components = self.get_dummy_components()
+
+        # base pipeline (expect same output when pag is disabled)
+        pipe_sd = SanaPipeline(**components)
+        pipe_sd = pipe_sd.to(device)
+        pipe_sd.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(device)
+        del inputs["pag_scale"]
+        assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, (
+            f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}."
+        )
+        out = pipe_sd(**inputs).images[0, -3:, -3:, -1]
+
+        components = self.get_dummy_components()
+
+        # pag disabled with pag_scale=0.0
+        pipe_pag = self.pipeline_class(**components)
+        pipe_pag = pipe_pag.to(device)
+        pipe_pag.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(device)
+        inputs["pag_scale"] = 0.0
+        out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1]
+
+        assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3
+
+    def test_pag_applied_layers(self):
+        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
+        components = self.get_dummy_components()
+
+        # base pipeline
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(device)
+        pipe.set_progress_bar_config(disable=None)
+
+        all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn1" in k]
+        original_attn_procs = pipe.transformer.attn_processors
+        pag_layers = ["blocks.0", "blocks.1"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert set(pipe.pag_attn_processors) == set(all_self_attn_layers)
+
+        # blocks.0
+        block_0_self_attn = ["transformer_blocks.0.attn1.processor"]
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.0"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)
+
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.0.attn1"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert set(pipe.pag_attn_processors) == set(block_0_self_attn)
+
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.(0|1)"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert (len(pipe.pag_attn_processors)) == 2
+
+        pipe.transformer.set_attn_processor(original_attn_procs.copy())
+        pag_layers = ["blocks.0", r"blocks\.1"]
+        pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False)
+        assert len(pipe.pag_attn_processors) == 2
+
+    # TODO(aryan): Create a dummy gemma model with smol vocab size
+    @unittest.skip(
+        "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to an embedding lookup error. This test uses a long prompt that causes the error."
+    )
+    def test_inference_batch_consistent(self):
+        pass
+
+    @unittest.skip(
+        "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to an embedding lookup error.
This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass + + def test_float16_inference(self): + # Requires higher tolerance as model seems very sensitive to dtype + super().test_float16_inference(expected_max_diff=0.08) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd.py new file mode 100644 index 0000000000000000000000000000000000000000..064815d1369341480d6b91fdda417e276af824d3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd.py @@ -0,0 +1,350 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoPipelineForText2Image, + DDIMScheduler, + StableDiffusionPAGPipeline, + StableDiffusionPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionPAGPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + def get_dummy_components(self, time_cond_proj_dim=None): + cross_attention_dim = 8 + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=2, + sample_size=32, + time_cond_proj_dim=time_cond_proj_dim, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + 
text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=16, + layer_norm_eps=1e-05, + num_attention_heads=2, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "pag_scale": 0.9, + "output_type": "np", + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusionPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers + all_self_attn_layers = [k for k in pipe.unet.attn_processors.keys() if "attn1" in k] + original_attn_procs = pipe.unet.attn_processors + pag_layers = [ + "down", + "mid", + "up", + ] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + # pag_applied_layers = ["mid"], or ["mid.block_0"] or ["mid.block_0.attentions_0"] should apply to all self-attention layers in mid_block, i.e. 
+ # mid_block.attentions.0.transformer_blocks.0.attn1.processor + # mid_block.attentions.0.transformer_blocks.1.attn1.processor + all_self_attn_mid_layers = [ + "mid_block.attentions.0.transformer_blocks.0.attn1.processor", + # "mid_block.attentions.0.transformer_blocks.1.attn1.processor", + ] + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.0"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + # pag_applied_layers = ["mid.block_0.attentions_1"] does not exist in the model + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.1"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + # pag_applied_layers = "down" should apply to all self-attention layers in down_blocks + # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor + # down_blocks.1.attentions.0.transformer_blocks.1.attn1.processor + # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.0"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1.attentions.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 1 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + + expected_slice = np.array( + [0.22802538, 0.44626093, 0.48905736, 0.29633686, 0.36400637, 0.4724258, 0.4678891, 0.32260418, 0.41611585] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + 
"device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase): + pipeline_class = StableDiffusionPAGPipeline + repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", seed=1, guidance_scale=7.0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a polar bear sitting in a chair drinking a milkshake", + "negative_prompt": "deformed, ugly, wrong proportion, low res, bad anatomy, worst quality, low quality", + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": guidance_scale, + "pag_scale": 3.0, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 512, 512, 3) + + expected_slice = np.array( + [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, guidance_scale=0.0) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd3.py new file mode 100644 index 0000000000000000000000000000000000000000..26e6ca099286b880b38581ebb92723b3d276fbfb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd3.py @@ -0,0 +1,263 @@ +import inspect +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3PAGPipeline, + StableDiffusion3Pipeline, +) + +from ...testing_utils import ( + torch_device, +) +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, +) + + +class 
StableDiffusion3PAGPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3PAGPipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=4, + num_layers=2, + attention_head_dim=8, + num_attention_heads=4, + caption_projection_dim=32, + joint_attention_dim=32, + pooled_projection_dim=64, + out_channels=4, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "pag_scale": 0.0, + } + return inputs + + def test_stable_diffusion_3_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "a different prompt" + inputs["prompt_3"] = "another different prompt" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + assert max_diff > 1e-2 + + def test_stable_diffusion_3_different_negative_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt_2"] = "deformed" + inputs["negative_prompt_3"] = "blurry" + output_different_prompts = pipe(**inputs).images[0] + + max_diff = 
np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different here + assert max_diff > 1e-2 + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusion3Pipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + components = self.get_dummy_components() + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + all_self_attn_layers = [k for k in pipe.transformer.attn_processors.keys() if "attn" in k] + original_attn_procs = pipe.transformer.attn_processors + pag_layers = ["blocks.0", "blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + # blocks.0 + block_0_self_attn = ["transformer_blocks.0.attn.processor"] + pipe.transformer.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["blocks.0"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(block_0_self_attn) + + pipe.transformer.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["blocks.0.attn"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(block_0_self_attn) + + pipe.transformer.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["blocks.(0|1)"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert (len(pipe.pag_attn_processors)) == 2 + + pipe.transformer.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["blocks.0", r"blocks\.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd3_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd3_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..19a36e283de478a04e129e8059cb1d757975a554 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd3_img2img.py @@ -0,0 +1,277 @@ +import gc +import inspect +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + AutoPipelineForImage2Image, + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3Img2ImgPipeline, + StableDiffusion3PAGImg2ImgPipeline, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class 
StableDiffusion3PAGImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3PAGImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latens_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + + test_xformers_attention = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=4, + num_layers=2, + attention_head_dim=8, + num_attention_heads=4, + caption_projection_dim=32, + joint_attention_dim=32, + pooled_projection_dim=64, + out_channels=4, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "pag_scale": 0.7, + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusion3Img2ImgPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + components = self.get_dummy_components() + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["blocks.0"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 32, + 32, + 3, + ), f"the shape of the output image should be (1, 32, 32, 3) but got {image.shape}" + + expected_slice = np.array( + [0.66063476, 0.44838923, 0.5484299, 0.7242875, 0.5970012, 0.6015729, 0.53080845, 0.52220416, 0.56397927] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + +@slow +@require_torch_accelerator +class StableDiffusion3PAGImg2ImgPipelineIntegrationTests(unittest.TestCase): + pipeline_class = StableDiffusion3PAGImg2ImgPipeline + repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs( + self, device, generator_device="cpu", dtype=torch.float32, seed=0, guidance_scale=7.0, pag_scale=0.7 + ): + img_url = ( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" + ) + init_image = load_image(img_url) + + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "an astronaut in a space suit walking through a jungle", + "generator": generator, + "image": init_image, + "num_inference_steps": 12, + "strength": 0.6, + "guidance_scale": guidance_scale, + "pag_scale": pag_scale, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForImage2Image.from_pretrained( + self.repo_id, enable_pag=True, torch_dtype=torch.float16, pag_applied_layers=["blocks.17"] + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [ + 0.16772461, + 0.17626953, + 0.18432617, + 0.17822266, + 0.18359375, + 0.17626953, + 0.17407227, + 0.17700195, + 0.17822266, + ] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForImage2Image.from_pretrained( + self.repo_id, enable_pag=True, torch_dtype=torch.float16, pag_applied_layers=["blocks.(4|17)"] + ) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, guidance_scale=0.0, pag_scale=1.8) + image = pipeline(**inputs).images + image_slice 
= image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.1508789, 0.16210938, 0.17138672, 0.16210938, 0.17089844, 0.16137695, 0.16235352, 0.16430664, 0.16455078] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..0b440d5ec9fc3b2c7458490fa4bfae9e2b1f6056 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd_img2img.py @@ -0,0 +1,290 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoencoderTiny, + AutoPipelineForImage2Image, + EulerDiscreteScheduler, + StableDiffusionImg2ImgPipeline, + StableDiffusionPAGImg2ImgPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionPAGImg2ImgPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionPAGImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + 
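# reseed before building the VAE so its dummy weights are deterministic; latent_channels=4 matches the dummy UNet's in/out channels +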
vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_tiny_autoencoder(self): + return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "pag_scale": 0.9, + "output_type": "np", + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusionImg2ImgPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 32, + 32, + 3, + ), f"the shape of the output image should be (1, 32, 32, 3) but got {image.shape}" + + expected_slice = np.array( + [0.44203848, 0.49598145, 0.42248967, 0.6707724, 0.5683791, 0.43603387, 0.58316565, 0.60077155, 0.5174199] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableDiffusionPAGImg2ImgPipelineIntegrationTests(unittest.TestCase): + pipeline_class = StableDiffusionPAGImg2ImgPipeline + repo_id = "Jiali/stable-diffusion-1.5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "image": init_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "pag_scale": 3.0, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 512, 512, 3) + + expected_slice = np.array( + [0.58251953, 0.5722656, 0.5683594, 0.55029297, 0.52001953, 0.52001953, 0.49951172, 0.45410156, 0.50146484] + ) + assert np.abs(image_slice.flatten() - 
expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, guidance_scale=0.0) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.5986328, 0.52441406, 0.3972168, 0.4741211, 0.34985352, 0.22705078, 0.4128418, 0.2866211, 0.31713867] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..709df683705593308a0554362275e8e506fb1fd0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sd_inpaint.py @@ -0,0 +1,324 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoPipelineForInpainting, + PNDMScheduler, + StableDiffusionPAGInpaintPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionPAGInpaintPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionPAGInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset([]) + image_latents_params = frozenset([]) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + {"add_text_embeds", "add_time_ids", "mask", "masked_image_latents"} + ) + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + time_cond_proj_dim=time_cond_proj_dim, + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + # create mask + image[8:, 8:, :] = 255 + mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + 
"num_inference_steps": 2, + "guidance_scale": 6.0, + "strength": 1.0, + "pag_scale": 0.9, + "output_type": "np", + } + return inputs + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers + all_self_attn_layers = [k for k in pipe.unet.attn_processors.keys() if "attn1" in k] + original_attn_procs = pipe.unet.attn_processors + pag_layers = [ + "down", + "mid", + "up", + ] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + # pag_applied_layers = ["mid"], or ["mid.block_0"] or ["mid.block_0.attentions_0"] should apply to all self-attention layers in mid_block, i.e. + # mid_block.attentions.0.transformer_blocks.0.attn1.processor + # mid_block.attentions.0.transformer_blocks.1.attn1.processor + all_self_attn_mid_layers = [ + "mid_block.attentions.0.transformer_blocks.0.attn1.processor", + # "mid_block.attentions.0.transformer_blocks.1.attn1.processor", + ] + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.0"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + # pag_applied_layers = ["mid.block_0.attentions_1"] does not exist in the model + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.1"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + # pag_applied_layers = "down" should apply to all self-attention layers in down_blocks + # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor + # down_blocks.1.attentions.0.transformer_blocks.1.attn1.processor + # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.0"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1.attentions.1"] + 
pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 1 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + + expected_slice = np.array([0.7190, 0.5807, 0.6007, 0.5600, 0.6350, 0.6639, 0.5680, 0.5664, 0.5230]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol=1e-3, rtol=1e-3) + + +@slow +@require_torch_accelerator +class StableDiffusionPAGPipelineIntegrationTests(unittest.TestCase): + pipeline_class = StableDiffusionPAGInpaintPipeline + repo_id = "runwayml/stable-diffusion-v1-5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", seed=0, guidance_scale=7.0): + img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + init_image = load_image(img_url).convert("RGB") + mask_image = load_image(mask_url).convert("RGB") + + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "A majestic tiger sitting on a bench", + "generator": generator, + "image": init_image, + "mask_image": mask_image, + "strength": 0.8, + "num_inference_steps": 3, + "guidance_scale": guidance_scale, + "pag_scale": 3.0, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 512, 512, 3) + + expected_slice = np.array( + [0.38793945, 0.4111328, 0.47924805, 0.39208984, 0.4165039, 0.41674805, 0.37060547, 0.36791992, 0.40625] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = 
self.get_inputs(torch_device, guidance_scale=0.0) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.3876953, 0.40356445, 0.4934082, 0.39697266, 0.41674805, 0.41015625, 0.375, 0.36914062, 0.40649414] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl.py new file mode 100644 index 0000000000000000000000000000000000000000..cca5c61651b34b9d62101937687a3017f8db7ad7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl.py @@ -0,0 +1,353 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoPipelineForText2Image, + EulerDiscreteScheduler, + StableDiffusionXLPAGPipeline, + StableDiffusionXLPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLPAGPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLPAGPipeline + params = TEXT_TO_IMAGE_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + + def get_dummy_components(self, time_cond_proj_dim=None): + # Copied from tests.pipelines.stable_diffusion_xl.test_stable_diffusion_xl.StableDiffusionXLPipelineFastTests.get_dummy_components + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(2, 4), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + 
projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + norm_num_groups=1, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "pag_scale": 0.9, + "output_type": "np", + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline (expect same output when pag is disabled) + pipe_sd = StableDiffusionXLPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_applied_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # base pipeline + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # pag_applied_layers = ["mid","up","down"] should apply to all self-attention layers + all_self_attn_layers = [k for k in pipe.unet.attn_processors.keys() if "attn1" in k] + original_attn_procs = pipe.unet.attn_processors + pag_layers = ["mid", "down", "up"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_layers) + + # pag_applied_layers = ["mid"], or ["mid.block_0"] or ["mid.block_0.attentions_0"] should apply to all self-attention layers in mid_block, i.e. + # mid_block.attentions.0.transformer_blocks.0.attn1.processor + # mid_block.attentions.0.transformer_blocks.1.attn1.processor + all_self_attn_mid_layers = [ + "mid_block.attentions.0.transformer_blocks.0.attn1.processor", + "mid_block.attentions.0.transformer_blocks.1.attn1.processor", + ] + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.0"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert set(pipe.pag_attn_processors) == set(all_self_attn_mid_layers) + + # pag_applied_layers = ["mid.block_0.attentions_1"] does not exist in the model + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["mid_block.attentions.1"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + # pag_applied_layers = "down" should apply to all self-attention layers in down_blocks + # down_blocks.1.attentions.0.transformer_blocks.0.attn1.processor + # down_blocks.1.attentions.0.transformer_blocks.1.attn1.processor + # down_blocks.1.attentions.1.transformer_blocks.0.attn1.processor + # down_blocks.1.attentions.1.transformer_blocks.1.attn1.processor + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down"] + 
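# expect all four down_blocks self-attention processors listed above to be picked up +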
pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 4 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.0"] + with self.assertRaises(ValueError): + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 4 + + pipe.unet.set_attn_processor(original_attn_procs.copy()) + pag_layers = ["down_blocks.1.attentions.1"] + pipe._set_pag_attn_processor(pag_applied_layers=pag_layers, do_classifier_free_guidance=False) + assert len(pipe.pag_attn_processors) == 2 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array([0.5382, 0.5439, 0.4704, 0.4569, 0.5234, 0.4834, 0.5289, 0.5039, 0.4764]) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + +@slow +@require_torch_accelerator +class StableDiffusionXLPAGPipelineIntegrationTests(unittest.TestCase): + pipeline_class = StableDiffusionXLPAGPipeline + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", seed=0, guidance_scale=7.0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a polar bear sitting in a chair drinking a milkshake", + "negative_prompt": "deformed, ugly, wrong proportion, low res, bad anatomy, worst quality, low quality", + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": guidance_scale, + "pag_scale": 3.0, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.3123679, 0.31725878, 0.32026544, 0.327533, 0.3266391, 0.3303998, 0.33544615, 0.34181812, 0.34102726] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForText2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + 
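# enable_pag=True makes AutoPipelineForText2Image return the PAG-enabled variant of the pipeline +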
pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, guidance_scale=0.0) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.47400922, 0.48650584, 0.4839625, 0.4724013, 0.4890427, 0.49544555, 0.51707107, 0.54299414, 0.5224372] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..d311500d3ca7f90ad4188afaa385ee24d649f3e4 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl_img2img.py @@ -0,0 +1,338 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import random +import unittest + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + AutoencoderKL, + AutoPipelineForImage2Image, + EulerDiscreteScheduler, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLPAGImg2ImgPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLPAGImg2ImgPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLPAGImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) - {"height", "width"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} + ) + + supports_dduf = False + + # based on tests.pipelines.stable_diffusion_xl.test_stable_diffusion_xl_img2img_pipeline.get_dummy_components + def get_dummy_components( + self, skip_first_text_encoder=False, time_cond_proj_dim=None, requires_aesthetics_score=False + ): + torch.manual_seed(0) + 
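# seed before each submodule so the dummy weights stay reproducible across test runs +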
unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + time_cond_proj_dim=time_cond_proj_dim, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=72 if requires_aesthetics_score else 80, # 5 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + image_size=224, + projection_dim=32, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + feature_extractor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "requires_aesthetics_score": requires_aesthetics_score, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + } + return components + + # based on tests.pipelines.stable_diffusion_xl.test_stable_diffusion_xl_img2img_pipeline.StableDiffusionXLImg2ImgPipelineFastTests + # add `pag_scale` to the inputs + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "pag_scale": 3.0, + "output_type": "np", + "strength": 0.8, + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = 
self.get_dummy_components(requires_aesthetics_score=True) + + # base pipeline + pipe_sd = StableDiffusionXLImg2ImgPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." + ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(requires_aesthetics_score=True) + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 32, + 32, + 3, + ), f"the shape of the output image should be (1, 32, 32, 3) but got {image.shape}" + expected_slice = np.array([0.4613, 0.4902, 0.4406, 0.6788, 0.5611, 0.4529, 0.5893, 0.5975, 0.5226]) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + +@slow +@require_torch_accelerator +class StableDiffusionXLPAGImg2ImgPipelineIntegrationTests(unittest.TestCase): + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", seed=0, guidance_scale=7.0): + img_url = ( + "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/sdxl-text2img.png" + ) + + init_image = load_image(img_url) + + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a dog catching a frisbee in the jungle", + "generator": generator, + "image": init_image, + "strength": 0.8, + "num_inference_steps": 3, + "guidance_scale": guidance_scale, + "pag_scale": 3.0, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + + image_slice = image[0, 
-3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.20301354, 0.21078318, 0.2021082, 0.20277798, 0.20681083, 0.19562206, 0.20121682, 0.21562952, 0.21277016] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForImage2Image.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, guidance_scale=0.0) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.21303111, 0.22188407, 0.2124992, 0.21365267, 0.18823743, 0.17569828, 0.21113116, 0.19419771, 0.18919235] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..00a07582e205dd9fe1a3bdf024655d3b8c87929b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pag/test_pag_sdxl_inpaint.py @@ -0,0 +1,344 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
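+# The fast tests below follow the PAG testing pattern used throughout this directory: with
+# `pag_scale=0.0` the PAG pipeline should match the base `StableDiffusionXLInpaintPipeline`
+# output (difference below 1e-3), while enabling `pag_applied_layers` should change the result,
+# and `test_pag_inference` compares a fixed 3x3 output slice against hard-coded expected values.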
+ +import gc +import inspect +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + AutoencoderKL, + AutoPipelineForInpainting, + EulerDiscreteScheduler, + StableDiffusionXLInpaintPipeline, + StableDiffusionXLPAGInpaintPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineFromPipeTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLPAGInpaintPipelineFastTests( + PipelineTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineFromPipeTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLPAGInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS.union({"pag_scale", "pag_adaptive_scale"}) + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset([]) + image_latents_params = frozenset([]) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + {"add_text_embeds", "add_time_ids", "mask", "masked_image_latents"} + ) + + supports_dduf = False + + # based on tests.pipelines.stable_diffusion_xl.test_stable_diffusion_xl_inpaint.StableDiffusionXLInpaintPipelineFastTests.get_dummy_components + def get_dummy_components( + self, skip_first_text_encoder=False, time_cond_proj_dim=None, requires_aesthetics_score=False + ): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + time_cond_proj_dim=time_cond_proj_dim, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=72 if requires_aesthetics_score else 80, # 5 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = 
CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + image_size=224, + projection_dim=32, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + feature_extractor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + "requires_aesthetics_score": requires_aesthetics_score, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + # create mask + image[8:, 8:, :] = 255 + mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "strength": 1.0, + "pag_scale": 0.9, + "output_type": "np", + } + return inputs + + def test_pag_disable_enable(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(requires_aesthetics_score=True) + + # base pipeline + pipe_sd = StableDiffusionXLInpaintPipeline(**components) + pipe_sd = pipe_sd.to(device) + pipe_sd.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["pag_scale"] + assert "pag_scale" not in inspect.signature(pipe_sd.__call__).parameters, ( + f"`pag_scale` should not be a call parameter of the base pipeline {pipe_sd.__class__.__name__}." 
+ ) + out = pipe_sd(**inputs).images[0, -3:, -3:, -1] + + # pag disabled with pag_scale=0.0 + pipe_pag = self.pipeline_class(**components) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["pag_scale"] = 0.0 + out_pag_disabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + # pag enabled + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + out_pag_enabled = pipe_pag(**inputs).images[0, -3:, -3:, -1] + + assert np.abs(out.flatten() - out_pag_disabled.flatten()).max() < 1e-3 + assert np.abs(out.flatten() - out_pag_enabled.flatten()).max() > 1e-3 + + def test_pag_inference(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(requires_aesthetics_score=True) + + pipe_pag = self.pipeline_class(**components, pag_applied_layers=["mid", "up", "down"]) + pipe_pag = pipe_pag.to(device) + pipe_pag.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe_pag(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == ( + 1, + 64, + 64, + 3, + ), f"the shape of the output image should be (1, 64, 64, 3) but got {image.shape}" + expected_slice = np.array([0.8366, 0.5513, 0.6105, 0.6213, 0.6957, 0.7400, 0.6614, 0.6102, 0.5239]) + + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + assert max_diff < 1e-3, f"output is different from expected, {image_slice.flatten()}" + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + +@slow +@require_torch_accelerator +class StableDiffusionXLPAGInpaintPipelineIntegrationTests(unittest.TestCase): + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", seed=0, guidance_scale=7.0): + img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" + mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" + + init_image = load_image(img_url).convert("RGB") + mask_image = load_image(mask_url).convert("RGB") + + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "A majestic tiger sitting on a bench", + "generator": generator, + "image": init_image, + "mask_image": mask_image, + "strength": 0.8, + "num_inference_steps": 3, + "guidance_scale": guidance_scale, + "pag_scale": 3.0, + "output_type": "np", + } + return inputs + + def test_pag_cfg(self): + pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.41385046, 0.39608297, 0.4360491, 0.26872507, 0.32187328, 0.4242474, 0.2603805, 0.34167895, 0.46561807] + ) + assert 
np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) + + def test_pag_uncond(self): + pipeline = AutoPipelineForInpainting.from_pretrained(self.repo_id, enable_pag=True, torch_dtype=torch.float16) + pipeline.enable_model_cpu_offload(device=torch_device) + pipeline.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, guidance_scale=0.0) + image = pipeline(**inputs).images + + image_slice = image[0, -3:, -3:, -1].flatten() + assert image.shape == (1, 1024, 1024, 3) + expected_slice = np.array( + [0.41597816, 0.39302617, 0.44287828, 0.2687074, 0.28315824, 0.40582314, 0.20877528, 0.2380802, 0.39447647] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3, ( + f"output is different from expected, {image_slice.flatten()}" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pipeline_params.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pipeline_params.py new file mode 100644 index 0000000000000000000000000000000000000000..3db7c9fa1b0c5234793e45b66713c78323e58377 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pipeline_params.py @@ -0,0 +1,134 @@ +# These are canonical sets of parameters for different types of pipelines. +# They are set on subclasses of `PipelineTesterMixin` as `params` and +# `batch_params`. +# +# If a pipeline's set of arguments has minor changes from one of the common sets +# of arguments, do not make modifications to the existing common sets of arguments. +# I.e. a text to image pipeline with non-configurable height and width arguments +# should set its attribute as `params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. + +TEXT_TO_IMAGE_PARAMS = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + "cross_attention_kwargs", + ] +) + +IMAGE_VARIATION_PARAMS = frozenset( + [ + "image", + "height", + "width", + "guidance_scale", + ] +) + +TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset( + [ + "prompt", + "image", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] +) + +TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset( + [ + # Text guided image variation with an image mask + "prompt", + "image", + "mask_image", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] +) + +IMAGE_INPAINTING_PARAMS = frozenset( + [ + # image variation with an image mask + "image", + "mask_image", + "height", + "width", + "guidance_scale", + ] +) + +IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset( + [ + "example_image", + "image", + "mask_image", + "height", + "width", + "guidance_scale", + ] +) + +UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"]) + +CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"]) + +CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"]) + +TEXT_TO_AUDIO_PARAMS = frozenset( + [ + "prompt", + "audio_length_in_s", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + "cross_attention_kwargs", + ] +) + +TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"]) + +UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"]) + +# image params +TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([]) + +IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"]) + + +# batch params +TEXT_TO_IMAGE_BATCH_PARAMS = 
frozenset(["prompt", "negative_prompt"]) + +IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"]) + +TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"]) + +TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) + +IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"]) + +IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"]) + +UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([]) + +UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([]) + +TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"]) + +TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"]) + +VIDEO_TO_VIDEO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt", "video"]) + +# callback params +TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS = frozenset(["prompt_embeds"]) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_alpha/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_alpha/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_alpha/test_pixart.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_alpha/test_pixart.py new file mode 100644 index 0000000000000000000000000000000000000000..fd41c9887dcccf67c3324cc03eb6ef29990eefa7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_alpha/test_pixart.py @@ -0,0 +1,375 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + PixArtAlphaPipeline, + PixArtTransformer2DModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class PixArtAlphaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = PixArtAlphaPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + required_optional_params = PipelineTesterMixin.required_optional_params + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = PixArtTransformer2DModel( + sample_size=8, + num_layers=2, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + caption_channels=32, + in_channels=4, + cross_attention_dim=24, + out_channels=8, + attention_bias=True, + activation_fn="gelu-approximate", + num_embeds_ada_norm=1000, + norm_type="ada_norm_single", + norm_elementwise_affine=False, + norm_eps=1e-6, + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "use_resolution_binning": False, + "output_type": "np", + } + return inputs + + @unittest.skip("Not supported.") + def test_sequential_cpu_offload_forward_pass(self): + # TODO(PVP, Sayak) need to fix later + return + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 8, 8, 3)) + expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.483, 0.2583, 0.5331, 0.4852]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_inference_non_square_images(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs, height=32, width=48).images + image_slice = image[0, -3:, -3:, -1] + self.assertEqual(image.shape, (1, 32, 48, 3)) + + expected_slice = np.array([0.6493, 0.537, 
0.4081, 0.4762, 0.3695, 0.4711, 0.3026, 0.5218, 0.5263]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + @unittest.skip("Test is already covered through encode_prompt isolation.") + def test_save_load_optional_components(self): + pass + + def test_inference_with_embeddings_and_multiple_images(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = inputs["prompt"] + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt) + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attn_mask, + "negative_prompt": None, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": neg_prompt_attn_mask, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "num_images_per_prompt": 2, + "use_resolution_binning": False, + } + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attn_mask, + "negative_prompt": None, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": neg_prompt_attn_mask, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "num_images_per_prompt": 2, + "use_resolution_binning": False, + } + + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1e-4) + + def test_inference_with_multiple_images_per_prompt(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_images_per_prompt"] = 2 + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (2, 8, 8, 3)) + expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.483, 0.2583, 0.5331, 0.4852]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_raises_warning_for_mask_feature(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + 
inputs.update({"mask_feature": True}) + + with self.assertWarns(FutureWarning) as warning_ctx: + _ = pipe(**inputs).images + + assert "mask_feature" in str(warning_ctx.warning) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + +@slow +@require_torch_accelerator +class PixArtAlphaPipelineIntegrationTests(unittest.TestCase): + ckpt_id_1024 = "PixArt-alpha/PixArt-XL-2-1024-MS" + ckpt_id_512 = "PixArt-alpha/PixArt-XL-2-512x512" + prompt = "A small cactus with a happy face in the Sahara desert." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_pixart_1024(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.0742, 0.0835, 0.2114, 0.0295, 0.0784, 0.2361, 0.1738, 0.2251, 0.3589]) + + max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) + self.assertLessEqual(max_diff, 1e-4) + + def test_pixart_512(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + + image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.3477, 0.3882, 0.4541, 0.3413, 0.3821, 0.4463, 0.4001, 0.4409, 0.4958]) + + max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) + self.assertLessEqual(max_diff, 1e-4) + + def test_pixart_1024_without_resolution_binning(self): + generator = torch.manual_seed(0) + + pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + height, width = 1024, 768 + num_inference_steps = 2 + + image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + ).images + image_slice = image[0, -3:, -3:, -1] + + generator = torch.manual_seed(0) + no_res_bin_image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + use_resolution_binning=False, + ).images + no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] + + assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) + + def test_pixart_512_without_resolution_binning(self): + generator = torch.manual_seed(0) + + pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + height, width = 512, 768 + num_inference_steps = 2 + + image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + ).images + image_slice = image[0, -3:, -3:, -1] + + generator = torch.manual_seed(0) + no_res_bin_image = pipe( + prompt, + height=height, + width=width, + generator=generator, + 
num_inference_steps=num_inference_steps, + output_type="np", + use_resolution_binning=False, + ).images + no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] + + assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_sigma/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_sigma/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_sigma/test_pixart.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_sigma/test_pixart.py new file mode 100644 index 0000000000000000000000000000000000000000..2cb80df81adf9ebf474a29685a3b98d76b3f7d82 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pixart_sigma/test_pixart.py @@ -0,0 +1,414 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + PixArtSigmaPipeline, + PixArtTransformer2DModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, + to_np, +) + + +enable_full_determinism() + + +class PixArtSigmaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = PixArtSigmaPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + required_optional_params = PipelineTesterMixin.required_optional_params + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = PixArtTransformer2DModel( + sample_size=8, + num_layers=2, + patch_size=2, + attention_head_dim=8, + num_attention_heads=3, + caption_channels=32, + in_channels=4, + cross_attention_dim=24, + out_channels=8, + attention_bias=True, + activation_fn="gelu-approximate", + num_embeds_ada_norm=1000, + norm_type="ada_norm_single", + norm_elementwise_affine=False, + norm_eps=1e-6, + ) + torch.manual_seed(0) + vae = AutoencoderKL() + + scheduler = DDIMScheduler() + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + components = { + "transformer": transformer.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + 
"text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "use_resolution_binning": False, + "output_type": "np", + } + return inputs + + @unittest.skip("Not supported.") + def test_sequential_cpu_offload_forward_pass(self): + # TODO(PVP, Sayak) need to fix later + return + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 8, 8, 3)) + expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.4830, 0.2583, 0.5331, 0.4852]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_inference_non_square_images(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs, height=32, width=48).images + image_slice = image[0, -3:, -3:, -1] + self.assertEqual(image.shape, (1, 32, 48, 3)) + + expected_slice = np.array([0.6493, 0.5370, 0.4081, 0.4762, 0.3695, 0.4711, 0.3026, 0.5218, 0.5263]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_inference_with_embeddings_and_multiple_images(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = inputs["prompt"] + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = inputs["output_type"] + + prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt) + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attn_mask, + "negative_prompt": None, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": neg_prompt_attn_mask, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "num_images_per_prompt": 2, + "use_resolution_binning": False, + } + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + + generator = inputs["generator"] + num_inference_steps = inputs["num_inference_steps"] + output_type = 
inputs["output_type"] + + # inputs with prompt converted to embeddings + inputs = { + "prompt_embeds": prompt_embeds, + "prompt_attention_mask": prompt_attn_mask, + "negative_prompt": None, + "negative_prompt_embeds": negative_prompt_embeds, + "negative_prompt_attention_mask": neg_prompt_attn_mask, + "generator": generator, + "num_inference_steps": num_inference_steps, + "output_type": output_type, + "num_images_per_prompt": 2, + "use_resolution_binning": False, + } + + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, 1e-4) + + def test_inference_with_multiple_images_per_prompt(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_images_per_prompt"] = 2 + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (2, 8, 8, 3)) + expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.4830, 0.2583, 0.5331, 0.4852]) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + @unittest.skip("Test is already covered through encode_prompt isolation.") + def test_save_load_optional_components(self): + pass + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + +@slow +@require_torch_accelerator +class PixArtSigmaPipelineIntegrationTests(unittest.TestCase): + ckpt_id_1024 = "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS" + ckpt_id_512 = "PixArt-alpha/PixArt-Sigma-XL-2-512-MS" + prompt = "A small cactus with a happy face in the Sahara desert." 
+ + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_pixart_1024(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = PixArtSigmaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + prompt = self.prompt + + image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.4517, 0.4446, 0.4375, 0.449, 0.4399, 0.4365, 0.4583, 0.4629, 0.4473]) + + max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) + self.assertLessEqual(max_diff, 1e-4) + + def test_pixart_512(self): + generator = torch.Generator("cpu").manual_seed(0) + + transformer = PixArtTransformer2DModel.from_pretrained( + self.ckpt_id_512, subfolder="transformer", torch_dtype=torch.float16 + ) + pipe = PixArtSigmaPipeline.from_pretrained( + self.ckpt_id_1024, transformer=transformer, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + + image = pipe(prompt, generator=generator, num_inference_steps=2, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.0479, 0.0378, 0.0217, 0.0942, 0.064, 0.0791, 0.2073, 0.1975, 0.2017]) + + max_diff = numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice) + self.assertLessEqual(max_diff, 1e-4) + + def test_pixart_1024_without_resolution_binning(self): + generator = torch.manual_seed(0) + + pipe = PixArtSigmaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + height, width = 1024, 768 + num_inference_steps = 2 + + image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + ).images + image_slice = image[0, -3:, -3:, -1] + + generator = torch.manual_seed(0) + no_res_bin_image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + use_resolution_binning=False, + ).images + no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] + + assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) + + def test_pixart_512_without_resolution_binning(self): + generator = torch.manual_seed(0) + + transformer = PixArtTransformer2DModel.from_pretrained( + self.ckpt_id_512, subfolder="transformer", torch_dtype=torch.float16 + ) + pipe = PixArtSigmaPipeline.from_pretrained( + self.ckpt_id_1024, transformer=transformer, torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + + prompt = self.prompt + height, width = 512, 768 + num_inference_steps = 2 + + image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + ).images + image_slice = image[0, -3:, -3:, -1] + + generator = torch.manual_seed(0) + no_res_bin_image = pipe( + prompt, + height=height, + width=width, + generator=generator, + num_inference_steps=num_inference_steps, + output_type="np", + use_resolution_binning=False, + ).images + no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] + + assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pndm/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pndm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pndm/test_pndm.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pndm/test_pndm.py new file mode 100644 index 0000000000000000000000000000000000000000..61d6efe88ccd1e75eb28d615389866bd37f365d7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/pndm/test_pndm.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch + +from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel + +from ...testing_utils import enable_full_determinism, nightly, require_torch, torch_device + + +enable_full_determinism() + + +class PNDMPipelineFastTests(unittest.TestCase): + @property + def dummy_uncond_unet(self): + torch.manual_seed(0) + model = UNet2DModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=3, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + return model + + def test_inference(self): + unet = self.dummy_uncond_unet + scheduler = PNDMScheduler() + + pndm = PNDMPipeline(unet=unet, scheduler=scheduler) + pndm.to(torch_device) + pndm.set_progress_bar_config(disable=None) + + generator = torch.manual_seed(0) + image = pndm(generator=generator, num_inference_steps=20, output_type="np").images + + generator = torch.manual_seed(0) + image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="np", return_dict=False)[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + +@nightly +@require_torch +class PNDMPipelineIntegrationTests(unittest.TestCase): + def test_inference_cifar10(self): + model_id = "google/ddpm-cifar10-32" + + unet = UNet2DModel.from_pretrained(model_id) + scheduler = PNDMScheduler() + + pndm = PNDMPipeline(unet=unet, scheduler=scheduler) + pndm.to(torch_device) + pndm.set_progress_bar_config(disable=None) + generator = torch.manual_seed(0) + image = pndm(generator=generator, output_type="np").images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/__init__.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage.py new file mode 100644 index 0000000000000000000000000000000000000000..8ebfe7d08bc1ffe319bfdedaa499378f762e623a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage.py @@ -0,0 +1,236 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImagePipeline, + QwenImageTransformer2DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = QwenImagePipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + temperal_downsample=[False, True], + # fmt: off + latents_mean=[0.0] * 4, + latents_std=[1.0] * 4, + # fmt: on + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1000000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + 
vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([0.56331, 0.63677, 0.6015, 0.56369, 0.58166, 0.55277, 0.57176, 0.63261, 0.41466, 0.35561, 0.56229, 0.48334, 0.49714, 0.52622, 0.40872, 0.50208]) + # fmt: on + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + 
tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_controlnet.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..c78e5cb233d3b9f9d1401e9e830c7be0ea30e292 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_controlnet.py @@ -0,0 +1,339 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImageControlNetModel, + QwenImageControlNetPipeline, + QwenImageMultiControlNetModel, + QwenImageTransformer2DModel, +) +from diffusers.utils.testing_utils import enable_full_determinism, torch_device +from diffusers.utils.torch_utils import randn_tensor + +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = QwenImageControlNetPipeline + params = (TEXT_TO_IMAGE_PARAMS | frozenset(["control_image", "controlnet_conditioning_scale"])) - { + "cross_attention_kwargs" + } + batch_params = frozenset(["prompt", "negative_prompt", "control_image"]) + image_params = frozenset(["control_image"]) + image_latents_params = frozenset(["latents"]) + + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "control_image", + "controlnet_conditioning_scale", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + + supports_dduf = False + test_xformers_attention = True + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + controlnet = QwenImageControlNetModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + 
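# Minimal VAE configuration for the fast test; with latents_mean=0 and
+ # latents_std=1 below, the latent (de-)normalization is effectively a no-op.
+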
temperal_downsample=[False, True], + latents_mean=[0.0] * z_dim, + latents_std=[1.0] * z_dim, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1_000_000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "controlnet": controlnet, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + control_image = randn_tensor( + (1, 3, 32, 32), + generator=generator, + device=torch.device(device), + dtype=torch.float32, + ) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "control_image": control_image, + "controlnet_conditioning_scale": 0.5, + "output_type": "pt", + } + + return inputs + + def test_qwen_controlnet(self): + device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + # Expected slice from the generated image + expected_slice = torch.tensor( + [ + 0.4726, + 0.5549, + 0.6324, + 0.6548, + 0.4968, + 0.4639, + 0.4749, + 0.4898, + 0.4725, + 0.4645, + 0.4435, + 0.3339, + 0.3400, + 0.4630, + 0.3879, + 0.4406, + ] + ) + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_qwen_controlnet_multicondition(self): + device = "cpu" + components = self.get_dummy_components() + + components["controlnet"] = QwenImageMultiControlNetModel([components["controlnet"]]) + + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + control_image = inputs["control_image"] + inputs["control_image"] = [control_image, control_image] + inputs["controlnet_conditioning_scale"] = [0.5, 0.5] + + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + # Expected slice from the generated image + expected_slice = torch.tensor( + [ + 0.6239, + 0.6642, + 0.5768, + 0.6039, + 0.5270, + 0.5070, + 0.5006, + 0.5271, + 0.4506, + 0.3085, + 0.3435, + 0.5152, + 0.5096, + 0.5422, + 0.4286, + 0.5752, + ] + ) + + generated_slice = 
generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + inputs["control_image"] = randn_tensor( + (1, 3, 128, 128), + generator=inputs["generator"], + device=torch.device(generator_device), + dtype=torch.float32, + ) + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + inputs["control_image"] = randn_tensor( + (1, 3, 128, 128), + generator=inputs["generator"], + device=torch.device(generator_device), + dtype=torch.float32, + ) + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_edit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_edit.py new file mode 100644 index 0000000000000000000000000000000000000000..058548cf5f1b8ea7dcfb3e8eaa1758c439a7ba34 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_edit.py @@ -0,0 +1,243 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import pytest +import torch +from PIL import Image +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImageEditPipeline, + QwenImageTransformer2DModel, +) + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenImageEditPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = QwenImageEditPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = frozenset(["prompt", "image"]) + image_params = frozenset(["image"]) + image_latents_params = frozenset(["latents"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + tiny_ckpt_id = "hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration" + + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + temperal_downsample=[False, True], + latents_mean=[0.0] * z_dim, + latents_std=[1.0] * z_dim, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1000000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained(tiny_ckpt_id) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "processor": Qwen2VLProcessor.from_pretrained(tiny_ckpt_id), + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "image": Image.new("RGB", (32, 32)), + 
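# A solid black 32x32 RGB image serves as the deterministic edit source for this
+ # fast test.
+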
"negative_prompt": "bad quality", + "generator": generator, + "num_inference_steps": 2, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([[0.5637, 0.6341, 0.6001, 0.5620, 0.5794, 0.5498, 0.5757, 0.6389, 0.4174, 0.3597, 0.5649, 0.4894, 0.4969, 0.5255, 0.4083, 0.4986]]) + # fmt: on + + generated_slice = generated_image.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + @pytest.mark.xfail(condition=True, reason="Preconfigured embeddings need to be revisited.", strict=True) + def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=1e-4, rtol=1e-4): + super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol, rtol) diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..07e683ec7f5a6dde2c3f2ed34de079cd7a49efd6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_img2img.py @@ -0,0 +1,218 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImageImg2ImgPipeline, + QwenImageTransformer2DModel, +) + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenImageImg2ImgPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = QwenImageImg2ImgPipeline + params = frozenset(["prompt", "image", "height", "width", "guidance_scale", "true_cfg_scale", "strength"]) + batch_params = frozenset(["prompt", "image"]) + image_params = frozenset(["image"]) + image_latents_params = frozenset(["latents"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_attention_slicing = True + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + temperal_downsample=[False, True], + latents_mean=[0.0] * 4, + latents_std=[1.0] * 4, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1000000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + return { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "generator": 
generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs).images[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs).images[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs).images[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..b564624540c3eafd81f84e501ad590246328cc92 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/qwenimage/test_qwenimage_inpaint.py @@ -0,0 +1,233 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest + +import numpy as np +import torch +from transformers import Qwen2_5_VLConfig, Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer + +from diffusers import ( + AutoencoderKLQwenImage, + FlowMatchEulerDiscreteScheduler, + QwenImageInpaintPipeline, + QwenImageTransformer2DModel, +) + +from ...testing_utils import enable_full_determinism, floats_tensor, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class QwenImageInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = QwenImageInpaintPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + supports_dduf = False + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = QwenImageTransformer2DModel( + patch_size=2, + in_channels=16, + out_channels=4, + num_layers=2, + attention_head_dim=16, + num_attention_heads=3, + joint_attention_dim=16, + guidance_embeds=False, + axes_dims_rope=(8, 4, 4), + ) + + torch.manual_seed(0) + z_dim = 4 + vae = AutoencoderKLQwenImage( + base_dim=z_dim * 6, + z_dim=z_dim, + dim_mult=[1, 2, 4], + num_res_blocks=1, + temperal_downsample=[False, True], + # fmt: off + latents_mean=[0.0] * 4, + latents_std=[1.0] * 4, + # fmt: on + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler() + + torch.manual_seed(0) + config = Qwen2_5_VLConfig( + text_config={ + "hidden_size": 16, + "intermediate_size": 16, + "num_hidden_layers": 2, + "num_attention_heads": 2, + "num_key_value_heads": 2, + "rope_scaling": { + "mrope_section": [1, 1, 2], + "rope_type": "default", + "type": "default", + }, + "rope_theta": 1000000.0, + }, + vision_config={ + "depth": 2, + "hidden_size": 16, + "intermediate_size": 16, + "num_heads": 2, + "out_hidden_size": 16, + }, + hidden_size=16, + vocab_size=152064, + vision_end_token_id=151653, + vision_start_token_id=151652, + vision_token_id=151654, + ) + text_encoder = Qwen2_5_VLForConditionalGeneration(config) + tokenizer = Qwen2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-Qwen2VLForConditionalGeneration") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = torch.ones((1, 1, 32, 32)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = 
torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "dance monkey", + "negative_prompt": "bad quality", + "image": image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 3.0, + "true_cfg_scale": 1.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + } + + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + generated_image = image[0] + self.assertEqual(generated_image.shape, (3, 32, 32)) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-1) + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana.py new file mode 100644 index 0000000000000000000000000000000000000000..34ea3079b1431322697f1c3ff1c803e957fc09f8 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana.py @@ -0,0 +1,373 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import inspect +import unittest + +import numpy as np +import torch +from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer + +from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class SanaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SanaPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SanaTransformer2DModel( + patch_size=1, + in_channels=4, + out_channels=4, + num_layers=1, + num_attention_heads=2, + attention_head_dim=4, + num_cross_attention_heads=2, + cross_attention_head_dim=4, + cross_attention_dim=8, + caption_channels=8, + sample_size=32, + ) + + torch.manual_seed(0) + vae = AutoencoderDC( + in_channels=3, + latent_channels=4, + attention_head_dim=2, + encoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + decoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + encoder_block_out_channels=(8, 8), + decoder_block_out_channels=(8, 8), + encoder_qkv_multiscales=((), (5,)), + decoder_qkv_multiscales=((), (5,)), + encoder_layers_per_block=(1, 1), + decoder_layers_per_block=[1, 1], + downsample_block_type="conv", + upsample_block_type="interpolate", + decoder_norm_types="rms_norm", + decoder_act_fns="silu", + scaling_factor=0.41407, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + torch.manual_seed(0) + text_encoder_config = Gemma2Config( + head_dim=16, + hidden_size=8, + initializer_range=0.02, + intermediate_size=64, + max_position_embeddings=8192, + model_type="gemma2", + num_attention_heads=2, + num_hidden_layers=1, + num_key_value_heads=2, + vocab_size=8, + attn_implementation="eager", + ) + text_encoder = Gemma2Model(text_encoder_config) + tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + 
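+ # Prompts below are left empty on purpose: the dummy Gemma2 text encoder has a
+ # vocabulary of only 8 tokens, so any real prompt would trigger an embedding
+ # lookup error (see the skipped batch tests further down).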
+ def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + "complex_human_instruction": None, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 32, 32)) + expected_image = torch.randn(3, 32, 32) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, 
"set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass + + def test_float16_inference(self): + # Requires higher tolerance as model seems very sensitive to dtype + super().test_float16_inference(expected_max_diff=0.08) + + +@slow +@require_torch_accelerator +class SanaPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." 
+ + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_sana_1024(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = SanaPipeline.from_pretrained( + "Efficient-Large-Model/Sana_1600M_1024px_diffusers", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + + image = pipe( + prompt=self.prompt, + height=1024, + width=1024, + generator=generator, + num_inference_steps=20, + output_type="np", + ).images[0] + + image = image.flatten() + output_slice = np.concatenate((image[:16], image[-16:])) + + # fmt: off + expected_slice = np.array([0.0427, 0.0789, 0.0662, 0.0464, 0.082, 0.0574, 0.0535, 0.0886, 0.0647, 0.0549, 0.0872, 0.0605, 0.0593, 0.0942, 0.0674, 0.0581, 0.0076, 0.0168, 0.0027, 0.0063, 0.0159, 0.0, 0.0071, 0.0198, 0.0034, 0.0105, 0.0212, 0.0, 0.0, 0.0166, 0.0042, 0.0125]) + # fmt: on + + self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-4)) + + def test_sana_512(self): + generator = torch.Generator("cpu").manual_seed(0) + + pipe = SanaPipeline.from_pretrained( + "Efficient-Large-Model/Sana_1600M_512px_diffusers", torch_dtype=torch.float16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + + image = pipe( + prompt=self.prompt, + height=512, + width=512, + generator=generator, + num_inference_steps=20, + output_type="np", + ).images[0] + + image = image.flatten() + output_slice = np.concatenate((image[:16], image[-16:])) + + # fmt: off + expected_slice = np.array([0.0803, 0.0774, 0.1108, 0.0872, 0.093, 0.1118, 0.0952, 0.0898, 0.1038, 0.0818, 0.0754, 0.0894, 0.074, 0.0691, 0.0906, 0.0671, 0.0154, 0.0254, 0.0203, 0.0178, 0.0283, 0.0193, 0.0215, 0.0273, 0.0188, 0.0212, 0.0273, 0.0151, 0.0061, 0.0244, 0.0212, 0.0259]) + # fmt: on + + self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-4)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_controlnet.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_controlnet.py new file mode 100644 index 0000000000000000000000000000000000000000..043e276fcb844741c0d18744db1347a1162b373e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_controlnet.py @@ -0,0 +1,328 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer + +from diffusers import ( + AutoencoderDC, + FlowMatchEulerDiscreteScheduler, + SanaControlNetModel, + SanaControlNetPipeline, + SanaTransformer2DModel, +) +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class SanaControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SanaControlNetPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + controlnet = SanaControlNetModel( + patch_size=1, + in_channels=4, + out_channels=4, + num_layers=1, + num_attention_heads=2, + attention_head_dim=4, + num_cross_attention_heads=2, + cross_attention_head_dim=4, + cross_attention_dim=8, + caption_channels=8, + sample_size=32, + ) + + torch.manual_seed(0) + transformer = SanaTransformer2DModel( + patch_size=1, + in_channels=4, + out_channels=4, + num_layers=1, + num_attention_heads=2, + attention_head_dim=4, + num_cross_attention_heads=2, + cross_attention_head_dim=4, + cross_attention_dim=8, + caption_channels=8, + sample_size=32, + ) + + torch.manual_seed(0) + vae = AutoencoderDC( + in_channels=3, + latent_channels=4, + attention_head_dim=2, + encoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + decoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + encoder_block_out_channels=(8, 8), + decoder_block_out_channels=(8, 8), + encoder_qkv_multiscales=((), (5,)), + decoder_qkv_multiscales=((), (5,)), + encoder_layers_per_block=(1, 1), + decoder_layers_per_block=[1, 1], + downsample_block_type="conv", + upsample_block_type="interpolate", + decoder_norm_types="rms_norm", + decoder_act_fns="silu", + scaling_factor=0.41407, + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + + torch.manual_seed(0) + text_encoder_config = Gemma2Config( + head_dim=16, + hidden_size=8, + initializer_range=0.02, + intermediate_size=64, + max_position_embeddings=8192, + model_type="gemma2", + num_attention_heads=2, + num_hidden_layers=1, + num_key_value_heads=2, + vocab_size=8, + attn_implementation="eager", + ) + text_encoder = Gemma2Model(text_encoder_config) + tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "controlnet": controlnet, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + control_image = randn_tensor((1, 3, 32, 32), generator=generator, device=device) + inputs = { + "prompt": 
"", + "negative_prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + "complex_human_instruction": None, + "control_image": control_image, + "controlnet_conditioning_scale": 1.0, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 32, 32)) + expected_image = torch.randn(3, 32, 32) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device 
= "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass + + def test_float16_inference(self): + # Requires higher tolerance as model seems very sensitive to dtype + super().test_float16_inference(expected_max_diff=0.08) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_sprint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_sprint.py new file mode 100644 index 0000000000000000000000000000000000000000..fee2304dce1b0a1e180c2c8a9a44ae2cdf8d5a20 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_sprint.py @@ -0,0 +1,302 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer + +from diffusers import AutoencoderDC, SanaSprintPipeline, SanaTransformer2DModel, SCMScheduler + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class SanaSprintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SanaSprintPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "negative_prompt", "negative_prompt_embeds"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"} + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS - {"negative_prompt"} + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SanaTransformer2DModel( + patch_size=1, + in_channels=4, + out_channels=4, + num_layers=1, + num_attention_heads=2, + attention_head_dim=4, + num_cross_attention_heads=2, + cross_attention_head_dim=4, + cross_attention_dim=8, + caption_channels=8, + sample_size=32, + qk_norm="rms_norm_across_heads", + guidance_embeds=True, + ) + + torch.manual_seed(0) + vae = AutoencoderDC( + in_channels=3, + latent_channels=4, + attention_head_dim=2, + encoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + decoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + encoder_block_out_channels=(8, 8), + decoder_block_out_channels=(8, 8), + encoder_qkv_multiscales=((), (5,)), + decoder_qkv_multiscales=((), (5,)), + encoder_layers_per_block=(1, 1), + decoder_layers_per_block=[1, 1], + downsample_block_type="conv", + upsample_block_type="interpolate", + decoder_norm_types="rms_norm", + decoder_act_fns="silu", + scaling_factor=0.41407, + ) + + torch.manual_seed(0) + scheduler = SCMScheduler() + + torch.manual_seed(0) + text_encoder_config = Gemma2Config( + head_dim=16, + hidden_size=8, + initializer_range=0.02, + intermediate_size=64, + max_position_embeddings=8192, + model_type="gemma2", + num_attention_heads=2, + num_hidden_layers=1, + num_key_value_heads=2, + vocab_size=8, + attn_implementation="eager", + ) + text_encoder = Gemma2Model(text_encoder_config) + tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + "complex_human_instruction": None, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) 
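+ # The check below against fresh random noise with a 1e10 tolerance is a smoke
+ # test: it only asserts that the pipeline runs end to end with the expected
+ # output shape and finite values.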
+ + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 32, 32)) + expected_image = torch.randn(3, 32, 32) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = 
np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass + + def test_float16_inference(self): + # Requires higher tolerance as model seems very sensitive to dtype + super().test_float16_inference(expected_max_diff=0.08) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_sprint_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_sprint_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..c218abb8e951bee50cce5d4c5a13c62edce7575a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/sana/test_sana_sprint_img2img.py @@ -0,0 +1,314 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
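+# Fast tests for SanaSprintImg2ImgPipeline, built from tiny dummy components (SanaTransformer2DModel, AutoencoderDC, SCMScheduler, and a tiny Gemma2 text encoder).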
+ +import inspect +import unittest + +import numpy as np +import torch +from transformers import Gemma2Config, Gemma2Model, GemmaTokenizer + +from diffusers import AutoencoderDC, SanaSprintImg2ImgPipeline, SanaTransformer2DModel, SCMScheduler +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class SanaSprintImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SanaSprintImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { + "negative_prompt", + "negative_prompt_embeds", + } + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS - {"negative_prompt"} + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SanaTransformer2DModel( + patch_size=1, + in_channels=4, + out_channels=4, + num_layers=1, + num_attention_heads=2, + attention_head_dim=4, + num_cross_attention_heads=2, + cross_attention_head_dim=4, + cross_attention_dim=8, + caption_channels=8, + sample_size=32, + qk_norm="rms_norm_across_heads", + guidance_embeds=True, + ) + + torch.manual_seed(0) + vae = AutoencoderDC( + in_channels=3, + latent_channels=4, + attention_head_dim=2, + encoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + decoder_block_types=( + "ResBlock", + "EfficientViTBlock", + ), + encoder_block_out_channels=(8, 8), + decoder_block_out_channels=(8, 8), + encoder_qkv_multiscales=((), (5,)), + decoder_qkv_multiscales=((), (5,)), + encoder_layers_per_block=(1, 1), + decoder_layers_per_block=[1, 1], + downsample_block_type="conv", + upsample_block_type="interpolate", + decoder_norm_types="rms_norm", + decoder_act_fns="silu", + scaling_factor=0.41407, + ) + + torch.manual_seed(0) + scheduler = SCMScheduler() + + torch.manual_seed(0) + text_encoder_config = Gemma2Config( + head_dim=16, + hidden_size=8, + initializer_range=0.02, + intermediate_size=64, + max_position_embeddings=8192, + model_type="gemma2", + num_attention_heads=2, + num_hidden_layers=1, + num_key_value_heads=2, + vocab_size=8, + attn_implementation="eager", + ) + text_encoder = Gemma2Model(text_encoder_config) + tokenizer = GemmaTokenizer.from_pretrained("hf-internal-testing/dummy-gemma") + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image = randn_tensor((1, 3, 32, 32), generator=generator, device=device) + inputs = { + "prompt": "", + "image": image, + "strength": 0.5, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "max_sequence_length": 16, + "output_type": "pt", + "complex_human_instruction": None, 
+ } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs)[0] + generated_image = image[0] + + self.assertEqual(generated_image.shape, (3, 32, 32)) + expected_image = torch.randn(3, 32, 32) + max_diff = np.abs(generated_image - expected_image).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + output = pipe(**inputs)[0] + assert output.abs().sum() < 1e10 + + def test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs 
= self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + @unittest.skip("vae tiling resulted in a small margin over the expected max diff, so skipping this test for now") + def test_vae_tiling(self, expected_diff_max: float = 0.2): + generator_device = "cpu" + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe.to("cpu") + pipe.set_progress_bar_config(disable=None) + + # Without tiling + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_without_tiling = pipe(**inputs)[0] + + # With tiling + pipe.vae.enable_tiling( + tile_sample_min_height=96, + tile_sample_min_width=96, + tile_sample_stride_height=64, + tile_sample_stride_width=64, + ) + inputs = self.get_dummy_inputs(generator_device) + inputs["height"] = inputs["width"] = 128 + output_with_tiling = pipe(**inputs)[0] + + self.assertLess( + (to_np(output_without_tiling) - to_np(output_with_tiling)).max(), + expected_diff_max, + "VAE tiling should not affect the inference results", + ) + + # TODO(aryan): Create a dummy gemma model with smol vocab size + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_consistent(self): + pass + + @unittest.skip( + "A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error." + ) + def test_inference_batch_single_identical(self): + pass + + def test_float16_inference(self): + # Requires higher tolerance as model seems very sensitive to dtype + super().test_float16_inference(expected_max_diff=0.08) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/test_shap_e.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/test_shap_e.py new file mode 100644 index 0000000000000000000000000000000000000000..99fd286929818aff5821d79ddebae70c6b48588b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/test_shap_e.py @@ -0,0 +1,267 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
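+# Fast and nightly tests for ShapEPipeline: a tiny PriorTransformer, CLIP text encoder, and ShapERenderer generate 3D latents from a text prompt.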
+ +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline +from diffusers.pipelines.shap_e import ShapERenderer + +from ...testing_utils import ( + backend_empty_cache, + load_numpy, + nightly, + require_torch_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference + + +class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = ShapEPipeline + params = ["prompt"] + batch_params = ["prompt"] + required_optional_params = [ + "num_images_per_prompt", + "num_inference_steps", + "generator", + "latents", + "guidance_scale", + "frame_size", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + @property + def text_embedder_hidden_size(self): + return 16 + + @property + def time_input_dim(self): + return 16 + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def renderer_dim(self): + return 8 + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=self.text_embedder_hidden_size, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config) + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "num_attention_heads": 2, + "attention_head_dim": 16, + "embedding_dim": self.time_input_dim, + "num_embeddings": 32, + "embedding_proj_dim": self.text_embedder_hidden_size, + "time_embed_dim": self.time_embed_dim, + "num_layers": 1, + "clip_embed_dim": self.time_input_dim * 2, + "additional_embeddings": 0, + "time_embed_act_fn": "gelu", + "norm_in_type": "layer", + "encoder_hid_proj_type": None, + "added_emb_type": None, + } + + model = PriorTransformer(**model_kwargs) + return model + + @property + def dummy_renderer(self): + torch.manual_seed(0) + + model_kwargs = { + "param_shapes": ( + (self.renderer_dim, 93), + (self.renderer_dim, 8), + (self.renderer_dim, 8), + (self.renderer_dim, 8), + ), + "d_latent": self.time_input_dim, + "d_hidden": self.renderer_dim, + "n_output": 12, + "background": ( + 0.1, + 0.1, + 0.1, + ), + } + model = ShapERenderer(**model_kwargs) + return model + + def get_dummy_components(self): + prior = self.dummy_prior + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + shap_e_renderer = self.dummy_renderer + + scheduler = HeunDiscreteScheduler( + beta_schedule="exp", + num_train_timesteps=1024, + prediction_type="sample", + use_karras_sigmas=True, + clip_sample=True, + clip_sample_range=1.0, + ) + components = { + "prior": prior, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "shap_e_renderer": shap_e_renderer, + "scheduler": scheduler, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "generator": generator, + "num_inference_steps": 1, + "frame_size": 32, + "output_type": "latent", + } + return 
inputs + + def test_shap_e(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images[0] + image = image.cpu().numpy() + image_slice = image[-3:, -3:] + + assert image.shape == (32, 16) + + expected_slice = np.array([-1.0000, -0.6559, 1.0000, -0.9096, -0.7252, 0.8211, -0.7647, -0.3308, 0.6462]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_inference_batch_consistent(self): + # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches + self._test_inference_batch_consistent(batch_sizes=[1, 2]) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=6e-3) + + def test_num_images_per_prompt(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + batch_size = 1 + num_images_per_prompt = 2 + + inputs = self.get_dummy_inputs(torch_device) + + for key in inputs.keys(): + if key in self.batch_params: + inputs[key] = batch_size * [inputs[key]] + + images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] + + assert images.shape[0] == batch_size * num_images_per_prompt + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=5e-3) + + @unittest.skip("Key error is raised with accelerate") + def test_sequential_cpu_offload_forward_pass(self): + pass + + +@nightly +@require_torch_accelerator +class ShapEPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_shap_e(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/shap_e/test_shap_e_np_out.npy" + ) + pipe = ShapEPipeline.from_pretrained("openai/shap-e") + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=torch_device).manual_seed(0) + + images = pipe( + "a shark", + generator=generator, + guidance_scale=15.0, + num_inference_steps=64, + frame_size=64, + output_type="np", + ).images[0] + + assert images.shape == (20, 64, 64, 3) + + assert_mean_pixel_difference(images, expected_image) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/test_shap_e_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/test_shap_e_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..b1867db249ea69f5b8b3158f3e7b5c38e18ba55f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/shap_e/test_shap_e_img2img.py @@ -0,0 +1,293 @@ +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel + +from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline +from diffusers.pipelines.shap_e import ShapERenderer + +from ...testing_utils import ( + backend_empty_cache, + floats_tensor, + load_image, + load_numpy, + nightly, + require_torch_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference + + +class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = ShapEImg2ImgPipeline + params = ["image"] + batch_params = ["image"] + required_optional_params = [ + "num_images_per_prompt", + "num_inference_steps", + "generator", + "latents", + "guidance_scale", + "frame_size", + "output_type", + "return_dict", + ] + test_xformers_attention = False + + supports_dduf = False + + @property + def text_embedder_hidden_size(self): + return 16 + + @property + def time_input_dim(self): + return 16 + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def renderer_dim(self): + return 8 + + @property + def dummy_image_encoder(self): + torch.manual_seed(0) + config = CLIPVisionConfig( + hidden_size=self.text_embedder_hidden_size, + image_size=32, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=24, + num_attention_heads=2, + num_channels=3, + num_hidden_layers=5, + patch_size=1, + ) + + model = CLIPVisionModel(config) + return model + + @property + def dummy_image_processor(self): + image_processor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + return image_processor + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "num_attention_heads": 2, + "attention_head_dim": 16, + "embedding_dim": self.time_input_dim, + "num_embeddings": 32, + "embedding_proj_dim": self.text_embedder_hidden_size, + "time_embed_dim": self.time_embed_dim, + "num_layers": 1, + "clip_embed_dim": self.time_input_dim * 2, + "additional_embeddings": 0, + "time_embed_act_fn": "gelu", + "norm_in_type": "layer", + "embedding_proj_norm_type": "layer", + "encoder_hid_proj_type": None, + "added_emb_type": None, + } + + model = PriorTransformer(**model_kwargs) + return model + + @property + def dummy_renderer(self): + torch.manual_seed(0) + + model_kwargs = { + "param_shapes": ( + (self.renderer_dim, 93), + (self.renderer_dim, 8), + (self.renderer_dim, 8), + (self.renderer_dim, 8), + ), + "d_latent": self.time_input_dim, + "d_hidden": self.renderer_dim, + "n_output": 12, + "background": ( + 0.1, + 0.1, + 0.1, + ), + } + model = ShapERenderer(**model_kwargs) + return model + + def get_dummy_components(self): + prior = self.dummy_prior + image_encoder = self.dummy_image_encoder + image_processor = self.dummy_image_processor + shap_e_renderer = self.dummy_renderer + + scheduler = 
HeunDiscreteScheduler( + beta_schedule="exp", + num_train_timesteps=1024, + prediction_type="sample", + use_karras_sigmas=True, + clip_sample=True, + clip_sample_range=1.0, + ) + components = { + "prior": prior, + "image_encoder": image_encoder, + "image_processor": image_processor, + "shap_e_renderer": shap_e_renderer, + "scheduler": scheduler, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": input_image, + "generator": generator, + "num_inference_steps": 1, + "frame_size": 32, + "output_type": "latent", + } + return inputs + + def test_shap_e(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images[0] + image_slice = image[-3:, -3:].cpu().numpy() + + assert image.shape == (32, 16) + + expected_slice = np.array( + [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_inference_batch_consistent(self): + # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches + self._test_inference_batch_consistent(batch_sizes=[2]) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical( + batch_size=2, + expected_max_diff=6e-3, + ) + + def test_num_images_per_prompt(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + batch_size = 1 + num_images_per_prompt = 2 + + inputs = self.get_dummy_inputs(torch_device) + + for key in inputs.keys(): + if key in self.batch_params: + inputs[key] = batch_size * [inputs[key]] + + images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] + + assert images.shape[0] == batch_size * num_images_per_prompt + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=1e-1) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=5e-3) + + @unittest.skip("Key error is raised with accelerate") + def test_sequential_cpu_offload_forward_pass(self): + pass + + +@nightly +@require_torch_accelerator +class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_shap_e_img2img(self): + input_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png" + ) + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/shap_e/test_shap_e_img2img_out.npy" + ) + pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img") + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=torch_device).manual_seed(0) + + images = pipe( + input_image, + generator=generator, + 
guidance_scale=3.0, + num_inference_steps=64, + frame_size=64, + output_type="np", + ).images[0] + + assert images.shape == (20, 64, 64, 3) + + assert_mean_pixel_difference(images, expected_image) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..1bcec877c30d58ceb773e7eaddb5407049a97ee0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2.py @@ -0,0 +1,137 @@ +# Copyright 2024 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + SkyReelsV2Pipeline, + SkyReelsV2Transformer3DModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import ( + enable_full_determinism, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class SkyReelsV2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SkyReelsV2Pipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=8.0, use_flow_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def 
get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + expected_video = torch.randn(9, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py new file mode 100644 index 0000000000000000000000000000000000000000..74235d59efd6e298c0f49f95cbb45abf44f1fa2e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df.py @@ -0,0 +1,137 @@ +# Copyright 2024 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
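+# Fast tests for SkyReelsV2DiffusionForcingPipeline (text-to-video with diffusion forcing), using a tiny SkyReelsV2Transformer3DModel and AutoencoderKLWan.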
+ +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + SkyReelsV2DiffusionForcingPipeline, + SkyReelsV2Transformer3DModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import ( + enable_full_determinism, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class SkyReelsV2DiffusionForcingPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SkyReelsV2DiffusionForcingPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=8.0, use_flow_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + expected_video = torch.randn(9, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py new file mode 100644 index 0000000000000000000000000000000000000000..f0cbc710df05b92be44bf54ddf3f71c513d90920 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df_image_to_video.py @@ -0,0 +1,215 @@ +# Copyright 2024 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + AutoTokenizer, + T5EncoderModel, +) + +from diffusers import ( + AutoencoderKLWan, + SkyReelsV2DiffusionForcingImageToVideoPipeline, + SkyReelsV2Transformer3DModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class SkyReelsV2DiffusionForcingImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SkyReelsV2DiffusionForcingImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=5.0, use_flow_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + image_dim=4, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" 
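+ # Generate a short video from the dummy image and prompt, then verify only the output shape (9 frames of 3x16x16); the 1e10 tolerance is a crash/NaN guard, not a pixel check.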
+ + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + expected_video = torch.randn(9, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass") + def test_inference_batch_single_identical(self): + pass + + +class SkyReelsV2DiffusionForcingImageToVideoPipelineFastTests(SkyReelsV2DiffusionForcingImageToVideoPipelineFastTests): + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=5.0, use_flow_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + image_dim=4, + pos_embed_seq_len=2 * (4 * 4 + 1), + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + last_image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "last_image": last_image, + "prompt": "dance monkey", + "negative_prompt": "negative", + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0b23318e633753423c51c052322cc64bffe6d1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_df_video_to_video.py @@ -0,0 +1,201 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + SkyReelsV2DiffusionForcingVideoToVideoPipeline, + SkyReelsV2Transformer3DModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class SkyReelsV2DiffusionForcingVideoToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SkyReelsV2DiffusionForcingVideoToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = frozenset(["video", "prompt", "negative_prompt"]) + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=5.0, use_flow_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video = [Image.new("RGB", (16, 16))] * 7 + inputs = { + "video": video, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "generator": generator, + "num_inference_steps": 4, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "max_sequence_length": 16, + "output_type": "pt", + "overlap_history": 3, + "num_frames": 17, + "base_num_frames": 5, + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + + total_frames = len(inputs["video"]) + inputs["num_frames"] + expected_shape = (total_frames, 3, 16, 16) + self.assertEqual(generated_video.shape, expected_shape) + expected_video = torch.randn(*expected_shape) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_callback_cfg(self): + sig = inspect.signature(self.pipeline_class.__call__) + 
has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + if "guidance_scale" not in sig.parameters: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + # Track the number of callback calls for diffusion forcing pipelines + callback_call_count = [0] # Use list to make it mutable in closure + + def callback_increase_guidance(pipe, i, t, callback_kwargs): + pipe._guidance_scale += 1.0 + callback_call_count[0] += 1 + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # use cfg guidance because some pipelines modify the shape of the latents + # outside of the denoising loop + inputs["guidance_scale"] = 2.0 + inputs["callback_on_step_end"] = callback_increase_guidance + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + _ = pipe(**inputs)[0] + + # For diffusion forcing pipelines, use the actual callback count + # since they run multiple iterations with nested denoising loops + expected_guidance_scale = inputs["guidance_scale"] + callback_call_count[0] + + assert pipe.guidance_scale == expected_guidance_scale + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip( + "SkyReelsV2DiffusionForcingVideoToVideoPipeline has to run in mixed precision. Casting the entire pipeline will result in errors" + ) + def test_float16_inference(self): + pass + + @unittest.skip( + "SkyReelsV2DiffusionForcingVideoToVideoPipeline has to run in mixed precision. Save/Load the entire pipeline in FP16 will result in errors" + ) + def test_save_load_float16(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py new file mode 100644 index 0000000000000000000000000000000000000000..784f701a29d2cf1ac81877bae69dc8b199729455 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/skyreels_v2/test_skyreels_v2_image_to_video.py @@ -0,0 +1,220 @@ +# Copyright 2024 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
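+# Fast tests for SkyReelsV2ImageToVideoPipeline, which additionally wires in a tiny CLIP vision encoder and image processor for image conditioning.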
+ +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + AutoTokenizer, + CLIPImageProcessor, + CLIPVisionConfig, + CLIPVisionModelWithProjection, + T5EncoderModel, +) + +from diffusers import ( + AutoencoderKLWan, + SkyReelsV2ImageToVideoPipeline, + SkyReelsV2Transformer3DModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class SkyReelsV2ImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = SkyReelsV2ImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=5.0, use_flow_sigmas=True) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=36, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + image_dim=4, + ) + + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=4, + projection_dim=4, + num_hidden_layers=2, + num_attention_heads=2, + image_size=32, + intermediate_size=16, + patch_size=1, + ) + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + torch.manual_seed(0) + image_processor = CLIPImageProcessor(crop_size=32, size=32) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "image_encoder": image_encoder, + "image_processor": image_processor, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = 
video[0] + + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + expected_video = torch.randn(9, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + def test_inference_with_last_image(self): + device = "cpu" + + components = self.get_dummy_components() + torch.manual_seed(0) + components["transformer"] = SkyReelsV2Transformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=36, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + pos_embed_seq_len=2 * (4 * 4 + 1), + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + image_dim=4, + ) + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=4, + projection_dim=4, + num_hidden_layers=2, + num_attention_heads=2, + image_size=4, + intermediate_size=16, + patch_size=1, + ) + components["image_encoder"] = CLIPVisionModelWithProjection(image_encoder_config) + + torch.manual_seed(0) + components["image_processor"] = CLIPImageProcessor(crop_size=4, size=4) + + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image_height = 16 + image_width = 16 + last_image = Image.new("RGB", (image_width, image_height)) + inputs["last_image"] = last_image + + video = pipe(**inputs).frames + generated_video = video[0] + + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + expected_video = torch.randn(9, 3, 16, 16) + max_diff = np.abs(generated_video - expected_video).max() + self.assertLessEqual(max_diff, 1e10) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass") + def test_inference_batch_single_identical(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_audio/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_audio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_audio/test_stable_audio.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_audio/test_stable_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..dd03f4d07f07c3e860953b8e1b810640fab9a09a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_audio/test_stable_audio.py @@ -0,0 +1,480 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import gc +import unittest + +import numpy as np +import torch +from transformers import ( + T5EncoderModel, + T5Tokenizer, +) + +from diffusers import ( + AutoencoderOobleck, + CosineDPMSolverMultistepScheduler, + StableAudioDiTModel, + StableAudioPipeline, + StableAudioProjectionModel, +) +from diffusers.utils import is_xformers_available + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + enable_full_determinism, + nightly, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class StableAudioPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableAudioPipeline + params = frozenset( + [ + "prompt", + "audio_end_in_s", + "audio_start_in_s", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + "initial_audio_waveforms", + ] + ) + batch_params = TEXT_TO_AUDIO_BATCH_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "num_waveforms_per_prompt", + "generator", + "latents", + "output_type", + "return_dict", + "callback", + "callback_steps", + ] + ) + # There is no xformers version of the StableAudioPipeline custom attention processor + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = StableAudioDiTModel( + sample_size=4, + in_channels=3, + num_layers=2, + attention_head_dim=4, + num_key_value_attention_heads=2, + out_channels=3, + cross_attention_dim=4, + time_proj_dim=8, + global_states_input_dim=8, + cross_attention_input_dim=4, + ) + scheduler = CosineDPMSolverMultistepScheduler( + solver_order=2, + prediction_type="v_prediction", + sigma_data=1.0, + sigma_schedule="exponential", + ) + torch.manual_seed(0) + vae = AutoencoderOobleck( + encoder_hidden_size=6, + downsampling_ratios=[1, 2], + decoder_channels=3, + decoder_input_channels=3, + audio_channels=2, + channel_multiples=[2, 4], + sampling_rate=4, + ) + torch.manual_seed(0) + t5_repo_id = "hf-internal-testing/tiny-random-T5ForConditionalGeneration" + text_encoder = T5EncoderModel.from_pretrained(t5_repo_id) + tokenizer = T5Tokenizer.from_pretrained(t5_repo_id, truncation=True, model_max_length=25) + + torch.manual_seed(0) + projection_model = StableAudioProjectionModel( + text_encoder_dim=text_encoder.config.d_model, + conditioning_dim=4, + min_value=0, + max_value=32, + ) + + components = { + "transformer": transformer, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "projection_model": projection_model, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A hammer hitting a wooden surface", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + } + return inputs + + def test_save_load_local(self): + # increase tolerance from 1e-4 -> 7e-3 to account for large composite model + super().test_save_load_local(expected_max_difference=7e-3) + + def test_save_load_optional_components(self): + # increase tolerance from 1e-4 -> 7e-3 to account for large composite model + super().test_save_load_optional_components(expected_max_difference=7e-3) + + def test_stable_audio_ddim(self): + device = "cpu" # ensure determinism for the
device-dependent torch.Generator + + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(torch_device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = stable_audio_pipe(**inputs) + audio = output.audios[0] + + assert audio.ndim == 2 + assert audio.shape == (2, 7) + + def test_stable_audio_without_prompts(self): + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(torch_device) + stable_audio_pipe = stable_audio_pipe.to(torch_device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = stable_audio_pipe(**inputs) + audio_1 = output.audios[0] + + inputs = self.get_dummy_inputs(torch_device) + prompt = 3 * [inputs.pop("prompt")] + + text_inputs = stable_audio_pipe.tokenizer( + prompt, + padding="max_length", + max_length=stable_audio_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ).to(torch_device) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + prompt_embeds = stable_audio_pipe.text_encoder( + text_input_ids, + attention_mask=attention_mask, + )[0] + + inputs["prompt_embeds"] = prompt_embeds + inputs["attention_mask"] = attention_mask + + # forward + output = stable_audio_pipe(**inputs) + audio_2 = output.audios[0] + + assert (audio_1 - audio_2).abs().max() < 1e-2 + + def test_stable_audio_negative_without_prompts(self): + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(torch_device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + negative_prompt = 3 * ["this is a negative prompt"] + inputs["negative_prompt"] = negative_prompt + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = stable_audio_pipe(**inputs) + audio_1 = output.audios[0] + + inputs = self.get_dummy_inputs(torch_device) + prompt = 3 * [inputs.pop("prompt")] + + text_inputs = stable_audio_pipe.tokenizer( + prompt, + padding="max_length", + max_length=stable_audio_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ).to(torch_device) + text_input_ids = text_inputs.input_ids + attention_mask = text_inputs.attention_mask + + prompt_embeds = stable_audio_pipe.text_encoder( + text_input_ids, + attention_mask=attention_mask, + )[0] + + inputs["prompt_embeds"] = prompt_embeds + inputs["attention_mask"] = attention_mask + + negative_text_inputs = stable_audio_pipe.tokenizer( + negative_prompt, + padding="max_length", + max_length=stable_audio_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ).to(torch_device) + negative_text_input_ids = negative_text_inputs.input_ids + negative_attention_mask = negative_text_inputs.attention_mask + + negative_prompt_embeds = stable_audio_pipe.text_encoder( + negative_text_input_ids, + attention_mask=negative_attention_mask, + )[0] + + inputs["negative_prompt_embeds"] = negative_prompt_embeds + inputs["negative_attention_mask"] = negative_attention_mask + + # forward + output = stable_audio_pipe(**inputs) + audio_2 = output.audios[0] + + assert (audio_1 - audio_2).abs().max() < 1e-2 + + def test_stable_audio_negative_prompt(self): + device = "cpu" # ensure 
determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "egg cracking" + output = stable_audio_pipe(**inputs, negative_prompt=negative_prompt) + audio = output.audios[0] + + assert audio.ndim == 2 + assert audio.shape == (2, 7) + + def test_stable_audio_num_waveforms_per_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + prompt = "A hammer hitting a wooden surface" + + # test num_waveforms_per_prompt=1 (default) + audios = stable_audio_pipe(prompt, num_inference_steps=2).audios + + assert audios.shape == (1, 2, 7) + + # test num_waveforms_per_prompt=1 (default) for batch of prompts + batch_size = 2 + audios = stable_audio_pipe([prompt] * batch_size, num_inference_steps=2).audios + + assert audios.shape == (batch_size, 2, 7) + + # test num_waveforms_per_prompt for single prompt + num_waveforms_per_prompt = 2 + audios = stable_audio_pipe( + prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt + ).audios + + assert audios.shape == (num_waveforms_per_prompt, 2, 7) + + # test num_waveforms_per_prompt for batch of prompts + batch_size = 2 + audios = stable_audio_pipe( + [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt + ).audios + + assert audios.shape == (batch_size * num_waveforms_per_prompt, 2, 7) + + def test_stable_audio_audio_end_in_s(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(torch_device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = stable_audio_pipe(audio_end_in_s=1.5, **inputs) + audio = output.audios[0] + + assert audio.ndim == 2 + assert audio.shape[1] / stable_audio_pipe.vae.sampling_rate == 1.5 + + output = stable_audio_pipe(audio_end_in_s=1.1875, **inputs) + audio = output.audios[0] + + assert audio.ndim == 2 + assert audio.shape[1] / stable_audio_pipe.vae.sampling_rate == 1.0 + + def test_attention_slicing_forward_pass(self): + self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=5e-4) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) + + def test_stable_audio_input_waveform(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + stable_audio_pipe = StableAudioPipeline(**components) + stable_audio_pipe = stable_audio_pipe.to(device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + prompt = "A hammer hitting a wooden surface" + + initial_audio_waveforms = 
torch.ones((1, 5)) + + # test raises error when no sampling rate + with self.assertRaises(ValueError): + audios = stable_audio_pipe( + prompt, num_inference_steps=2, initial_audio_waveforms=initial_audio_waveforms + ).audios + + # test raises error when wrong sampling rate + with self.assertRaises(ValueError): + audios = stable_audio_pipe( + prompt, + num_inference_steps=2, + initial_audio_waveforms=initial_audio_waveforms, + initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate - 1, + ).audios + + audios = stable_audio_pipe( + prompt, + num_inference_steps=2, + initial_audio_waveforms=initial_audio_waveforms, + initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate, + ).audios + assert audios.shape == (1, 2, 7) + + # test works with num_waveforms_per_prompt + num_waveforms_per_prompt = 2 + audios = stable_audio_pipe( + prompt, + num_inference_steps=2, + num_waveforms_per_prompt=num_waveforms_per_prompt, + initial_audio_waveforms=initial_audio_waveforms, + initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate, + ).audios + + assert audios.shape == (num_waveforms_per_prompt, 2, 7) + + # test num_waveforms_per_prompt for batch of prompts and input audio (two channels) + batch_size = 2 + initial_audio_waveforms = torch.ones((batch_size, 2, 5)) + audios = stable_audio_pipe( + [prompt] * batch_size, + num_inference_steps=2, + num_waveforms_per_prompt=num_waveforms_per_prompt, + initial_audio_waveforms=initial_audio_waveforms, + initial_audio_sampling_rate=stable_audio_pipe.vae.sampling_rate, + ).audios + + assert audios.shape == (batch_size * num_waveforms_per_prompt, 2, 7) + + @unittest.skip("Not supported yet") + def test_sequential_cpu_offload_forward_pass(self): + pass + + @unittest.skip("Not supported yet") + def test_sequential_offload_forward_pass_twice(self): + pass + + @unittest.skip("Test not supported because `rotary_embed_dim` doesn't have any sensible default.") + def test_encode_prompt_works_in_isolation(self): + pass + + +@nightly +@require_torch_accelerator +class StableAudioPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 64, 1024)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "A hammer hitting a wooden surface", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "audio_end_in_s": 30, + "guidance_scale": 2.5, + } + return inputs + + def test_stable_audio(self): + stable_audio_pipe = StableAudioPipeline.from_pretrained("stabilityai/stable-audio-open-1.0") + stable_audio_pipe = stable_audio_pipe.to(torch_device) + stable_audio_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 25 + audio = stable_audio_pipe(**inputs).audios[0] + + assert audio.ndim == 2 + assert audio.shape == (2, int(inputs["audio_end_in_s"] * stable_audio_pipe.vae.sampling_rate)) + # check the portion of the generated audio with the largest dynamic range (reduces flakiness) + audio_slice = audio[0, 447590:447600] + # fmt: off + expected_slices = Expectations( + { + ("xpu", 3): np.array([-0.0285, 0.1083, 0.1863, 0.3165, 0.5312, 0.6971, 0.6958, 
0.6177, 0.5598, 0.5048]), + ("cuda", 7): np.array([-0.0278, 0.1096, 0.1877, 0.3178, 0.5329, 0.6990, 0.6972, 0.6186, 0.5608, 0.5060]), + ("cuda", 8): np.array([-0.0285, 0.1082, 0.1862, 0.3163, 0.5306, 0.6964, 0.6953, 0.6172, 0.5593, 0.5044]), + } + ) + # fmt: on + + expected_slice = expected_slices.get_expectation() + max_diff = np.abs(expected_slice - audio_slice.detach().cpu().numpy()).max() + assert max_diff < 1.5e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_combined.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..afa0db39f3fa9f2125529c821dd060fdd910ead9 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_combined.py @@ -0,0 +1,244 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import DDPMWuerstchenScheduler, StableCascadeCombinedPipeline +from diffusers.models import StableCascadeUNet +from diffusers.pipelines.wuerstchen import PaellaVQModel + +from ...testing_utils import enable_full_determinism, require_torch_accelerator, torch_device +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class StableCascadeCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableCascadeCombinedPipeline + params = ["prompt"] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "generator", + "height", + "width", + "latents", + "prior_guidance_scale", + "decoder_guidance_scale", + "negative_prompt", + "num_inference_steps", + "return_dict", + "prior_num_inference_steps", + "output_type", + ] + test_xformers_attention = True + + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "conditioning_dim": 128, + "block_out_channels": (128, 128), + "num_attention_heads": (2, 2), + "down_num_layers_per_block": (1, 1), + "up_num_layers_per_block": (1, 1), + "clip_image_in_channels": 768, + "switch_level": (False,), + "clip_text_in_channels": self.text_embedder_hidden_size, + "clip_text_pooled_in_channels": self.text_embedder_hidden_size, + } + + model = StableCascadeUNet(**model_kwargs) + return model.eval() + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + projection_dim=self.text_embedder_hidden_size, + hidden_size=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config).eval() + + @property + def dummy_vqgan(self): + torch.manual_seed(0) + + model_kwargs = { + "bottleneck_blocks": 1, + "num_vq_embeddings": 2, + } + model = PaellaVQModel(**model_kwargs) + return model.eval() + + @property + def dummy_decoder(self): + torch.manual_seed(0) + model_kwargs = { + "in_channels": 4, + "out_channels": 4, + "conditioning_dim": 128, + "block_out_channels": (16, 32, 64, 128), + "num_attention_heads": (-1, -1, 1, 2), + "down_num_layers_per_block": (1, 1, 1, 1), + "up_num_layers_per_block": (1, 1, 1, 1), + "down_blocks_repeat_mappers": (1, 1, 1, 1), + "up_blocks_repeat_mappers": (3, 3, 2, 2), + "block_types_per_layer": ( + ("SDCascadeResBlock", "SDCascadeTimestepBlock"), + ("SDCascadeResBlock", "SDCascadeTimestepBlock"), + ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), + ("SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"), + ), + "switch_level": None, + "clip_text_pooled_in_channels": 32, + "dropout": (0.1, 0.1, 0.1, 0.1), + } + + model = StableCascadeUNet(**model_kwargs) + return model.eval() + + def get_dummy_components(self): + prior = self.dummy_prior + + scheduler = DDPMWuerstchenScheduler() + tokenizer = self.dummy_tokenizer + text_encoder = self.dummy_text_encoder + decoder = self.dummy_decoder + vqgan = self.dummy_vqgan + prior_text_encoder = self.dummy_text_encoder + prior_tokenizer = self.dummy_tokenizer + + components = { 
+ "text_encoder": text_encoder, + "tokenizer": tokenizer, + "decoder": decoder, + "scheduler": scheduler, + "vqgan": vqgan, + "prior_text_encoder": prior_text_encoder, + "prior_tokenizer": prior_tokenizer, + "prior_prior": prior, + "prior_scheduler": scheduler, + "prior_feature_extractor": None, + "prior_image_encoder": None, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "generator": generator, + "prior_guidance_scale": 4.0, + "decoder_guidance_scale": 4.0, + "num_inference_steps": 2, + "prior_num_inference_steps": 2, + "output_type": "np", + "height": 128, + "width": 128, + } + return inputs + + def test_stable_cascade(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + + expected_slice = np.array([0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_slice.flatten()}" + ) + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2, ( + f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" + ) + + @require_torch_accelerator + def test_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=2e-2) + + @unittest.skip(reason="fp16 not supported") + def test_float16_inference(self): + super().test_float16_inference() + + @unittest.skip(reason="no callback test for combined pipeline") + def test_callback_inputs(self): + super().test_callback_inputs() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5b3acb8705b333089f04ebb4fda8b638b8678768 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_decoder.py @@ -0,0 +1,324 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import DDPMWuerstchenScheduler, StableCascadeDecoderPipeline +from diffusers.models import StableCascadeUNet +from diffusers.pipelines.wuerstchen import PaellaVQModel +from diffusers.utils.torch_utils import randn_tensor + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_numpy, + load_pt, + numpy_cosine_similarity_distance, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class StableCascadeDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableCascadeDecoderPipeline + params = ["prompt"] + batch_params = ["image_embeddings", "prompt", "negative_prompt"] + required_optional_params = [ + "num_images_per_prompt", + "num_inference_steps", + "latents", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + test_xformers_attention = False + callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"] + + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + projection_dim=self.text_embedder_hidden_size, + hidden_size=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config).eval() + + @property + def dummy_vqgan(self): + torch.manual_seed(0) + + model_kwargs = { + "bottleneck_blocks": 1, + "num_vq_embeddings": 2, + } + model = PaellaVQModel(**model_kwargs) + return model.eval() + + @property + def dummy_decoder(self): + torch.manual_seed(0) + model_kwargs = { + "in_channels": 4, + "out_channels": 4, + "conditioning_dim": 128, + "block_out_channels": [16, 32, 64, 128], + "num_attention_heads": [-1, -1, 1, 2], + "down_num_layers_per_block": [1, 1, 1, 1], + "up_num_layers_per_block": [1, 1, 1, 1], + "down_blocks_repeat_mappers": [1, 1, 1, 1], + "up_blocks_repeat_mappers": [3, 3, 2, 2], + "block_types_per_layer": [ + ["SDCascadeResBlock", "SDCascadeTimestepBlock"], + ["SDCascadeResBlock", "SDCascadeTimestepBlock"], + ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], + ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], + ], + "switch_level": None, + "clip_text_pooled_in_channels": 32, + 
"dropout": [0.1, 0.1, 0.1, 0.1], + } + model = StableCascadeUNet(**model_kwargs) + return model.eval() + + def get_dummy_components(self): + decoder = self.dummy_decoder + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + vqgan = self.dummy_vqgan + + scheduler = DDPMWuerstchenScheduler() + + components = { + "decoder": decoder, + "vqgan": vqgan, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "scheduler": scheduler, + "latent_dim_scale": 4.0, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image_embeddings": torch.ones((1, 4, 4, 4), device=device), + "prompt": "horse", + "generator": generator, + "guidance_scale": 2.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_wuerstchen_decoder(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.images + + image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False) + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + @skip_mps + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-2) + + @skip_mps + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device == "cpu" + test_mean_pixel_difference = False + + self._test_attention_slicing_forward_pass( + test_max_difference=test_max_difference, + test_mean_pixel_difference=test_mean_pixel_difference, + ) + + @unittest.skip(reason="fp16 not supported") + def test_float16_inference(self): + super().test_float16_inference() + + def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings(self): + device = "cpu" + components = self.get_dummy_components() + + pipe = StableCascadeDecoderPipeline(**components) + pipe.set_progress_bar_config(disable=None) + + prior_num_images_per_prompt = 2 + decoder_num_images_per_prompt = 2 + prompt = ["a cat"] + batch_size = len(prompt) + + generator = torch.Generator(device) + image_embeddings = randn_tensor( + (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) + ) + decoder_output = pipe( + image_embeddings=image_embeddings, + prompt=prompt, + num_inference_steps=1, + output_type="np", + guidance_scale=0.0, + generator=generator.manual_seed(0), + num_images_per_prompt=decoder_num_images_per_prompt, + ) + + assert decoder_output.images.shape[0] == ( + batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt + ) + + def test_stable_cascade_decoder_single_prompt_multiple_image_embeddings_with_guidance(self): + device = "cpu" + components = self.get_dummy_components() + + pipe = StableCascadeDecoderPipeline(**components) + pipe.set_progress_bar_config(disable=None) + + prior_num_images_per_prompt = 2 + decoder_num_images_per_prompt = 2 + prompt = ["a cat"] + batch_size = len(prompt) + + generator = torch.Generator(device) + image_embeddings = 
randn_tensor( + (batch_size * prior_num_images_per_prompt, 4, 4, 4), generator=generator.manual_seed(0) + ) + decoder_output = pipe( + image_embeddings=image_embeddings, + prompt=prompt, + num_inference_steps=1, + output_type="np", + guidance_scale=2.0, + generator=generator.manual_seed(0), + num_images_per_prompt=decoder_num_images_per_prompt, + ) + + assert decoder_output.images.shape[0] == ( + batch_size * prior_num_images_per_prompt * decoder_num_images_per_prompt + ) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "batch_size": 1, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableCascadeDecoderPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_cascade_decoder(self): + pipe = StableCascadeDecoderPipeline.from_pretrained( + "stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." + + generator = torch.Generator(device="cpu").manual_seed(0) + image_embedding = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/image_embedding.pt", + map_location=torch_device, + ) + + image = pipe( + prompt=prompt, + image_embeddings=image_embedding, + output_type="np", + num_inference_steps=2, + generator=generator, + ).images[0] + + assert image.shape == (1024, 1024, 3) + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/stable_cascade_decoder_image.npy" + ) + max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) + assert max_diff < 2e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_prior.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_prior.py new file mode 100644 index 0000000000000000000000000000000000000000..f8267186db14dc785de11d65deaaf6455bff2c7a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_cascade/test_stable_cascade_prior.py @@ -0,0 +1,284 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import DDPMWuerstchenScheduler, StableCascadePriorPipeline +from diffusers.models import StableCascadeUNet +from diffusers.utils.import_utils import is_peft_available + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_numpy, + numpy_cosine_similarity_distance, + require_peft_backend, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) + + +if is_peft_available(): + from peft import LoraConfig + from peft.tuners.tuners_utils import BaseTunerLayer + +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class StableCascadePriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableCascadePriorPipeline + params = ["prompt"] + batch_params = ["prompt", "negative_prompt"] + required_optional_params = [ + "num_images_per_prompt", + "generator", + "num_inference_steps", + "latents", + "negative_prompt", + "guidance_scale", + "output_type", + "return_dict", + ] + test_xformers_attention = False + callback_cfg_params = ["text_encoder_hidden_states"] + + @property + def text_embedder_hidden_size(self): + return 32 + + @property + def time_input_dim(self): + return 32 + + @property + def block_out_channels_0(self): + return self.time_input_dim + + @property + def time_embed_dim(self): + return self.time_input_dim * 4 + + @property + def dummy_tokenizer(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + return tokenizer + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=self.text_embedder_hidden_size, + projection_dim=self.text_embedder_hidden_size, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModelWithProjection(config).eval() + + @property + def dummy_prior(self): + torch.manual_seed(0) + + model_kwargs = { + "conditioning_dim": 128, + "block_out_channels": (128, 128), + "num_attention_heads": (2, 2), + "down_num_layers_per_block": (1, 1), + "up_num_layers_per_block": (1, 1), + "switch_level": (False,), + "clip_image_in_channels": 768, + "clip_text_in_channels": self.text_embedder_hidden_size, + "clip_text_pooled_in_channels": self.text_embedder_hidden_size, + "dropout": (0.1, 0.1), + } + + model = StableCascadeUNet(**model_kwargs) + return model.eval() + + def get_dummy_components(self): + prior = self.dummy_prior + text_encoder = self.dummy_text_encoder + tokenizer = self.dummy_tokenizer + + scheduler = DDPMWuerstchenScheduler() + + components = { + "prior": prior, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "scheduler": scheduler, + "feature_extractor": None, + "image_encoder": None, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "horse", + "generator": generator, + "guidance_scale": 4.0, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_wuerstchen_prior(self): + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + 
pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(device)) + image = output.image_embeddings + + image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] + + image_slice = image[0, 0, 0, -10:] + + image_from_tuple_slice = image_from_tuple[0, 0, 0, -10:] + assert image.shape == (1, 16, 24, 24) + + expected_slice = np.array( + [94.5498, -21.9481, -117.5025, -192.8760, 38.0117, 73.4709, 38.1142, -185.5593, -47.7869, 167.2853] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-2 + + @skip_mps + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-1) + + @skip_mps + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device == "cpu" + test_mean_pixel_difference = False + + self._test_attention_slicing_forward_pass( + test_max_difference=test_max_difference, + test_mean_pixel_difference=test_mean_pixel_difference, + ) + + @unittest.skip(reason="fp16 not supported") + def test_float16_inference(self): + super().test_float16_inference() + + def check_if_lora_correctly_set(self, model) -> bool: + """ + Checks if the LoRA layers are correctly set with peft + """ + for module in model.modules(): + if isinstance(module, BaseTunerLayer): + return True + return False + + def get_lora_components(self): + prior = self.dummy_prior + + prior_lora_config = LoraConfig( + r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False + ) + + return prior, prior_lora_config + + @require_peft_backend + @unittest.skip(reason="no lora support for now") + def test_inference_with_prior_lora(self): + _, prior_lora_config = self.get_lora_components() + device = "cpu" + + components = self.get_dummy_components() + + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + + pipe.set_progress_bar_config(disable=None) + + output_no_lora = pipe(**self.get_dummy_inputs(device)) + image_embed = output_no_lora.image_embeddings + self.assertTrue(image_embed.shape == (1, 16, 24, 24)) + + pipe.prior.add_adapter(prior_lora_config) + self.assertTrue(self.check_if_lora_correctly_set(pipe.prior), "Lora not correctly set in prior") + + output_lora = pipe(**self.get_dummy_inputs(device)) + lora_image_embed = output_lora.image_embeddings + + self.assertTrue(image_embed.shape == lora_image_embed.shape) + + @unittest.skip("Test not supported because dtype determination relies on text encoder.") + def test_encode_prompt_works_in_isolation(self): + pass + + +@slow +@require_torch_accelerator +class StableCascadePriorPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_cascade_prior(self): + pipe = StableCascadePriorPipeline.from_pretrained( + "stabilityai/stable-cascade-prior", variant="bf16", torch_dtype=torch.bfloat16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "A photograph of the inside of a subway train. There are raccoons sitting on the seats. One of them is reading a newspaper. The window shows the city in the background." 
+ + generator = torch.Generator(device="cpu").manual_seed(0) + + output = pipe(prompt, num_inference_steps=2, output_type="np", generator=generator) + image_embedding = output.image_embeddings + expected_image_embedding = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_cascade/stable_cascade_prior_image_embeddings.npy" + ) + assert image_embedding.shape == (1, 16, 24, 24) + + max_diff = numpy_cosine_similarity_distance(image_embedding.flatten(), expected_image_embedding.flatten()) + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..62414f3f19474a8da9b826409cffa6dacf7324b0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py @@ -0,0 +1,376 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import tempfile +import unittest + +import numpy as np + +from diffusers import ( + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + OnnxStableDiffusionPipeline, + PNDMScheduler, +) + +from ...testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu +from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin + + +if is_onnx_available(): + import onnxruntime as ort + + +class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): + hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" + + def get_dummy_inputs(self, seed=0): + generator = np.random.RandomState(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_pipeline_default_ddim(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_pipeline_pndm(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_pipeline_lms(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_pipeline_euler(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_pipeline_euler_ancestral(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + 
inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_pipeline_dpm_multistep(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_prompt_embeds(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + inputs = self.get_dummy_inputs() + prompt = 3 * [inputs.pop("prompt")] + + text_inputs = pipe.tokenizer( + prompt, + padding="max_length", + max_length=pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_inputs = text_inputs["input_ids"] + + prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0] + + inputs["prompt_embeds"] = prompt_embeds + + # forward + output = pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + def test_stable_diffusion_negative_prompt_embeds(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + negative_prompt = 3 * ["this is a negative prompt"] + inputs["negative_prompt"] = negative_prompt + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + inputs = self.get_dummy_inputs() + prompt = 3 * [inputs.pop("prompt")] + + embeds = [] + for p in [prompt, negative_prompt]: + text_inputs = pipe.tokenizer( + p, + padding="max_length", + max_length=pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="np", + ) + text_inputs = text_inputs["input_ids"] + + embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]) + + inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds + + # forward + output = pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + +@nightly +@require_onnxruntime +@require_torch_gpu +class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase): + @property + def gpu_provider(self): + return ( + "CUDAExecutionProvider", + { + "gpu_mem_limit": "15000000000", # 15GB + "arena_extend_strategy": "kSameAsRequested", + }, + ) + + @property + def gpu_options(self): + options = ort.SessionOptions() + options.enable_mem_pattern = False + return options + + def test_inference_default_pndm(self): + # using the PNDM scheduler by default + sd_pipe = 
OnnxStableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + revision="onnx", + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + np.random.seed(0) + output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np") + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_ddim(self): + ddim_scheduler = DDIMScheduler.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" + ) + sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + revision="onnx", + scheduler=ddim_scheduler, + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "open neural network exchange" + generator = np.random.RandomState(0) + output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_k_lms(self): + lms_scheduler = LMSDiscreteScheduler.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" + ) + sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + revision="onnx", + scheduler=lms_scheduler, + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "open neural network exchange" + generator = np.random.RandomState(0) + output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_intermediate_state(self): + number_of_steps = 0 + + def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None: + test_callback_fn.has_been_called = True + nonlocal number_of_steps + number_of_steps += 1 + if step == 0: + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array( + [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] + ) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 + elif step == 5: + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array( + [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] + ) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 + + test_callback_fn.has_been_called = False + + pipe = 
OnnxStableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + revision="onnx", + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + pipe.set_progress_bar_config(disable=None) + + prompt = "Andromeda galaxy in a bottle" + + generator = np.random.RandomState(0) + pipe( + prompt=prompt, + num_inference_steps=5, + guidance_scale=7.5, + generator=generator, + callback=test_callback_fn, + callback_steps=1, + ) + assert test_callback_fn.has_been_called + assert number_of_steps == 6 + + def test_stable_diffusion_no_safety_checker(self): + pipe = OnnxStableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + revision="onnx", + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + assert isinstance(pipe, OnnxStableDiffusionPipeline) + assert pipe.safety_checker is None + + image = pipe("example prompt", num_inference_steps=2).images[0] + assert image is not None + + # check that there's no error when saving a pipeline with one of the models being None + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname) + + # sanity check that the pipeline still works + assert pipe.safety_checker is None + image = pipe("example prompt", num_inference_steps=2).images[0] + assert image is not None diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..28d1d0f37ff8cfb6613fe2a43cb95bbf1bdd35d6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import random +import unittest + +import numpy as np + +from diffusers import ( + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + OnnxStableDiffusionImg2ImgPipeline, + PNDMScheduler, +) + +from ...testing_utils import ( + floats_tensor, + is_onnx_available, + load_image, + nightly, + require_onnxruntime, + require_torch_gpu, +) +from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin + + +if is_onnx_available(): + import onnxruntime as ort + + +class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): + hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" + + def get_dummy_inputs(self, seed=0): + image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) + generator = np.random.RandomState(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_pipeline_default_ddim(self): + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) + assert np.abs(image_slice - expected_slice).max() < 1e-1 + + def test_pipeline_pndm(self): + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def test_pipeline_lms(self): + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + # warmup pass to apply optimizations + _ = pipe(**self.get_dummy_inputs()) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def test_pipeline_euler(self): + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def 
test_pipeline_euler_ancestral(self): + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def test_pipeline_dpm_multistep(self): + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 128, 128, 3) + expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + +@nightly +@require_onnxruntime +@require_torch_gpu +class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase): + @property + def gpu_provider(self): + return ( + "CUDAExecutionProvider", + { + "gpu_mem_limit": "15000000000", # 15GB + "arena_extend_strategy": "kSameAsRequested", + }, + ) + + @property + def gpu_options(self): + options = ort.SessionOptions() + options.enable_mem_pattern = False + return options + + def test_inference_default_pndm(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ) + init_image = init_image.resize((768, 512)) + # using the PNDM scheduler by default + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + revision="onnx", + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + pipe.set_progress_bar_config(disable=None) + + prompt = "A fantasy landscape, trending on artstation" + + generator = np.random.RandomState(0) + output = pipe( + prompt=prompt, + image=init_image, + strength=0.75, + guidance_scale=7.5, + num_inference_steps=10, + generator=generator, + output_type="np", + ) + images = output.images + image_slice = images[0, 255:258, 383:386, -1] + + assert images.shape == (1, 512, 768, 3) + expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) + # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues + + assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 + + def test_inference_k_lms(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ) + init_image = init_image.resize((768, 512)) + lms_scheduler = LMSDiscreteScheduler.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" + ) + pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + revision="onnx", + scheduler=lms_scheduler, + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + 
pipe.set_progress_bar_config(disable=None) + + prompt = "A fantasy landscape, trending on artstation" + + generator = np.random.RandomState(0) + output = pipe( + prompt=prompt, + image=init_image, + strength=0.75, + guidance_scale=7.5, + num_inference_steps=20, + generator=generator, + output_type="np", + ) + images = output.images + image_slice = images[0, 255:258, 383:386, -1] + + assert images.shape == (1, 512, 768, 3) + expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) + # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues + + assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..1d46ff9a2f5f87d9dac90c48dd8ee7b84b7367fb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py @@ -0,0 +1,141 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
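+# Fast tests for OnnxStableDiffusionInpaintPipeline are still a placeholder (see the FIXME below);
+# the nightly integration tests run the ONNX inpainting pipeline on GPU through onnxruntime's
+# CUDAExecutionProvider with the default and k-LMS schedulers and compare fixed-seed image
+# slices against reference values.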
+ +import unittest + +import numpy as np + +from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline + +from ...testing_utils import ( + is_onnx_available, + load_image, + nightly, + require_onnxruntime, + require_torch_gpu, +) +from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin + + +if is_onnx_available(): + import onnxruntime as ort + + +class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): + # FIXME: add fast tests + pass + + +@nightly +@require_onnxruntime +@require_torch_gpu +class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): + @property + def gpu_provider(self): + return ( + "CUDAExecutionProvider", + { + "gpu_mem_limit": "15000000000", # 15GB + "arena_extend_strategy": "kSameAsRequested", + }, + ) + + @property + def gpu_options(self): + options = ort.SessionOptions() + options.enable_mem_pattern = False + return options + + def test_inference_default_pndm(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/in_paint/overture-creations-5sI6fQgYIuo.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" + ) + pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", + revision="onnx", + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + pipe.set_progress_bar_config(disable=None) + + prompt = "A red cat sitting on a park bench" + + generator = np.random.RandomState(0) + output = pipe( + prompt=prompt, + image=init_image, + mask_image=mask_image, + guidance_scale=7.5, + num_inference_steps=10, + generator=generator, + output_type="np", + ) + images = output.images + image_slice = images[0, 255:258, 255:258, -1] + + assert images.shape == (1, 512, 512, 3) + expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_k_lms(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/in_paint/overture-creations-5sI6fQgYIuo.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" + ) + lms_scheduler = LMSDiscreteScheduler.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", subfolder="scheduler", revision="onnx" + ) + pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", + revision="onnx", + scheduler=lms_scheduler, + safety_checker=None, + feature_extractor=None, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + pipe.set_progress_bar_config(disable=None) + + prompt = "A red cat sitting on a park bench" + + generator = np.random.RandomState(0) + output = pipe( + prompt=prompt, + image=init_image, + mask_image=mask_image, + guidance_scale=7.5, + num_inference_steps=20, + generator=generator, + output_type="np", + ) + images = output.images + image_slice = images[0, 255:258, 255:258, -1] + + assert images.shape == (1, 512, 512, 3) + expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py new file mode 100644 index 0000000000000000000000000000000000000000..55d9d38d64bd84573ecb571739a8eabe6f9b6951 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py @@ -0,0 +1,231 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest + +import numpy as np + +from diffusers import ( + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + OnnxStableDiffusionUpscalePipeline, + PNDMScheduler, +) + +from ...testing_utils import ( + floats_tensor, + is_onnx_available, + load_image, + nightly, + require_onnxruntime, + require_torch_gpu, +) +from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin + + +if is_onnx_available(): + import onnxruntime as ort + + +# TODO: (Dhruv) Update hub_checkpoint repo_id +@unittest.skip( + "There is a potential backdoor vulnerability in the hub_checkpoint. Skip running this test until resolved" +) +class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): + # TODO: is there an appropriate internal test set? 
+ hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx" + + def get_dummy_inputs(self, seed=0): + image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) + generator = np.random.RandomState(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_pipeline_default_ddpm(self): + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + # started as 128, should now be 512 + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.6957, 0.7002, 0.7186, 0.6881, 0.6693, 0.6910, 0.7445, 0.7274, 0.7056]) + assert np.abs(image_slice - expected_slice).max() < 1e-1 + + def test_pipeline_pndm(self): + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.7349, 0.7347, 0.7034, 0.7696, 0.7876, 0.7597, 0.7916, 0.8085, 0.8036]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def test_pipeline_dpm_multistep(self): + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def test_pipeline_euler(self): + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + def test_pipeline_euler_ancestral(self): + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") + pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 + + +@nightly +@require_onnxruntime 
+@require_torch_gpu +class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): + @property + def gpu_provider(self): + return ( + "CUDAExecutionProvider", + { + "gpu_mem_limit": "15000000000", # 15GB + "arena_extend_strategy": "kSameAsRequested", + }, + ) + + @property + def gpu_options(self): + options = ort.SessionOptions() + options.enable_mem_pattern = False + return options + + def test_inference_default_ddpm(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ) + init_image = init_image.resize((128, 128)) + # using the PNDM scheduler by default + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( + "ssube/stable-diffusion-x4-upscaler-onnx", + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + pipe.set_progress_bar_config(disable=None) + + prompt = "A fantasy landscape, trending on artstation" + + generator = np.random.RandomState(0) + output = pipe( + prompt=prompt, + image=init_image, + guidance_scale=7.5, + num_inference_steps=10, + generator=generator, + output_type="np", + ) + images = output.images + image_slice = images[0, 255:258, 383:386, -1] + + assert images.shape == (1, 512, 512, 3) + expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) + # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues + + assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 + + def test_inference_k_lms(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ) + init_image = init_image.resize((128, 128)) + lms_scheduler = LMSDiscreteScheduler.from_pretrained( + "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" + ) + pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( + "ssube/stable-diffusion-x4-upscaler-onnx", + scheduler=lms_scheduler, + provider=self.gpu_provider, + sess_options=self.gpu_options, + ) + pipe.set_progress_bar_config(disable=None) + + prompt = "A fantasy landscape, trending on artstation" + + generator = np.random.RandomState(0) + output = pipe( + prompt=prompt, + image=init_image, + guidance_scale=7.5, + num_inference_steps=20, + generator=generator, + output_type="np", + ) + images = output.images + image_slice = images[0, 255:258, 383:386, -1] + + assert images.shape == (1, 512, 512, 3) + expected_slice = np.array( + [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] + ) + # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues + + assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..c9d9525b2e45c8b4e99ac98177c3f1d6b6347ce5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion.py @@ -0,0 +1,1453 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import gc +import tempfile +import time +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import ( + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, +) + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LCMScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + StableDiffusionPipeline, + UNet2DConditionModel, + logging, +) + +from ...testing_utils import ( + CaptureLogger, + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + load_numpy, + nightly, + numpy_cosine_similarity_distance, + require_accelerate_version_greater, + require_torch_accelerator, + require_torch_multi_accelerator, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, time_cond_proj_dim=None): + cross_attention_dim = 8 + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=1, + sample_size=32, + time_cond_proj_dim=time_cond_proj_dim, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=16, + layer_norm_eps=1e-05, + num_attention_heads=2, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": 
scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_ddim(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.1763, 0.4776, 0.4986, 0.2566, 0.3802, 0.4596, 0.5363, 0.3277, 0.3949]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.2368, 0.4900, 0.5019, 0.2723, 0.4473, 0.4578, 0.4551, 0.3532, 0.4133]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.2368, 0.4900, 0.5019, 0.2723, 0.4473, 0.4578, 0.4551, 0.3532, 0.4133]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_ays(self): + from diffusers.schedulers import AysSchedules + + timestep_schedule = AysSchedules["StableDiffusionTimesteps"] + sigma_schedule = AysSchedules["StableDiffusionSigmas"] + + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 10 + output = sd_pipe(**inputs).images + + inputs = self.get_dummy_inputs(device) + 
inputs["num_inference_steps"] = None + inputs["timesteps"] = timestep_schedule + output_ts = sd_pipe(**inputs).images + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = None + inputs["sigmas"] = sigma_schedule + output_sigmas = sd_pipe(**inputs).images + + assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, ( + "ays timesteps and ays sigmas should have the same outputs" + ) + assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, ( + "use ays timesteps should have different outputs" + ) + assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, ( + "use ays sigmas should have different outputs" + ) + + def test_stable_diffusion_prompt_embeds(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + inputs = self.get_dummy_inputs(torch_device) + prompt = 3 * [inputs.pop("prompt")] + + text_inputs = sd_pipe.tokenizer( + prompt, + padding="max_length", + max_length=sd_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputs = text_inputs["input_ids"].to(torch_device) + + prompt_embeds = sd_pipe.text_encoder(text_inputs)[0] + + inputs["prompt_embeds"] = prompt_embeds + + # forward + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + def test_stable_diffusion_negative_prompt_embeds(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + negative_prompt = 3 * ["this is a negative prompt"] + inputs["negative_prompt"] = negative_prompt + inputs["prompt"] = 3 * [inputs["prompt"]] + + # forward + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + inputs = self.get_dummy_inputs(torch_device) + prompt = 3 * [inputs.pop("prompt")] + + embeds = [] + for p in [prompt, negative_prompt]: + text_inputs = sd_pipe.tokenizer( + p, + padding="max_length", + max_length=sd_pipe.tokenizer.model_max_length, + truncation=True, + return_tensors="pt", + ) + text_inputs = text_inputs["input_ids"].to(torch_device) + + embeds.append(sd_pipe.text_encoder(text_inputs)[0]) + + inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds + + # forward + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + def test_stable_diffusion_ddim_factor_8(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs, height=136, width=136) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 136, 136, 3) + expected_slice = np.array([0.4720, 0.5426, 0.5160, 0.3961, 0.4696, 0.4296, 0.5738, 0.5888, 0.5481]) + + 
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_pndm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.1941, 0.4748, 0.4880, 0.2222, 0.4221, 0.4545, 0.5604, 0.3488, 0.3902]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_no_safety_checker(self): + pipe = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None + ) + assert isinstance(pipe, StableDiffusionPipeline) + assert isinstance(pipe.scheduler, LMSDiscreteScheduler) + assert pipe.safety_checker is None + + image = pipe("example prompt", num_inference_steps=2).images[0] + assert image is not None + + # check that there's no error when saving a pipeline with one of the models being None + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) + + # sanity check that the pipeline still works + assert pipe.safety_checker is None + image = pipe("example prompt", num_inference_steps=2).images[0] + assert image is not None + + def test_stable_diffusion_k_lms(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.2681, 0.4785, 0.4857, 0.2426, 0.4473, 0.4481, 0.5610, 0.3676, 0.3855]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_k_euler_ancestral(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.2682, 0.4782, 0.4855, 0.2424, 0.4472, 0.4479, 0.5612, 0.3676, 0.3854]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_k_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = 
output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.2681, 0.4785, 0.4857, 0.2426, 0.4473, 0.4481, 0.5610, 0.3676, 0.3855]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_vae_slicing(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + image_count = 4 + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + output_1 = sd_pipe(**inputs) + + # make sure sliced vae decode yields the same result + sd_pipe.enable_vae_slicing() + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + output_2 = sd_pipe(**inputs) + + # there is a small discrepancy at image borders vs. full batch decode + assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 + + def test_stable_diffusion_vae_tiling(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + # make sure here that pndm scheduler skips prk + components["safety_checker"] = None + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + + # Test that tiled decode at 512x512 yields the same result as the non-tiled decode + generator = torch.Generator(device=device).manual_seed(0) + output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + + # make sure tiled vae decode yields the same result + sd_pipe.enable_vae_tiling() + generator = torch.Generator(device=device).manual_seed(0) + output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + + assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 5e-1 + + # test that tiled decode works with various shapes + shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)] + for shape in shapes: + zeros = torch.zeros(shape).to(device) + sd_pipe.vae.decode(zeros) + + def test_stable_diffusion_negative_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "french fries" + output = sd_pipe(**inputs, negative_prompt=negative_prompt) + + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.1907, 0.4709, 0.4858, 0.2224, 0.4223, 0.4539, 0.5606, 0.3489, 0.3900]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_long_prompt(self): + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + 
sd_pipe.set_progress_bar_config(disable=None) + + do_classifier_free_guidance = True + negative_prompt = None + num_images_per_prompt = 1 + logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") + logger.setLevel(logging.WARNING) + + prompt = 100 * "@" + with CaptureLogger(logger) as cap_logger: + negative_text_embeddings, text_embeddings = sd_pipe.encode_prompt( + prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + if negative_text_embeddings is not None: + text_embeddings = torch.cat([negative_text_embeddings, text_embeddings]) + + # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 + assert cap_logger.out.count("@") == 25 + + negative_prompt = "Hello" + with CaptureLogger(logger) as cap_logger_2: + negative_text_embeddings_2, text_embeddings_2 = sd_pipe.encode_prompt( + prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + if negative_text_embeddings_2 is not None: + text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) + + assert cap_logger.out == cap_logger_2.out + + prompt = 25 * "@" + with CaptureLogger(logger) as cap_logger_3: + negative_text_embeddings_3, text_embeddings_3 = sd_pipe.encode_prompt( + prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + if negative_text_embeddings_3 is not None: + text_embeddings_3 = torch.cat([negative_text_embeddings_3, text_embeddings_3]) + + assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape + assert text_embeddings.shape[1] == 77 + assert cap_logger_3.out == "" + + def test_stable_diffusion_height_width_opt(self): + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "hey" + + output = sd_pipe(prompt, num_inference_steps=1, output_type="np") + image_shape = output.images[0].shape[:2] + assert image_shape == (64, 64) + + output = sd_pipe(prompt, num_inference_steps=1, height=96, width=96, output_type="np") + image_shape = output.images[0].shape[:2] + assert image_shape == (96, 96) + + config = dict(sd_pipe.unet.config) + config["sample_size"] = 96 + sd_pipe.unet = UNet2DConditionModel.from_config(config).to(torch_device) + output = sd_pipe(prompt, num_inference_steps=1, output_type="np") + image_shape = output.images[0].shape[:2] + assert image_shape == (192, 192) + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + # MPS currently doesn't support ComplexFloats, which are required for freeU - see https://github.com/huggingface/diffusers/issues/7569. 
+ @skip_mps + def test_freeu_enabled(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "hey" + output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images + + sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) + output_freeu = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images + + assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), ( + "Enabling of FreeU should lead to different results." + ) + + def test_freeu_disabled(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "hey" + output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images + + sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) + sd_pipe.disable_freeu() + + freeu_keys = {"s1", "s2", "b1", "b2"} + for upsample_block in sd_pipe.unet.up_blocks: + for key in freeu_keys: + assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." + + output_no_freeu = sd_pipe( + prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0) + ).images + + assert np.allclose(output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1]), ( + "Disabling of FreeU should lead to results similar to the default pipeline results." + ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + sd_pipe.fuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + sd_pipe.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." 
+ ) + + def test_pipeline_interrupt(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "hey" + num_inference_steps = 3 + + # store intermediate latents from the generation process + class PipelineState: + def __init__(self): + self.state = [] + + def apply(self, pipe, i, t, callback_kwargs): + self.state.append(callback_kwargs["latents"]) + return callback_kwargs + + pipe_state = PipelineState() + sd_pipe( + prompt, + num_inference_steps=num_inference_steps, + output_type="np", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=pipe_state.apply, + ).images + + # interrupt generation at step index + interrupt_step_idx = 1 + + def callback_on_step_end(pipe, i, t, callback_kwargs): + if i == interrupt_step_idx: + pipe._interrupt = True + + return callback_kwargs + + output_interrupted = sd_pipe( + prompt, + num_inference_steps=num_inference_steps, + output_type="latent", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=callback_on_step_end, + ).images + + # fetch intermediate latents at the interrupted step + # from the completed generation process + intermediate_latent = pipe_state.state[interrupt_step_idx] + + # compare the intermediate latent to the output of the interrupted process + # they should be the same + assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) + + def test_pipeline_accept_tuple_type_unet_sample_size(self): + # the purpose of this test is to see whether the pipeline would accept a unet with the tuple-typed sample size + sd_repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + sample_size = [60, 80] + customised_unet = UNet2DConditionModel(sample_size=sample_size) + pipe = StableDiffusionPipeline.from_pretrained(sd_repo_id, unet=customised_unet) + assert pipe.unet.config.sample_size == sample_size + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableDiffusionPipelineSlowTests(unittest.TestCase): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_1_1_pndm(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1") + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.4363, 0.4355, 0.3667, 0.4066, 0.3970, 0.3866, 0.4394, 0.4356, 0.4059]) + assert np.abs(image_slice - expected_slice).max() < 3e-3 + + def 
test_stable_diffusion_v1_4_with_freeu(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 25 + + sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) + image = sd_pipe(**inputs).images + image = image[0, -3:, -3:, -1].flatten() + expected_image = [0.0721, 0.0588, 0.0268, 0.0384, 0.0636, 0.0, 0.0429, 0.0344, 0.0309] + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_stable_diffusion_1_4_pndm(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.5740, 0.4784, 0.3162, 0.6358, 0.5831, 0.5505, 0.5082, 0.5631, 0.5575]) + assert np.abs(image_slice - expected_slice).max() < 3e-3 + + def test_stable_diffusion_ddim(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) + sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239]) + assert np.abs(image_slice - expected_slice).max() < 1e-4 + + def test_stable_diffusion_lms(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.10542, 0.09620, 0.07332, 0.09015, 0.09382, 0.07597, 0.08496, 0.07806, 0.06455]) + assert np.abs(image_slice - expected_slice).max() < 3e-3 + + def test_stable_diffusion_dpm(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) + sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config( + sd_pipe.scheduler.config, + final_sigmas_type="sigma_min", + ) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.03503, 0.03494, 0.01087, 0.03128, 0.02552, 0.00803, 0.00742, 0.00372, 0.00000]) + assert np.abs(image_slice - expected_slice).max() < 3e-3 + + def test_stable_diffusion_attention_slicing(self): + backend_reset_peak_memory_stats(torch_device) + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) + pipe.unet.set_default_attn_processor() + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # enable attention slicing + pipe.enable_attention_slicing() + inputs = 
self.get_inputs(torch_device, dtype=torch.float16) + image_sliced = pipe(**inputs).images + + mem_bytes = backend_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + # make sure that less than 3.75 GB is allocated + assert mem_bytes < 3.75 * 10**9 + + # disable slicing + pipe.disable_attention_slicing() + pipe.unet.set_default_attn_processor() + inputs = self.get_inputs(torch_device, dtype=torch.float16) + image = pipe(**inputs).images + + # make sure that more than 3.75 GB is allocated + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes > 3.75 * 10**9 + max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten()) + assert max_diff < 1e-3 + + def test_stable_diffusion_vae_slicing(self): + backend_reset_peak_memory_stats(torch_device) + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + # enable vae slicing + pipe.enable_vae_slicing() + inputs = self.get_inputs(torch_device, dtype=torch.float16) + inputs["prompt"] = [inputs["prompt"]] * 4 + inputs["latents"] = torch.cat([inputs["latents"]] * 4) + image_sliced = pipe(**inputs).images + + mem_bytes = backend_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + # make sure that less than 4 GB is allocated + assert mem_bytes < 4e9 + + # disable vae slicing + pipe.disable_vae_slicing() + inputs = self.get_inputs(torch_device, dtype=torch.float16) + inputs["prompt"] = [inputs["prompt"]] * 4 + inputs["latents"] = torch.cat([inputs["latents"]] * 4) + image = pipe(**inputs).images + + # make sure that more than 4 GB is allocated + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes > 4e9 + # There is a small discrepancy at the image borders vs. a fully batched version. 
+ max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten()) + assert max_diff < 1e-2 + + def test_stable_diffusion_vae_tiling(self): + backend_reset_peak_memory_stats(torch_device) + model_id = "CompVis/stable-diffusion-v1-4" + pipe = StableDiffusionPipeline.from_pretrained( + model_id, variant="fp16", torch_dtype=torch.float16, safety_checker=None + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + pipe.unet = pipe.unet.to(memory_format=torch.channels_last) + pipe.vae = pipe.vae.to(memory_format=torch.channels_last) + + prompt = "a photograph of an astronaut riding a horse" + + # enable vae tiling + pipe.enable_vae_tiling() + pipe.enable_model_cpu_offload(device=torch_device) + generator = torch.Generator(device="cpu").manual_seed(0) + output_chunked = pipe( + [prompt], + width=1024, + height=1024, + generator=generator, + guidance_scale=7.5, + num_inference_steps=2, + output_type="np", + ) + image_chunked = output_chunked.images + + mem_bytes = backend_max_memory_allocated(torch_device) + + # disable vae tiling + pipe.disable_vae_tiling() + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe( + [prompt], + width=1024, + height=1024, + generator=generator, + guidance_scale=7.5, + num_inference_steps=2, + output_type="np", + ) + image = output.images + + assert mem_bytes < 1e10 + max_diff = numpy_cosine_similarity_distance(image_chunked.flatten(), image.flatten()) + assert max_diff < 1e-2 + + def test_stable_diffusion_fp16_vs_autocast(self): + # this test makes sure that the original model with autocast + # and the new model with fp16 yield the same result + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + image_fp16 = pipe(**inputs).images + + with torch.autocast(torch_device): + inputs = self.get_inputs(torch_device) + image_autocast = pipe(**inputs).images + + # Make sure results are close enough + diff = np.abs(image_fp16.flatten() - image_autocast.flatten()) + # They ARE different since ops are not run always at the same precision + # however, they should be extremely close. 
+ assert diff.mean() < 2e-2 + + def test_stable_diffusion_intermediate_state(self): + number_of_steps = 0 + + def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None: + callback_fn.has_been_called = True + nonlocal number_of_steps + number_of_steps += 1 + if step == 1: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array( + [-0.5693, -0.3018, -0.9746, 0.0518, -0.8770, 0.7559, -1.7402, 0.1022, 1.1582] + ) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + elif step == 2: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array( + [-0.1958, -0.2993, -1.0166, -0.5005, -0.4810, 0.6162, -0.9492, 0.6621, 1.4492] + ) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + + callback_fn.has_been_called = False + + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + pipe(**inputs, callback=callback_fn, callback_steps=1) + assert callback_fn.has_been_called + assert number_of_steps == inputs["num_inference_steps"] + + def test_stable_diffusion_low_cpu_mem_usage(self): + pipeline_id = "CompVis/stable-diffusion-v1-4" + + start_time = time.time() + pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) + pipeline_low_cpu_mem_usage.to(torch_device) + low_cpu_mem_usage_time = time.time() - start_time + + start_time = time.time() + _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) + normal_load_time = time.time() - start_time + + assert 2 * low_cpu_mem_usage_time < normal_load_time + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + _ = pipe(**inputs) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.8 GB is allocated + assert mem_bytes < 2.8 * 10**9 + + def test_stable_diffusion_pipeline_with_model_offloading(self): + backend_empty_cache(torch_device) + backend_reset_peak_memory_stats(torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + + # Normal inference + + pipe = StableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + torch_dtype=torch.float16, + ) + pipe.unet.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + outputs = pipe(**inputs) + mem_bytes = backend_max_memory_allocated(torch_device) + + # With model offloading + + # Reload but don't move to cuda + pipe = StableDiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + torch_dtype=torch.float16, + ) + pipe.unet.set_default_attn_processor() + + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + 
backend_reset_peak_memory_stats(torch_device) + + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_inputs(torch_device, dtype=torch.float16) + + outputs_offloaded = pipe(**inputs) + mem_bytes_offloaded = backend_max_memory_allocated(torch_device) + + images = outputs.images + offloaded_images = outputs_offloaded.images + + max_diff = numpy_cosine_similarity_distance(images.flatten(), offloaded_images.flatten()) + assert max_diff < 1e-3 + assert mem_bytes_offloaded < mem_bytes + assert mem_bytes_offloaded < 3.5 * 10**9 + for module in pipe.text_encoder, pipe.unet, pipe.vae: + assert module.device == torch.device("cpu") + + # With attention slicing + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe.enable_attention_slicing() + _ = pipe(**inputs) + mem_bytes_slicing = backend_max_memory_allocated(torch_device) + + assert mem_bytes_slicing < mem_bytes_offloaded + assert mem_bytes_slicing < 3 * 10**9 + + def test_stable_diffusion_textual_inversion(self): + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") + pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") + + a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") + a111_file_neg = hf_hub_download( + "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" + ) + pipe.load_textual_inversion(a111_file) + pipe.load_textual_inversion(a111_file_neg) + pipe.to(torch_device) + + generator = torch.Generator(device="cpu").manual_seed(1) + + prompt = "An logo of a turtle in strong Style-Winter with " + neg_prompt = "Style-Winter-neg" + + image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" + ) + + max_diff = np.abs(expected_image - image).max() + assert max_diff < 8e-1 + + def test_stable_diffusion_textual_inversion_with_model_cpu_offload(self): + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") + pipe.enable_model_cpu_offload(device=torch_device) + pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") + + a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") + a111_file_neg = hf_hub_download( + "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" + ) + pipe.load_textual_inversion(a111_file) + pipe.load_textual_inversion(a111_file_neg) + + generator = torch.Generator(device="cpu").manual_seed(1) + + prompt = "An logo of a turtle in strong Style-Winter with " + neg_prompt = "Style-Winter-neg" + + image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" + ) + + max_diff = np.abs(expected_image - image).max() + assert max_diff < 8e-1 + + def test_stable_diffusion_textual_inversion_with_sequential_cpu_offload(self): + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") + pipe.enable_sequential_cpu_offload(device=torch_device) + pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons").to(torch_device) + + a111_file = 
hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") + a111_file_neg = hf_hub_download( + "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" + ) + pipe.load_textual_inversion(a111_file) + pipe.load_textual_inversion(a111_file_neg) + + generator = torch.Generator(device="cpu").manual_seed(1) + + prompt = "An logo of a turtle in strong Style-Winter with " + neg_prompt = "Style-Winter-neg" + + image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" + ) + + max_diff = np.abs(expected_image - image).max() + assert max_diff < 8e-1 + + +@slow +@require_torch_accelerator +class StableDiffusionPipelineCkptTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_download_from_hub(self): + ckpt_paths = [ + "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", + "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors", + ] + + for ckpt_path in ckpt_paths: + pipe = StableDiffusionPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + + image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] + + assert image_out.shape == (512, 512, 3) + + def test_download_local(self): + ckpt_filename = hf_hub_download( + "stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.safetensors" + ) + config_filename = hf_hub_download("stable-diffusion-v1-5/stable-diffusion-v1-5", filename="v1-inference.yaml") + + pipe = StableDiffusionPipeline.from_single_file( + ckpt_filename, config_files={"v1": config_filename}, torch_dtype=torch.float16 + ) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + + image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] + + assert image_out.shape == (512, 512, 3) + + +@nightly +@require_torch_accelerator +class StableDiffusionPipelineNightlyTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "latents": latents, + "generator": generator, + "num_inference_steps": 50, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_1_4_pndm(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + 
"/stable_diffusion_text2img/stable_diffusion_1_4_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_stable_diffusion_1_5_pndm(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to( + torch_device + ) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_text2img/stable_diffusion_1_5_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_stable_diffusion_ddim(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) + sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_text2img/stable_diffusion_1_4_ddim.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 3e-3 + + def test_stable_diffusion_lms(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_text2img/stable_diffusion_1_4_lms.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_stable_diffusion_euler(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) + sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_text2img/stable_diffusion_1_4_euler.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + +# (sayakpaul): This test suite was run in the DGX with two GPUs (1, 2). 
+@slow +@require_torch_multi_accelerator +@require_accelerate_version_greater("0.27.0") +class StableDiffusionPipelineDeviceMapTests(unittest.TestCase): + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, generator_device="cpu", seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "generator": generator, + "num_inference_steps": 50, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def get_pipeline_output_without_device_map(self): + sd_pipe = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16 + ).to(torch_device) + sd_pipe.set_progress_bar_config(disable=True) + inputs = self.get_inputs() + no_device_map_image = sd_pipe(**inputs).images + + del sd_pipe + + return no_device_map_image + + def test_forward_pass_balanced_device_map(self): + no_device_map_image = self.get_pipeline_output_without_device_map() + + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16 + ) + sd_pipe_with_device_map.set_progress_bar_config(disable=True) + inputs = self.get_inputs() + device_map_image = sd_pipe_with_device_map(**inputs).images + + max_diff = np.abs(device_map_image - no_device_map_image).max() + assert max_diff < 1e-3 + + def test_components_put_in_right_devices(self): + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16 + ) + + assert len(set(sd_pipe_with_device_map.hf_device_map.values())) >= 2 + + def test_max_memory(self): + no_device_map_image = self.get_pipeline_output_without_device_map() + + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", + device_map="balanced", + max_memory={0: "1GB", 1: "1GB"}, + torch_dtype=torch.float16, + ) + sd_pipe_with_device_map.set_progress_bar_config(disable=True) + inputs = self.get_inputs() + device_map_image = sd_pipe_with_device_map(**inputs).images + + max_diff = np.abs(device_map_image - no_device_map_image).max() + assert max_diff < 1e-3 + + def test_reset_device_map(self): + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16 + ) + sd_pipe_with_device_map.reset_device_map() + + assert sd_pipe_with_device_map.hf_device_map is None + + for name, component in sd_pipe_with_device_map.components.items(): + if isinstance(component, torch.nn.Module): + assert component.device.type == "cpu" + + def test_reset_device_map_to(self): + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16 + ) + sd_pipe_with_device_map.reset_device_map() + + assert sd_pipe_with_device_map.hf_device_map is None + + # Make sure `to()` can be used and the pipeline can be called. 
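+        # After the reset the pipeline behaves like a plain CPU pipeline, so a regular
+        # `.to()` move followed by a short 2-step call is enough as a smoke test.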
+ pipe = sd_pipe_with_device_map.to(torch_device) + _ = pipe("hello", num_inference_steps=2) + + def test_reset_device_map_enable_model_cpu_offload(self): + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16 + ) + sd_pipe_with_device_map.reset_device_map() + + assert sd_pipe_with_device_map.hf_device_map is None + + # Make sure `enable_model_cpu_offload()` can be used and the pipeline can be called. + sd_pipe_with_device_map.enable_model_cpu_offload(device=torch_device) + _ = sd_pipe_with_device_map("hello", num_inference_steps=2) + + def test_reset_device_map_enable_sequential_cpu_offload(self): + sd_pipe_with_device_map = StableDiffusionPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", device_map="balanced", torch_dtype=torch.float16 + ) + sd_pipe_with_device_map.reset_device_map() + + assert sd_pipe_with_device_map.hf_device_map is None + + # Make sure `enable_sequential_cpu_offload()` can be used and the pipeline can be called. + sd_pipe_with_device_map.enable_sequential_cpu_offload(device=torch_device) + _ = sd_pipe_with_device_map("hello", num_inference_steps=2) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..a0b7268b9dd40c0b20b93423af3afb2d18e5b803 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py @@ -0,0 +1,714 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
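+# Tests for StableDiffusionImg2ImgPipeline: fast checks built from tiny dummy
+# components, plus slow and nightly accelerator runs that compare against reference
+# images across schedulers, CPU offloading, and the safety checker.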
+ +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + AutoencoderTiny, + DDIMScheduler, + DPMSolverMultistepScheduler, + HeunDiscreteScheduler, + LCMScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + StableDiffusionImg2ImgPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionImg2ImgPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_tiny_autoencoder(self): + return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + 
"generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_img2img_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.4555, 0.3216, 0.4049, 0.4620, 0.4618, 0.4126, 0.4122, 0.4629, 0.4579]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_default_case_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_default_case_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_negative_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "french fries" + output = sd_pipe(**inputs, negative_prompt=negative_prompt) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.4593, 0.3408, 0.4232, 0.4749, 0.4476, 0.4115, 0.4357, 0.4733, 0.4663]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.4932, 0.5092, 0.5135, 0.5517, 0.5626, 0.6621, 0.6490, 0.5021, 0.5441]) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_stable_diffusion_img2img_multiple_init_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = 
StableDiffusionImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * 2 + inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) + image = sd_pipe(**inputs).images + image_slice = image[-1, -3:, -3:, -1] + + assert image.shape == (2, 32, 32, 3) + expected_slice = np.array([0.4241, 0.5576, 0.5711, 0.4792, 0.4311, 0.5952, 0.5827, 0.5138, 0.5109]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_k_lms(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" + ) + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.4398, 0.4949, 0.4337, 0.6580, 0.5555, 0.4338, 0.5769, 0.5955, 0.5175]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_tiny_autoencoder(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe.vae = self.get_dummy_tiny_autoencoder() + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.00669, 0.00669, 0.0, 0.00693, 0.00858, 0.0, 0.00567, 0.00515, 0.00125]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + @skip_mps + def test_save_load_local(self): + return super().test_save_load_local() + + @skip_mps + def test_dict_tuple_outputs_equivalent(self): + return super().test_dict_tuple_outputs_equivalent() + + @skip_mps + def test_save_load_optional_components(self): + return super().test_save_load_optional_components() + + @skip_mps + def test_attention_slicing_forward_pass(self): + return super().test_attention_slicing_forward_pass(expected_max_diff=5e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + def test_pipeline_interrupt(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = "hey" + num_inference_steps = 3 + + # store intermediate latents from the generation process + class PipelineState: + def __init__(self): + self.state = [] + + def apply(self, pipe, i, t, callback_kwargs): + self.state.append(callback_kwargs["latents"]) + return callback_kwargs + + pipe_state = PipelineState() + sd_pipe( + prompt, + image=inputs["image"], + num_inference_steps=num_inference_steps, + output_type="np", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=pipe_state.apply, + ).images + + # interrupt generation at step index + 
interrupt_step_idx = 1 + + def callback_on_step_end(pipe, i, t, callback_kwargs): + if i == interrupt_step_idx: + pipe._interrupt = True + + return callback_kwargs + + output_interrupted = sd_pipe( + prompt, + image=inputs["image"], + num_inference_steps=num_inference_steps, + output_type="latent", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=callback_on_step_end, + ).images + + # fetch intermediate latents at the interrupted step + # from the completed generation process + intermediate_latent = pipe_state.state[interrupt_step_idx] + + # compare the intermediate latent to the output of the interrupted process + # they should be the same + assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "image": init_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_img2img_default(self): + pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 768, 3) + expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_k_lms(self): + pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 768, 3) + expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_ddim(self): + pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = 
self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 768, 3) + expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_img2img_intermediate_state(self): + number_of_steps = 0 + + def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None: + callback_fn.has_been_called = True + nonlocal number_of_steps + number_of_steps += 1 + if step == 1: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 96) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523]) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + elif step == 2: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 96) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367]) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + + callback_fn.has_been_called = False + + pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 + ) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + pipe(**inputs, callback=callback_fn, callback_steps=1) + assert callback_fn.has_been_called + assert number_of_steps == 2 + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + _ = pipe(**inputs) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.2 GB is allocated + assert mem_bytes < 2.2 * 10**9 + + def test_stable_diffusion_pipeline_with_model_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + + # Normal inference + + pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + safety_checker=None, + torch_dtype=torch.float16, + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe(**inputs) + mem_bytes = backend_max_memory_allocated(torch_device) + + # With model offloading + + # Reload but don't move to cuda + pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + safety_checker=None, + torch_dtype=torch.float16, + ) + + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + _ = pipe(**inputs) + mem_bytes_offloaded = backend_max_memory_allocated(torch_device) + + assert 
mem_bytes_offloaded < mem_bytes + for module in pipe.text_encoder, pipe.unet, pipe.vae: + assert module.device == torch.device("cpu") + + def test_img2img_2nd_order(self): + sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 10 + inputs["strength"] = 0.75 + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 5e-2 + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 11 + inputs["strength"] = 0.75 + image_other = sd_pipe(**inputs).images[0] + + mean_diff = np.abs(image - image_other).mean() + + # images should be very similar + assert mean_diff < 5e-2 + + def test_stable_diffusion_img2img_pipeline_multiple_of_8(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/img2img/sketch-mountains-input.jpg" + ) + # resize to resolution that is divisible by 8 but not 16 or 32 + init_image = init_image.resize((760, 504)) + + model_id = "CompVis/stable-diffusion-v1-4" + pipe = StableDiffusionImg2ImgPipeline.from_pretrained( + model_id, + safety_checker=None, + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + prompt = "A fantasy landscape, trending on artstation" + + generator = torch.manual_seed(0) + output = pipe( + prompt=prompt, + image=init_image, + strength=0.75, + guidance_scale=7.5, + generator=generator, + output_type="np", + ) + image = output.images[0] + + image_slice = image[255:258, 383:386, -1] + + assert image.shape == (504, 760, 3) + expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 + + def test_img2img_safety_checker_works(self): + sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 20 + # make sure the safety checker is activated + inputs["prompt"] = "naked, sex, porn" + out = sd_pipe(**inputs) + + assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}" + assert np.abs(out.images[0]).sum() < 1e-5 # should be all zeros + + +@nightly +@require_torch_accelerator +class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "image": init_image, + "generator": generator, + "num_inference_steps": 50, + "strength": 0.75, + 
"guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_img2img_pndm(self): + sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_img2img_ddim(self): + sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_img2img_lms(self): + sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_img2img_dpm(self): + sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5") + sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 30 + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..259806a9479c721a0856edaa029b7c2c496bfd90 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py @@ -0,0 +1,1098 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AsymmetricAutoencoderKL, + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + LCMScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + StableDiffusionInpaintPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionInpaintPipelineFastTests( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset([]) + # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + time_cond_proj_dim=time_cond_proj_dim, + layers_per_block=2, + sample_size=32, + in_channels=9, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0, img_res=64, output_pil=True): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + if output_pil: + # Get random floats in [0, 1] as image + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = 
image.cpu().permute(0, 2, 3, 1)[0] + mask_image = torch.ones_like(image) + # Convert image and mask_image to [0, 255] + image = 255 * image + mask_image = 255 * mask_image + # Convert to PIL image + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) + mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB").resize((img_res, img_res)) + else: + # Get random floats in [0, 1] as image with spatial size (img_res, img_res) + image = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) + # Convert image to [-1, 1] + init_image = 2.0 * image - 1.0 + mask_image = torch.ones((1, 1, img_res, img_res), device=device) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4703, 0.5697, 0.3879, 0.5470, 0.6042, 0.4413, 0.5078, 0.4728, 0.4469]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_inpaint_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_inpaint_image_tensor(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe = sd_pipe.to(device) + 
sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + out_pil = output.images + + inputs = self.get_dummy_inputs(device) + inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) + inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) + output = sd_pipe(**inputs) + out_tensor = output.images + + assert out_pil.shape == (1, 64, 64, 3) + assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_stable_diffusion_inpaint_strength_zero_test(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + + # check that the pipeline raises value error when num_inference_steps is < 1 + inputs["strength"] = 0.01 + with self.assertRaises(ValueError): + sd_pipe(**inputs).images + + def test_stable_diffusion_inpaint_mask_latents(self): + device = "cpu" + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(device) + sd_pipe.set_progress_bar_config(disable=None) + + # normal mask + normal image + ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None + inputs = self.get_dummy_inputs(device) + inputs["strength"] = 0.9 + out_0 = sd_pipe(**inputs).images + + # image latents + mask latents + inputs = self.get_dummy_inputs(device) + image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) + mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) + masked_image = image * (mask < 0.5) + + generator = torch.Generator(device=device).manual_seed(0) + image_latents = ( + sd_pipe.vae.encode(image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor + ) + torch.randn((1, 4, 32, 32), generator=generator) + mask_latents = ( + sd_pipe.vae.encode(masked_image).latent_dist.sample(generator=generator) + * sd_pipe.vae.config.scaling_factor + ) + inputs["image"] = image_latents + inputs["masked_image_latents"] = mask_latents + inputs["mask_image"] = mask + inputs["strength"] = 0.9 + generator = torch.Generator(device=device).manual_seed(0) + torch.randn((1, 4, 32, 32), generator=generator) + inputs["generator"] = generator + out_1 = sd_pipe(**inputs).images + assert np.abs(out_0 - out_1).max() < 1e-2 + + def test_pipeline_interrupt(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = "hey" + num_inference_steps = 3 + + # store intermediate latents from the generation process + class PipelineState: + def __init__(self): + self.state = [] + + def apply(self, pipe, i, t, callback_kwargs): + self.state.append(callback_kwargs["latents"]) + return callback_kwargs + + pipe_state = PipelineState() + sd_pipe( + prompt, + image=inputs["image"], + mask_image=inputs["mask_image"], + num_inference_steps=num_inference_steps, + output_type="np", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=pipe_state.apply, + ).images + + # interrupt generation 
at step index + interrupt_step_idx = 1 + + def callback_on_step_end(pipe, i, t, callback_kwargs): + if i == interrupt_step_idx: + pipe._interrupt = True + + return callback_kwargs + + output_interrupted = sd_pipe( + prompt, + image=inputs["image"], + mask_image=inputs["mask_image"], + num_inference_steps=num_inference_steps, + output_type="latent", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=callback_on_step_end, + ).images + + # fetch intermediate latents at the interrupted step + # from the completed generation process + intermediate_latent = pipe_state.state[interrupt_step_idx] + + # compare the intermediate latent to the output of the interrupted process + # they should be the same + assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) + + def test_ip_adapter(self, from_simple=False, expected_pipe_slice=None): + if not from_simple: + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [0.4390, 0.5452, 0.3772, 0.5448, 0.6031, 0.4480, 0.5194, 0.4687, 0.4640] + ) + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict, atol=1e-3, rtol=1e-3) + + +class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipelineFastTests): + pipeline_class = StableDiffusionInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset([]) + # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs_2images(self, device, seed=0, img_res=64): + # Get random floats in [0, 1] as image with spatial size (img_res, img_res) + image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) + image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) + # 
Convert images to [-1, 1] + init_image1 = 2.0 * image1 - 1.0 + init_image2 = 2.0 * image2 - 1.0 + + # empty mask + mask_image = torch.zeros((1, 1, img_res, img_res), device=device) + + if str(device).startswith("mps"): + generator1 = torch.manual_seed(seed) + generator2 = torch.manual_seed(seed) + else: + generator1 = torch.Generator(device=device).manual_seed(seed) + generator2 = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": ["A painting of a squirrel eating a burger"] * 2, + "image": [init_image1, init_image2], + "mask_image": [mask_image] * 2, + "generator": [generator1, generator2], + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.6345, 0.5395, 0.5611, 0.5403, 0.5830, 0.5855, 0.5193, 0.5443, 0.5211]) + return super().test_ip_adapter(from_simple=True, expected_pipe_slice=expected_pipe_slice) + + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.6584, 0.5424, 0.5649, 0.5449, 0.5897, 0.6111, 0.5404, 0.5463, 0.5214]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_inpaint_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_inpaint_2_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + # test to confirm if we pass two same image, we will 
get same output + inputs = self.get_dummy_inputs(device) + gen1 = torch.Generator(device=device).manual_seed(0) + gen2 = torch.Generator(device=device).manual_seed(0) + for name in ["prompt", "image", "mask_image"]: + inputs[name] = [inputs[name]] * 2 + inputs["generator"] = [gen1, gen2] + images = sd_pipe(**inputs).images + + assert images.shape == (2, 64, 64, 3) + + image_slice1 = images[0, -3:, -3:, -1] + image_slice2 = images[1, -3:, -3:, -1] + assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 + + # test to confirm that if we pass two different images, we will get different output + inputs = self.get_dummy_inputs_2images(device) + images = sd_pipe(**inputs).images + assert images.shape == (2, 64, 64, 3) + + image_slice1 = images[0, -3:, -3:, -1] + image_slice2 = images[1, -3:, -3:, -1] + assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 + + def test_stable_diffusion_inpaint_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device, output_pil=False) + half_dim = inputs["image"].shape[2] // 2 + inputs["mask_image"][0, 0, :half_dim, :half_dim] = 0 + + inputs["num_inference_steps"] = 4 + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array( + [[0.6387283, 0.5564158, 0.58631873, 0.5539942, 0.5494673, 0.6461868, 0.5251618, 0.5497595, 0.5508756]] + ) + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 + + +@slow +@require_torch_accelerator +class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ) + inputs = { + "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_inpaint_ddim(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.0427, 0.0460, 0.0483, 0.0460, 0.0584, 0.0521, 0.1549, 0.1695, 0.1794]) + + assert np.abs(expected_slice - image_slice).max() < 6e-4 + + def test_stable_diffusion_inpaint_fp16(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + 
"botp/stable-diffusion-v1-5-inpainting", torch_dtype=torch.float16, safety_checker=None + ) + pipe.unet.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.1509, 0.1245, 0.1672, 0.1655, 0.1519, 0.1226, 0.1462, 0.1567, 0.2451]) + assert np.abs(expected_slice - image_slice).max() < 1e-1 + + def test_stable_diffusion_inpaint_pndm(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272]) + + assert np.abs(expected_slice - image_slice).max() < 5e-3 + + def test_stable_diffusion_inpaint_k_lms(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.9314, 0.7575, 0.9432, 0.8885, 0.9028, 0.7298, 0.9811, 0.9667, 0.7633]) + + assert np.abs(expected_slice - image_slice).max() < 6e-3 + + def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None, torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + _ = pipe(**inputs) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.2 GB is allocated + assert mem_bytes < 2.2 * 10**9 + + def test_stable_diffusion_inpaint_pil_input_resolution_test(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + # change input image to a random size (one that would cause a tensor mismatch error) + inputs["image"] = inputs["image"].resize((127, 127)) + inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) + inputs["height"] = 128 + inputs["width"] = 128 + image = pipe(**inputs).images + # verify that the returned image has the same height and width as the input height and width + assert image.shape == (1, inputs["height"], 
inputs["width"], 3) + + def test_stable_diffusion_inpaint_strength_test(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.unet.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + # change input strength + inputs["strength"] = 0.75 + image = pipe(**inputs).images + # verify that the returned image has the same height and width as the input height and width + assert image.shape == (1, 512, 512, 3) + + image_slice = image[0, 253:256, 253:256, -1].flatten() + expected_slice = np.array([0.2728, 0.2803, 0.2665, 0.2511, 0.2774, 0.2586, 0.2391, 0.2392, 0.2582]) + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_simple_inpaint_ddim(self): + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None + ) + pipe.unet.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.3757, 0.3875, 0.4445, 0.4353, 0.3780, 0.4513, 0.3965, 0.3984, 0.4362]) + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + +@slow +@require_torch_accelerator +class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ) + inputs = { + "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_inpaint_ddim(self): + vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.vae = vae + pipe.unet.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.0522, 0.0604, 0.0596, 0.0449, 0.0493, 0.0427, 0.1186, 0.1289, 0.1442]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_inpaint_fp16(self): + vae = AsymmetricAutoencoderKL.from_pretrained( + "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 + ) + pipe = 
StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", torch_dtype=torch.float16, safety_checker=None + ) + pipe.unet.set_default_attn_processor() + pipe.vae = vae + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.2063, + 0.1731, + 0.1553, + 0.1741, + 0.1772, + 0.1077, + 0.2109, + 0.2407, + 0.1243, + ] + ), + ("cuda", 7): np.array( + [ + 0.1343, + 0.1406, + 0.1440, + 0.1504, + 0.1729, + 0.0989, + 0.1807, + 0.2822, + 0.1179, + ] + ), + } + ) + expected_slice = expected_slices.get_expectation() + + assert np.abs(expected_slice - image_slice).max() < 5e-2 + + def test_stable_diffusion_inpaint_pndm(self): + vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.unet.set_default_attn_processor() + pipe.vae = vae + pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.0966, 0.1083, 0.1148, 0.1422, 0.1318, 0.1197, 0.3702, 0.3537, 0.3288]) + + assert np.abs(expected_slice - image_slice).max() < 5e-3 + + def test_stable_diffusion_inpaint_k_lms(self): + vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.unet.set_default_attn_processor() + pipe.vae = vae + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.8931, 0.8683, 0.8965, 0.8501, 0.8592, 0.9118, 0.8734, 0.7463, 0.8990]) + assert np.abs(expected_slice - image_slice).max() < 6e-3 + + def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + vae = AsymmetricAutoencoderKL.from_pretrained( + "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 + ) + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None, torch_dtype=torch.float16 + ) + pipe.vae = vae + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + _ = pipe(**inputs) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.45 GB is allocated + assert mem_bytes < 2.45 * 10**9 + + def test_stable_diffusion_inpaint_pil_input_resolution_test(self): + vae 
= AsymmetricAutoencoderKL.from_pretrained( + "cross-attention/asymmetric-autoencoder-kl-x-1-5", + ) + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.vae = vae + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + # change input image to a random size (one that would cause a tensor mismatch error) + inputs["image"] = inputs["image"].resize((127, 127)) + inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) + inputs["height"] = 128 + inputs["width"] = 128 + image = pipe(**inputs).images + # verify that the returned image has the same height and width as the input height and width + assert image.shape == (1, inputs["height"], inputs["width"], 3) + + def test_stable_diffusion_inpaint_strength_test(self): + vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "botp/stable-diffusion-v1-5-inpainting", safety_checker=None + ) + pipe.unet.set_default_attn_processor() + pipe.vae = vae + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + # change input strength + inputs["strength"] = 0.75 + image = pipe(**inputs).images + # verify that the returned image has the same height and width as the input height and width + assert image.shape == (1, 512, 512, 3) + + image_slice = image[0, 253:256, 253:256, -1].flatten() + expected_slice = np.array([0.2458, 0.2576, 0.3124, 0.2679, 0.2669, 0.2796, 0.2872, 0.2975, 0.2661]) + assert np.abs(expected_slice - image_slice).max() < 3e-3 + + def test_stable_diffusion_simple_inpaint_ddim(self): + vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") + pipe = StableDiffusionInpaintPipeline.from_pretrained( + "stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None + ) + pipe.vae = vae + pipe.unet.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.3296, 0.4041, 0.4097, 0.4145, 0.4342, 0.4152, 0.4927, 0.4931, 0.4430]) + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_download_local(self): + vae = AsymmetricAutoencoderKL.from_pretrained( + "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 + ) + filename = hf_hub_download("botp/stable-diffusion-v1-5-inpainting", filename="sd-v1-5-inpainting.ckpt") + + pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) + pipe.vae = vae + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 1 + image_out = pipe(**inputs).images[0] + + assert image_out.shape == (512, 512, 3) + + +@nightly +@require_torch_accelerator +class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def 
tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ) + inputs = { + "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 50, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_inpaint_ddim(self): + sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting") + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_inpaint_pndm(self): + sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting") + sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/stable_diffusion_inpaint_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_inpaint_lms(self): + sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting") + sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/stable_diffusion_inpaint_lms.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_inpaint_dpm(self): + sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("botp/stable-diffusion-v1-5-inpainting") + sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 30 + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/stable_diffusion_inpaint_dpm_multi.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py new file mode 100644 index 
0000000000000000000000000000000000000000..4758c5dab44b4618bd96e52040918d6b66bebe38 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py @@ -0,0 +1,431 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + EulerAncestralDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + StableDiffusionInstructPix2PixPipeline, + UNet2DConditionModel, +) +from diffusers.image_processor import VaeImageProcessor + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionInstructPix2PixPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionInstructPix2PixPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"image_latents"}) - {"negative_prompt_embeds"} + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=8, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { 
+ "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + image = Image.fromarray(np.uint8(image)).convert("RGB") + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "image_guidance_scale": 1, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_pix2pix_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_pix2pix_negative_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "french fries" + output = sd_pipe(**inputs, negative_prompt=negative_prompt) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_pix2pix_multiple_init_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * 2 + + image = np.array(inputs["image"]).astype(np.float32) / 255.0 + image = torch.from_numpy(image).unsqueeze(0).to(device) + image = image / 2 + 0.5 + image = image.permute(0, 3, 1, 2) + inputs["image"] = image.repeat(2, 1, 1, 1) + + image = sd_pipe(**inputs).images + image_slice = image[-1, -3:, -3:, -1] + + assert image.shape == (2, 32, 32, 3) + expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_pix2pix_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = EulerAncestralDiscreteScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" + ) + sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) + sd_pipe = 
sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + # Overwrite the default test_latents_inputs because pix2pix encode the image differently + def test_latents_input(self): + components = self.get_dummy_components() + pipe = StableDiffusionInstructPix2PixPipeline(**components) + pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] + + vae = components["vae"] + inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") + + for image_param in self.image_latents_params: + if image_param in inputs.keys(): + inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode() + + out_latents_inputs = pipe(**inputs)[0] + + max_diff = np.abs(out - out_latents_inputs).max() + self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") + + # Override the default test_callback_cfg because pix2pix create inputs for cfg differently + def test_callback_cfg(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + def callback_no_cfg(pipe, i, t, callback_kwargs): + if i == 1: + for k, w in callback_kwargs.items(): + if k in self.callback_cfg_params: + callback_kwargs[k] = callback_kwargs[k].chunk(3)[0] + pipe._guidance_scale = 1.0 + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + inputs["guidance_scale"] = 1.0 + inputs["num_inference_steps"] = 2 + out_no_cfg = pipe(**inputs)[0] + + inputs["guidance_scale"] = 7.5 + inputs["callback_on_step_end"] = callback_no_cfg + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + out_callback_no_cfg = pipe(**inputs)[0] + + assert out_no_cfg.shape == out_callback_no_cfg.shape + + +@slow +@require_torch_accelerator +class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, seed=0): + generator = torch.manual_seed(seed) + image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" + ) + inputs = { + "prompt": "turn him into a cyborg", + "image": image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "image_guidance_scale": 1.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_pix2pix_default(self): + pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + "timbrooks/instruct-pix2pix", safety_checker=None + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert 
image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_pix2pix_k_lms(self): + pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + "timbrooks/instruct-pix2pix", safety_checker=None + ) + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_pix2pix_ddim(self): + pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + "timbrooks/instruct-pix2pix", safety_checker=None + ) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs() + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753]) + + assert np.abs(expected_slice - image_slice).max() < 1e-3 + + def test_stable_diffusion_pix2pix_intermediate_state(self): + number_of_steps = 0 + + def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None: + callback_fn.has_been_called = True + nonlocal number_of_steps + number_of_steps += 1 + if step == 1: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + elif step == 2: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + + callback_fn.has_been_called = False + + pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 + ) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs() + pipe(**inputs, callback=callback_fn, callback_steps=1) + assert callback_fn.has_been_called + assert number_of_steps == 3 + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_inputs() + _ = pipe(**inputs) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.2 GB is allocated + assert mem_bytes < 
2.2 * 10**9 + + def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self): + inputs = self.get_inputs() + # resize to resolution that is divisible by 8 but not 16 or 32 + inputs["image"] = inputs["image"].resize((504, 504)) + + model_id = "timbrooks/instruct-pix2pix" + pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( + model_id, + safety_checker=None, + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + output = pipe(**inputs) + image = output.images[0] + + image_slice = image[255:258, 383:386, -1] + + assert image.shape == (504, 504, 3) + expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..3b2552b432d3eaff31c637312b26ab10a38155dd --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py @@ -0,0 +1,437 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + PNDMScheduler, + StableDiffusionPipeline, + UNet2DConditionModel, + logging, +) + +from ...testing_utils import ( + CaptureLogger, + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + load_numpy, + nightly, + numpy_cosine_similarity_distance, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusion2PipelineFastTests( + SDFunctionTesterMixin, + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=512, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + generator_device = "cpu" if not device.startswith("cuda") else "cuda" + if not str(device).startswith("mps"): + generator = torch.Generator(device=generator_device).manual_seed(seed) + else: + generator = torch.manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + 
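+ # NOTE: two inference steps with a seeded generator keep this fast test cheap while staying deterministic across runs.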
return inputs + + def test_stable_diffusion_ddim(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_pndm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = PNDMScheduler(skip_prk_steps=True) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_k_lms(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_k_euler_ancestral(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_k_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_unflawed(self): + 
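+ # Exercises "trailing" timestep spacing together with guidance_rescale (the fix proposed in "Common Diffusion Noise Schedules and Sample Steps are Flawed"); expected values assume the tiny dummy components above.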
device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + components["scheduler"] = DDIMScheduler.from_config( + components["scheduler"].config, timestep_spacing="trailing" + ) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["guidance_rescale"] = 0.7 + inputs["num_inference_steps"] = 10 + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_long_prompt(self): + components = self.get_dummy_components() + components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + sd_pipe = StableDiffusionPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + do_classifier_free_guidance = True + negative_prompt = None + num_images_per_prompt = 1 + logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") + logger.setLevel(logging.WARNING) + + prompt = 25 * "@" + with CaptureLogger(logger) as cap_logger_3: + text_embeddings_3, negeative_text_embeddings_3 = sd_pipe.encode_prompt( + prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + if negeative_text_embeddings_3 is not None: + text_embeddings_3 = torch.cat([negeative_text_embeddings_3, text_embeddings_3]) + + prompt = 100 * "@" + with CaptureLogger(logger) as cap_logger: + text_embeddings, negative_embeddings = sd_pipe.encode_prompt( + prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + if negative_embeddings is not None: + text_embeddings = torch.cat([negative_embeddings, text_embeddings]) + + negative_prompt = "Hello" + with CaptureLogger(logger) as cap_logger_2: + text_embeddings_2, negative_text_embeddings_2 = sd_pipe.encode_prompt( + prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt + ) + if negative_text_embeddings_2 is not None: + text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) + + assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape + assert text_embeddings.shape[1] == 77 + + assert cap_logger.out == cap_logger_2.out + # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 + assert cap_logger.out.count("@") == 25 + assert cap_logger_3.out == "" + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +@skip_mps +class StableDiffusion2PipelineSlowTests(unittest.TestCase): + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, 
seed=0): + if not str(device).startswith("mps"): + generator = torch.Generator(device=generator_device).manual_seed(seed) + else: + generator = torch.manual_seed(seed) + + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_default_ddim(self): + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) + assert np.abs(image_slice - expected_slice).max() < 7e-3 + + @require_torch_accelerator + def test_stable_diffusion_attention_slicing(self): + backend_reset_peak_memory_stats(torch_device) + pipe = StableDiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 + ) + pipe.unet.set_default_attn_processor() + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # enable attention slicing + pipe.enable_attention_slicing() + inputs = self.get_inputs(torch_device, dtype=torch.float16) + image_sliced = pipe(**inputs).images + + mem_bytes = backend_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + # make sure that less than 3.3 GB is allocated + assert mem_bytes < 3.3 * 10**9 + + # disable slicing + pipe.disable_attention_slicing() + pipe.unet.set_default_attn_processor() + inputs = self.get_inputs(torch_device, dtype=torch.float16) + image = pipe(**inputs).images + + # make sure that more than 3.3 GB is allocated + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes > 3.3 * 10**9 + max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten()) + assert max_diff < 5e-3 + + +@nightly +@require_torch_accelerator +@skip_mps +class StableDiffusion2PipelineNightlyTests(unittest.TestCase): + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda" + if not str(device).startswith("mps"): + generator = torch.Generator(device=_generator_device).manual_seed(seed) + else: + generator = torch.manual_seed(seed) + + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "prompt": "a photograph of an astronaut riding a horse", + "latents": latents, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_2_1_default(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + 
"/stable_diffusion_2_text2img/stable_diffusion_2_0_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..bea7c099046f6a6098fa07e87d9f90ec303f68d5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py @@ -0,0 +1,470 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, + DPTConfig, + DPTForDepthEstimation, + DPTImageProcessor, +) + +from diffusers import ( + AutoencoderKL, + PNDMScheduler, + StableDiffusionDepth2ImgPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + require_accelerate_version_greater, + require_accelerator, + require_torch_accelerator, + skip_mps, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, +) +from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +@skip_mps +class StableDiffusionDepth2ImgPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionDepth2ImgPipeline + test_save_load_optional_components = False + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"depth_mask"}) + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=5, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + attention_head_dim=(2, 4), + use_linear_projection=True, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], 
+ up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + backbone_config = { + "global_padding": "same", + "layer_type": "bottleneck", + "depths": [3, 4, 9], + "out_features": ["stage1", "stage2", "stage3"], + "embedding_dynamic_padding": True, + "hidden_sizes": [96, 192, 384, 768], + "num_groups": 2, + } + depth_estimator_config = DPTConfig( + image_size=32, + patch_size=16, + num_channels=3, + hidden_size=32, + num_hidden_layers=4, + backbone_out_indices=(0, 1, 2, 3), + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + is_decoder=False, + initializer_range=0.02, + is_hybrid=True, + backbone_config=backbone_config, + backbone_featmap_shape=[1, 384, 24, 24], + ) + depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval() + feature_extractor = DPTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-DPTForDepthEstimation") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "depth_estimator": depth_estimator, + "feature_extractor": feature_extractor, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) + image = image.cpu().permute(0, 2, 3, 1)[0] + image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_save_load_local(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output - output_loaded).max() + self.assertLess(max_diff, 1e-4) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self): + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) + 
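+ # Reloading with torch_dtype=torch.float16 should restore every sub-module in half precision; the loop below asserts this per component.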
pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output - output_loaded).max() + self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.") + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_float16_inference(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.half() + pipe_fp16 = self.pipeline_class(**components) + pipe_fp16.to(torch_device) + pipe_fp16.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(torch_device))[0] + output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] + + max_diff = np.abs(output - output_fp16).max() + self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.") + + @require_accelerator + @require_accelerate_version_greater("0.14.0") + def test_cpu_offload_forward_pass(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs)[0] + + pipe.enable_sequential_cpu_offload(device=torch_device) + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs)[0] + + max_diff = np.abs(output_with_offload - output_without_offload).max() + self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results") + + def test_dict_tuple_outputs_equivalent(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + output = pipe(**self.get_dummy_inputs(torch_device))[0] + output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] + + max_diff = np.abs(output - output_tuple).max() + self.assertLess(max_diff, 1e-4) + + def test_stable_diffusion_depth2img_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = StableDiffusionDepth2ImgPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + if torch_device == "mps": + expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546]) + else: + expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_depth2img_negative_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = StableDiffusionDepth2ImgPipeline(**components) + pipe = pipe.to(device) + 
pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "french fries" + output = pipe(**inputs, negative_prompt=negative_prompt) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + if torch_device == "mps": + expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626]) + else: + expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_depth2img_multiple_init_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = StableDiffusionDepth2ImgPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * 2 + inputs["image"] = 2 * [inputs["image"]] + image = pipe(**inputs).images + image_slice = image[-1, -3:, -3:, -1] + + assert image.shape == (2, 32, 32, 3) + + if torch_device == "mps": + expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 0.5115, 0.4406, 0.4551]) + else: + expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_depth2img_pil(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = StableDiffusionDepth2ImgPipeline(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + if torch_device == "mps": + expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439]) + else: + expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + @skip_mps + def test_attention_slicing_forward_pass(self): + return super().test_attention_slicing_forward_pass() + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=7e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" + ) + inputs = { + "prompt": "two tigers", + "image": init_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + 
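+ # Runs the released stabilityai/stable-diffusion-2-depth checkpoint for a few steps and compares a 3x3 output slice against reference values.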
def test_stable_diffusion_depth2img_pipeline_default(self): + pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( + "stabilityai/stable-diffusion-2-depth", safety_checker=None + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + inputs = self.get_inputs() + image = pipe(**inputs).images + image_slice = image[0, 253:256, 253:256, -1].flatten() + + assert image.shape == (1, 480, 640, 3) + expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) + + assert np.abs(expected_slice - image_slice).max() < 6e-1 + + +@nightly +@require_torch_accelerator +class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" + ) + inputs = { + "prompt": "two tigers", + "image": init_image, + "generator": generator, + "num_inference_steps": 2, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_depth2img(self): + pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs() + image = pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..f010c1b03fe338803b63026d63bfb5466840264c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py @@ -0,0 +1,293 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
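+# Tests for StableDiffusionInpaintPipeline: fast tests with tiny dummy components and slow integration tests against the stabilityai/stable-diffusion-2-inpainting checkpoint.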
+ +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusion2InpaintPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset( + [] + ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=9, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=512, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel 
eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_inpaint(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionInpaintPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +@slow +@require_torch_accelerator +class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_diffusion_inpaint_pipeline(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-inpaint/init_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" + ) + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" + "/yellow_cat_sitting_on_a_park_bench.npy" + ) + + model_id = "stabilityai/stable-diffusion-2-inpainting" + pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + + generator = torch.manual_seed(0) + output = pipe( + prompt=prompt, + image=init_image, + mask_image=mask_image, + generator=generator, + output_type="np", + ) + image = output.images[0] + + assert image.shape == (512, 512, 3) + assert np.abs(expected_image - image).max() < 9e-3 + + def test_stable_diffusion_inpaint_pipeline_fp16(self): + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-inpaint/init_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" + ) + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" + "/yellow_cat_sitting_on_a_park_bench_fp16.npy" + ) + + model_id = "stabilityai/stable-diffusion-2-inpainting" + pipe = StableDiffusionInpaintPipeline.from_pretrained( + model_id, + torch_dtype=torch.float16, + safety_checker=None, + ) + pipe.to(torch_device) + 
pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + + generator = torch.manual_seed(0) + output = pipe( + prompt=prompt, + image=init_image, + mask_image=mask_image, + generator=generator, + output_type="np", + ) + image = output.images[0] + + assert image.shape == (512, 512, 3) + assert np.abs(expected_image - image).max() < 5e-1 + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + init_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-inpaint/init_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" + ) + + model_id = "stabilityai/stable-diffusion-2-inpainting" + pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler") + pipe = StableDiffusionInpaintPipeline.from_pretrained( + model_id, + safety_checker=None, + scheduler=pndm, + torch_dtype=torch.float16, + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + prompt = "Face of a yellow cat, high resolution, sitting on a park bench" + + generator = torch.manual_seed(0) + _ = pipe( + prompt=prompt, + image=init_image, + mask_image=mask_image, + generator=generator, + num_inference_steps=2, + output_type="np", + ) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.65 GB is allocated + assert mem_bytes < 2.65 * 10**9 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py new file mode 100644 index 0000000000000000000000000000000000000000..2e4b428dfeb53493da88410570c0797f03fed126 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py @@ -0,0 +1,356 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
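+# Tests for StableDiffusionLatentUpscalePipeline: fast tests with tiny dummy components and slow integration tests against the stabilityai/sd-x2-latent-upscaler checkpoint.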
+ +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + StableDiffusionLatentUpscalePipeline, + StableDiffusionPipeline, + UNet2DConditionModel, +) +from diffusers.schedulers import KarrasDiffusionSchedulers + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS +from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +def check_same_shape(tensor_list): + shapes = [tensor.shape for tensor in tensor_list] + return all(shape == shapes[0] for shape in shapes[1:]) + + +class StableDiffusionLatentUpscalePipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionLatentUpscalePipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { + "height", + "width", + "cross_attention_kwargs", + "negative_prompt_embeds", + "prompt_embeds", + } + required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = frozenset( + [] + ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + + @property + def dummy_image(self): + batch_size = 1 + num_channels = 4 + sizes = (16, 16) + + image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) + return image + + def get_dummy_components(self): + torch.manual_seed(0) + model = UNet2DConditionModel( + act_fn="gelu", + attention_head_dim=8, + norm_num_groups=None, + block_out_channels=[32, 32, 64, 64], + time_cond_proj_dim=160, + conv_in_kernel=1, + conv_out_kernel=1, + cross_attention_dim=32, + down_block_types=( + "KDownBlock2D", + "KCrossAttnDownBlock2D", + "KCrossAttnDownBlock2D", + "KCrossAttnDownBlock2D", + ), + in_channels=8, + mid_block_type=None, + only_cross_attention=False, + out_channels=5, + resnet_time_scale_shift="scale_shift", + time_embedding_type="fourier", + timestep_post_act="gelu", + up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), + ) + vae = AutoencoderKL( + block_out_channels=[32, 32, 64, 64], + in_channels=3, + out_channels=3, + down_block_types=[ + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "DownEncoderBlock2D", + "DownEncoderBlock2D", + ], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + scheduler = EulerDiscreteScheduler(prediction_type="sample") + text_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="quick_gelu", + projection_dim=512, + ) + text_encoder = CLIPTextModel(text_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": model.eval(), + "vae": vae.eval(), + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": 
tokenizer, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": self.dummy_image.cpu(), + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + self.assertEqual(image.shape, (1, 256, 256, 3)) + expected_slice = np.array( + [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] + ) + max_diff = np.abs(image_slice.flatten() - expected_slice).max() + self.assertLessEqual(max_diff, 1e-3) + + def test_stable_diffusion_latent_upscaler_negative_prompt(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionLatentUpscalePipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + negative_prompt = "french fries" + output = sd_pipe(**inputs, negative_prompt=negative_prompt) + image = output.images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 256, 256, 3) + expected_slice = np.array( + [0.43865365, 0.404124, 0.42618454, 0.44333526, 0.40564927, 0.43818694, 0.4411913, 0.43404633, 0.46392226] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_latent_upscaler_multiple_init_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionLatentUpscalePipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * 2 + inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) + image = sd_pipe(**inputs).images + image_slice = image[-1, -3:, -3:, -1] + + assert image.shape == (2, 256, 256, 3) + expected_slice = np.array( + [0.38730142, 0.35695046, 0.40646142, 0.40967226, 0.3981609, 0.4195988, 0.4248805, 0.430259, 0.45694894] + ) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=7e-3) + + def test_sequential_cpu_offload_forward_pass(self): + super().test_sequential_cpu_offload_forward_pass(expected_max_diff=3e-3) + + def test_dict_tuple_outputs_equivalent(self): + super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=7e-3) + + def test_pt_np_pil_outputs_equivalent(self): + super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3) + + def test_save_load_local(self): + super().test_save_load_local(expected_max_difference=3e-3) + + def test_save_load_optional_components(self): + super().test_save_load_optional_components(expected_max_difference=3e-3) + + def test_karras_schedulers_shape(self): + skip_schedulers = [ + 
"DDIMScheduler", + "DDPMScheduler", + "PNDMScheduler", + "HeunDiscreteScheduler", + "EulerAncestralDiscreteScheduler", + "KDPM2DiscreteScheduler", + "KDPM2AncestralDiscreteScheduler", + "DPMSolverSDEScheduler", + "EDMEulerScheduler", + ] + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + # make sure that PNDM does not need warm-up + pipe.scheduler.register_to_config(skip_prk_steps=True) + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 2 + + outputs = [] + for scheduler_enum in KarrasDiffusionSchedulers: + if scheduler_enum.name in skip_schedulers: + # no sigma schedulers are not supported + # no schedulers + continue + + scheduler_cls = getattr(diffusers, scheduler_enum.name) + pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) + output = pipe(**inputs)[0] + outputs.append(output) + + assert check_same_shape(outputs) + + def test_float16_inference(self): + super().test_float16_inference(expected_max_diff=5e-1) + + @unittest.skip("Test not supported for a weird use of `text_input_ids`.") + def test_encode_prompt_works_in_isolation(self): + pass + + +@require_torch_accelerator +@slow +class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_latent_upscaler_fp16(self): + generator = torch.manual_seed(33) + + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) + pipe.to(torch_device) + + upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( + "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 + ) + upscaler.to(torch_device) + + prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" + + low_res_latents = pipe(prompt, generator=generator, output_type="latent").images + + image = upscaler( + prompt=prompt, + image=low_res_latents, + num_inference_steps=20, + guidance_scale=0, + generator=generator, + output_type="np", + ).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" + ) + assert np.abs((expected_image - image).mean()) < 5e-2 + + def test_latent_upscaler_fp16_image(self): + generator = torch.manual_seed(33) + + upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( + "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 + ) + upscaler.to(torch_device) + + prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" + + low_res_img = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" + ) + + image = upscaler( + prompt=prompt, + image=low_res_img, + num_inference_steps=20, + guidance_scale=0, + generator=generator, + output_type="np", + ).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" + ) + assert np.abs((expected_image - image).max()) < 5e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py new file 
mode 100644 index 0000000000000000000000000000000000000000..481ac7f2d10f7e24215c709e9e9e2d0cd7d50186 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py @@ -0,0 +1,498 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + require_accelerator, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +class StableDiffusionUpscalePipelineFastTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @property + def dummy_image(self): + batch_size = 1 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) + return image + + @property + def dummy_cond_unet_upscale(self): + torch.manual_seed(0) + model = UNet2DConditionModel( + block_out_channels=(32, 32, 64), + layers_per_block=2, + sample_size=32, + in_channels=7, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + # SD2-specific config below + attention_head_dim=8, + use_linear_projection=True, + only_cross_attention=(True, True, False), + num_class_embeds=100, + ) + return model + + @property + def dummy_vae(self): + torch.manual_seed(0) + model = AutoencoderKL( + block_out_channels=[32, 32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + return model + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=512, + ) + return CLIPTextModel(config) + + def test_stable_diffusion_upscale(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + unet = self.dummy_cond_unet_upscale + 
low_res_scheduler = DDPMScheduler() + scheduler = DDIMScheduler(prediction_type="v_prediction") + vae = self.dummy_vae + text_encoder = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionUpscalePipeline( + unet=unet, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + max_noise_level=350, + ) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.Generator(device=device).manual_seed(0) + output = sd_pipe( + [prompt], + image=low_res_image, + generator=generator, + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + ) + + image = output.images + + generator = torch.Generator(device=device).manual_seed(0) + image_from_tuple = sd_pipe( + [prompt], + image=low_res_image, + generator=generator, + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + expected_height_width = low_res_image.size[0] * 4 + assert image.shape == (1, expected_height_width, expected_height_width, 3) + expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_upscale_batch(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + unet = self.dummy_cond_unet_upscale + low_res_scheduler = DDPMScheduler() + scheduler = DDIMScheduler(prediction_type="v_prediction") + vae = self.dummy_vae + text_encoder = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionUpscalePipeline( + unet=unet, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + max_noise_level=350, + ) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + output = sd_pipe( + 2 * [prompt], + image=2 * [low_res_image], + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + ) + image = output.images + assert image.shape[0] == 2 + + generator = torch.Generator(device=device).manual_seed(0) + output = sd_pipe( + [prompt], + image=low_res_image, + generator=generator, + num_images_per_prompt=2, + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + ) + image = output.images + assert image.shape[0] == 2 + + def test_stable_diffusion_upscale_prompt_embeds(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + unet = self.dummy_cond_unet_upscale + low_res_scheduler = DDPMScheduler() + scheduler = DDIMScheduler(prediction_type="v_prediction") + vae = self.dummy_vae + 
text_encoder = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionUpscalePipeline( + unet=unet, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + max_noise_level=350, + ) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.Generator(device=device).manual_seed(0) + output = sd_pipe( + [prompt], + image=low_res_image, + generator=generator, + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + ) + + image = output.images + + generator = torch.Generator(device=device).manual_seed(0) + prompt_embeds, negative_prompt_embeds = sd_pipe.encode_prompt(prompt, device, 1, False) + if negative_prompt_embeds is not None: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + + image_from_prompt_embeds = sd_pipe( + prompt_embeds=prompt_embeds, + image=[low_res_image], + generator=generator, + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_prompt_embeds_slice = image_from_prompt_embeds[0, -3:, -3:, -1] + + expected_height_width = low_res_image.size[0] * 4 + assert image.shape == (1, expected_height_width, expected_height_width, 3) + expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2 + + @require_accelerator + def test_stable_diffusion_upscale_fp16(self): + """Test that stable diffusion upscale works with fp16""" + unet = self.dummy_cond_unet_upscale + low_res_scheduler = DDPMScheduler() + scheduler = DDIMScheduler(prediction_type="v_prediction") + vae = self.dummy_vae + text_encoder = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + + # put models in fp16, except vae as it overflows in fp16 + unet = unet.half() + text_encoder = text_encoder.half() + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionUpscalePipeline( + unet=unet, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + vae=vae, + text_encoder=text_encoder, + tokenizer=tokenizer, + max_noise_level=350, + ) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + image = sd_pipe( + [prompt], + image=low_res_image, + generator=generator, + num_inference_steps=2, + output_type="np", + ).images + + expected_height_width = low_res_image.size[0] * 4 + assert image.shape == (1, expected_height_width, expected_height_width, 3) + + def test_stable_diffusion_upscale_from_save_pretrained(self): + pipes = [] + + device = "cpu" # ensure determinism for the device-dependent torch.Generator + low_res_scheduler = DDPMScheduler() + scheduler = DDIMScheduler(prediction_type="v_prediction") + 
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionUpscalePipeline( + unet=self.dummy_cond_unet_upscale, + low_res_scheduler=low_res_scheduler, + scheduler=scheduler, + vae=self.dummy_vae, + text_encoder=self.dummy_text_encoder, + tokenizer=tokenizer, + max_noise_level=350, + ) + sd_pipe = sd_pipe.to(device) + pipes.append(sd_pipe) + + with tempfile.TemporaryDirectory() as tmpdirname: + sd_pipe.save_pretrained(tmpdirname) + sd_pipe = StableDiffusionUpscalePipeline.from_pretrained(tmpdirname).to(device) + pipes.append(sd_pipe) + + prompt = "A painting of a squirrel eating a burger" + image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] + low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + + image_slices = [] + for pipe in pipes: + generator = torch.Generator(device=device).manual_seed(0) + image = pipe( + [prompt], + image=low_res_image, + generator=generator, + guidance_scale=6.0, + noise_level=20, + num_inference_steps=2, + output_type="np", + ).images + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + + +@slow +@require_torch_accelerator +class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_diffusion_upscale_pipeline(self): + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-upscale/low_res_cat.png" + ) + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" + "/upsampled_cat.npy" + ) + + model_id = "stabilityai/stable-diffusion-x4-upscaler" + pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + prompt = "a cat sitting on a park bench" + + generator = torch.manual_seed(0) + output = pipe( + prompt=prompt, + image=image, + generator=generator, + output_type="np", + ) + image = output.images[0] + + assert image.shape == (512, 512, 3) + assert np.abs(expected_image - image).max() < 1e-3 + + def test_stable_diffusion_upscale_pipeline_fp16(self): + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-upscale/low_res_cat.png" + ) + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" + "/upsampled_cat_fp16.npy" + ) + + model_id = "stabilityai/stable-diffusion-x4-upscaler" + pipe = StableDiffusionUpscalePipeline.from_pretrained( + model_id, + torch_dtype=torch.float16, + ) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + prompt = "a cat sitting on a park bench" + + generator = torch.manual_seed(0) + output = pipe( + prompt=prompt, + image=image, + generator=generator, + output_type="np", + ) + image = output.images[0] + + assert image.shape == (512, 512, 3) + assert np.abs(expected_image - image).max() < 5e-1 + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + 
backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-upscale/low_res_cat.png" + ) + + model_id = "stabilityai/stable-diffusion-x4-upscaler" + pipe = StableDiffusionUpscalePipeline.from_pretrained( + model_id, + torch_dtype=torch.float16, + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + prompt = "a cat sitting on a park bench" + + generator = torch.manual_seed(0) + _ = pipe( + prompt=prompt, + image=image, + generator=generator, + num_inference_steps=5, + output_type="np", + ) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.9 GB is allocated + assert mem_bytes < 2.9 * 10**9 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py new file mode 100644 index 0000000000000000000000000000000000000000..37b309c4cac4ce8ed9bfb3a7613ccdff776ba0f7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py @@ -0,0 +1,554 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
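+# Tests for Stable Diffusion 2 v-prediction: fast tests with tiny dummy components and slow integration tests against the stabilityai/stable-diffusion-2 and stable-diffusion-2-1 checkpoints.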
+ +import gc +import time +import unittest + +import numpy as np +import torch +from huggingface_hub import hf_hub_download +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerDiscreteScheduler, + StableDiffusionPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + load_numpy, + numpy_cosine_similarity_distance, + require_accelerator, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @property + def dummy_cond_unet(self): + torch.manual_seed(0) + model = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + ) + return model + + @property + def dummy_vae(self): + torch.manual_seed(0) + model = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + return model + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=64, + ) + return CLIPTextModel(config) + + def test_stable_diffusion_v_pred_ddim(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + unet = self.dummy_cond_unet + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + prediction_type="v_prediction", + ) + + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=None, + image_encoder=None, + requires_safety_checker=False, + ) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + + generator = torch.Generator(device=device).manual_seed(0) + output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + image = output.images + + generator = torch.Generator(device=device).manual_seed(0) + image_from_tuple = sd_pipe( + [prompt], + generator=generator, + guidance_scale=6.0, + num_inference_steps=2, + output_type="np", + return_dict=False, + 
)[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.6569, 0.6525, 0.5142, 0.4968, 0.4923, 0.4601, 0.4996, 0.5041, 0.4544]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_v_pred_k_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + unet = self.dummy_cond_unet + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", prediction_type="v_prediction" + ) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=None, + image_encoder=None, + requires_safety_checker=False, + ) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.Generator(device=device).manual_seed(0) + output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") + + image = output.images + + generator = torch.Generator(device=device).manual_seed(0) + image_from_tuple = sd_pipe( + [prompt], + generator=generator, + guidance_scale=6.0, + num_inference_steps=2, + output_type="np", + return_dict=False, + )[0] + + image_slice = image[0, -3:, -3:, -1] + image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5644, 0.6514, 0.5190, 0.5663, 0.5287, 0.4953, 0.5430, 0.5243, 0.4778]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 + + @require_accelerator + def test_stable_diffusion_v_pred_fp16(self): + """Test that stable diffusion v-prediction works with fp16""" + unet = self.dummy_cond_unet + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + prediction_type="v_prediction", + ) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + # put models in fp16 + unet = unet.half() + vae = vae.half() + bert = bert.half() + + # make sure here that pndm scheduler skips prk + sd_pipe = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=None, + image_encoder=None, + requires_safety_checker=False, + ) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images + + assert image.shape == (1, 64, 64, 3) + + +@slow +@require_torch_accelerator +class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM 
after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_diffusion_v_pred_default(self): + sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2") + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.enable_attention_slicing() + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=20, output_type="np") + + image = output.images + image_slice = image[0, 253:256, 253:256, -1] + + assert image.shape == (1, 768, 768, 3) + expected_slice = np.array([0.1868, 0.1922, 0.1527, 0.1921, 0.1908, 0.1624, 0.1779, 0.1652, 0.1734]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_v_pred_upcast_attention(self): + sd_pipe = StableDiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 + ) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.enable_attention_slicing() + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=20, output_type="np") + + image = output.images + image_slice = image[0, 253:256, 253:256, -1] + + assert image.shape == (1, 768, 768, 3) + expected_slice = np.array([0.4209, 0.4087, 0.4097, 0.4209, 0.3860, 0.4329, 0.4280, 0.4324, 0.4187]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 + + def test_stable_diffusion_v_pred_euler(self): + scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler") + sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.enable_attention_slicing() + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "A painting of a squirrel eating a burger" + generator = torch.manual_seed(0) + + output = sd_pipe([prompt], generator=generator, num_inference_steps=5, output_type="np") + image = output.images + + image_slice = image[0, 253:256, 253:256, -1] + + assert image.shape == (1, 768, 768, 3) + expected_slice = np.array([0.1781, 0.1695, 0.1661, 0.1705, 0.1588, 0.1699, 0.2005, 0.1589, 0.1677]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_v_pred_dpm(self): + """ + TODO: update this test after making DPM compatible with V-prediction! 
+ """ + scheduler = DPMSolverMultistepScheduler.from_pretrained( + "stabilityai/stable-diffusion-2", + subfolder="scheduler", + final_sigmas_type="sigma_min", + ) + sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.enable_attention_slicing() + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "a photograph of an astronaut riding a horse" + generator = torch.manual_seed(0) + image = sd_pipe( + [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=5, output_type="np" + ).images + + image_slice = image[0, 253:256, 253:256, -1] + assert image.shape == (1, 768, 768, 3) + expected_slice = np.array([0.3303, 0.3184, 0.3291, 0.3300, 0.3256, 0.3113, 0.2965, 0.3134, 0.3192]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_attention_slicing_v_pred(self): + backend_reset_peak_memory_stats(torch_device) + model_id = "stabilityai/stable-diffusion-2" + pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "a photograph of an astronaut riding a horse" + + # make attention efficient + pipe.enable_attention_slicing() + generator = torch.manual_seed(0) + output_chunked = pipe( + [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="np" + ) + image_chunked = output_chunked.images + + mem_bytes = backend_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + # make sure that less than 5.5 GB is allocated + assert mem_bytes < 5.5 * 10**9 + + # disable slicing + pipe.disable_attention_slicing() + generator = torch.manual_seed(0) + output = pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="np") + image = output.images + + # make sure that more than 3.0 GB is allocated + mem_bytes = backend_max_memory_allocated(torch_device) + assert mem_bytes > 3 * 10**9 + max_diff = numpy_cosine_similarity_distance(image.flatten(), image_chunked.flatten()) + assert max_diff < 1e-3 + + def test_stable_diffusion_text2img_pipeline_v_pred_default(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" + "sd2-text2img/astronaut_riding_a_horse_v_pred.npy" + ) + + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2") + pipe.to(torch_device) + pipe.enable_attention_slicing() + pipe.set_progress_bar_config(disable=None) + + prompt = "astronaut riding a horse" + + generator = torch.manual_seed(0) + output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np") + image = output.images[0] + + assert image.shape == (768, 768, 3) + max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) + assert max_diff < 1e-3 + + def test_stable_diffusion_text2img_pipeline_unflawed(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" + "sd2-text2img/lion_galaxy.npy" + ) + + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1") + pipe.scheduler = DDIMScheduler.from_config( + pipe.scheduler.config, timestep_spacing="trailing", rescale_betas_zero_snr=True + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, 
iridescent, intricate detail, octane render, 8k" + + generator = torch.Generator("cpu").manual_seed(0) + output = pipe( + prompt=prompt, + guidance_scale=7.5, + num_inference_steps=10, + guidance_rescale=0.7, + generator=generator, + output_type="np", + ) + image = output.images[0] + + assert image.shape == (768, 768, 3) + max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) + assert max_diff < 5e-2 + + def test_stable_diffusion_text2img_pipeline_v_pred_fp16(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" + "sd2-text2img/astronaut_riding_a_horse_v_pred_fp16.npy" + ) + + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + prompt = "astronaut riding a horse" + + generator = torch.manual_seed(0) + output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np") + image = output.images[0] + + assert image.shape == (768, 768, 3) + max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) + assert max_diff < 1e-3 + + def test_download_local(self): + filename = hf_hub_download("stabilityai/stable-diffusion-2-1", filename="v2-1_768-ema-pruned.safetensors") + + pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.enable_model_cpu_offload(device=torch_device) + + image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] + + assert image_out.shape == (768, 768, 3) + + def test_stable_diffusion_text2img_intermediate_state_v_pred(self): + number_of_steps = 0 + + def test_callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None: + test_callback_fn.has_been_called = True + nonlocal number_of_steps + number_of_steps += 1 + if step == 0: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 96, 96) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([0.7749, 0.0325, 0.5088, 0.1619, 0.3372, 0.3667, -0.5186, 0.6860, 1.4326]) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + elif step == 19: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 96, 96) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([1.3887, 1.0273, 1.7266, 0.0726, 0.6611, 0.1598, -1.0547, 0.1522, 0.0227]) + + assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 + + test_callback_fn.has_been_called = False + + pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + prompt = "Andromeda galaxy in a bottle" + + generator = torch.manual_seed(0) + pipe( + prompt=prompt, + num_inference_steps=20, + guidance_scale=7.5, + generator=generator, + callback=test_callback_fn, + callback_steps=1, + ) + assert test_callback_fn.has_been_called + assert number_of_steps == 20 + + def test_stable_diffusion_low_cpu_mem_usage_v_pred(self): + pipeline_id = "stabilityai/stable-diffusion-2" + + start_time = time.time() + pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) + pipeline_low_cpu_mem_usage.to(torch_device) + low_cpu_mem_usage_time = time.time() - start_time + + start_time = time.time() + _ = 
StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) + normal_load_time = time.time() - start_time + + assert 2 * low_cpu_mem_usage_time < normal_load_time + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading_v_pred(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipeline_id = "stabilityai/stable-diffusion-2" + prompt = "Andromeda galaxy in a bottle" + + pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) + pipeline.enable_attention_slicing(1) + pipeline.enable_sequential_cpu_offload(device=torch_device) + + generator = torch.manual_seed(0) + _ = pipeline(prompt, generator=generator, num_inference_steps=5) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.8 GB is allocated + assert mem_bytes < 2.8 * 10**9 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccefe3de35d9abe9ef1ae2de04646e250d2c4de --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3.py @@ -0,0 +1,262 @@ +import gc +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, SD3Transformer2DModel, StableDiffusion3Pipeline + +from ...testing_utils import ( + backend_empty_cache, + numpy_cosine_similarity_distance, + require_big_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import ( + PipelineTesterMixin, + check_qkv_fusion_matches_attn_procs_length, + check_qkv_fusion_processors_exist, +) + + +class StableDiffusion3PipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3Pipeline + params = frozenset( + [ + "prompt", + "height", + "width", + "guidance_scale", + "negative_prompt", + "prompt_embeds", + "negative_prompt_embeds", + ] + ) + batch_params = frozenset(["prompt", "negative_prompt"]) + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=4, + num_layers=1, + attention_head_dim=8, + num_attention_heads=4, + caption_projection_dim=32, + joint_attention_dim=32, + pooled_projection_dim=64, + out_channels=4, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + 
text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + } + return inputs + + def test_inference(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images[0] + generated_slice = image.flatten() + generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]]) + + # fmt: off + expected_slice = np.array([0.5112, 0.5228, 0.5235, 0.5524, 0.3188, 0.5017, 0.5574, 0.4899, 0.6812, 0.5991, 0.3908, 0.5213, 0.5582, 0.4457, 0.4204, 0.5616]) + # fmt: on + + self.assertTrue( + np.allclose(generated_slice, expected_slice, atol=1e-3), "Output does not match expected slice." + ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + original_image_slice = image[0, -3:, -3:, -1] + + # TODO (sayakpaul): will refactor this once `fuse_qkv_projections()` has been added + # to the pipeline level. + pipe.transformer.fuse_qkv_projections() + assert check_qkv_fusion_processors_exist(pipe.transformer), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length( + pipe.transformer, pipe.transformer.original_attn_processors + ), "Something wrong with the attention processors concerning the fused QKV projections." + + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_fused = image[0, -3:, -3:, -1] + + pipe.transformer.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + image = pipe(**inputs).images + image_slice_disabled = image[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-3, rtol=1e-3), ( + "Fusion of QKV projections shouldn't affect the outputs." 
+ ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-3, rtol=1e-3), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + def test_skip_guidance_layers(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + output_full = pipe(**inputs)[0] + + inputs_with_skip = inputs.copy() + inputs_with_skip["skip_guidance_layers"] = [0] + output_skip = pipe(**inputs_with_skip)[0] + + self.assertFalse( + np.allclose(output_full, output_skip, atol=1e-5), "Outputs should differ when layers are skipped" + ) + + self.assertEqual(output_full.shape, output_skip.shape, "Outputs should have the same shape") + + inputs["num_images_per_prompt"] = 2 + output_full = pipe(**inputs)[0] + + inputs_with_skip = inputs.copy() + inputs_with_skip["skip_guidance_layers"] = [0] + output_skip = pipe(**inputs_with_skip)[0] + + self.assertFalse( + np.allclose(output_full, output_skip, atol=1e-5), "Outputs should differ when layers are skipped" + ) + + self.assertEqual(output_full.shape, output_skip.shape, "Outputs should have the same shape") + + +@slow +@require_big_accelerator +class StableDiffusion3PipelineSlowTests(unittest.TestCase): + pipeline_class = StableDiffusion3Pipeline + repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + return { + "prompt": "A photo of a cat", + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "generator": generator, + } + + def test_sd3_inference(self): + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10] + # fmt: off + expected_slice = np.array([0.4648, 0.4404, 0.4177, 0.5063, 0.4800, 0.4287, 0.5425, 0.5190, 0.4717, 0.5430, 0.5195, 0.4766, 0.5361, 0.5122, 0.4612, 0.4871, 0.4749, 0.4058, 0.4756, 0.4678, 0.3804, 0.4832, 0.4822, 0.3799, 0.5103, 0.5034, 0.3953, 0.5073, 0.4839, 0.3884]) + # fmt: on + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + + assert max_diff < 1e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..9025b1060c9e2b063df84cb9fb719a90aeafa960 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py @@ -0,0 +1,210 @@ +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, 
CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3Img2ImgPipeline, +) +from diffusers.utils import load_image + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + floats_tensor, + numpy_cosine_similarity_distance, + require_big_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin + + +class StableDiffusion3Img2ImgPipelineFastTests(PipelineLatentTesterMixin, unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3Img2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=4, + num_layers=1, + attention_head_dim=8, + num_attention_heads=4, + joint_attention_dim=32, + caption_projection_dim=32, + pooled_projection_dim=64, + out_channels=4, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=4, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "strength": 0.8, + } + return inputs + + def test_inference(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + inputs = 
self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images[0] + generated_slice = image.flatten() + generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]]) + + # fmt: off + expected_slice = np.array([0.4564, 0.5486, 0.4868, 0.5923, 0.3775, 0.5543, 0.4807, 0.4177, 0.3778, 0.5957, 0.5726, 0.4333, 0.6312, 0.5062, 0.4838, 0.5984]) + # fmt: on + + self.assertTrue( + np.allclose(generated_slice, expected_slice, atol=1e-3), "Output does not match expected slice." + ) + + @unittest.skip("Skip for now.") + def test_multi_vae(self): + pass + + +@slow +@require_big_accelerator +class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase): + pipeline_class = StableDiffusion3Img2ImgPipeline + repo_id = "stabilityai/stable-diffusion-3-medium-diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, seed=0): + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + return { + "prompt": "A photo of a cat", + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "generator": generator, + "image": init_image, + } + + def test_sd3_img2img_inference(self): + torch.manual_seed(0) + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) + pipe.enable_model_cpu_offload(device=torch_device) + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images[0] + image_slice = image[0, :10, :10] + + # fmt: off + expected_slices = Expectations( + { + ("xpu", 3): np.array([0.5117, 0.4421, 0.3852, 0.5044, 0.4219, 0.3262, 0.5024, 0.4329, 0.3276, 0.4978, 0.4412, 0.3355, 0.4983, 0.4338, 0.3279, 0.4893, 0.4241, 0.3129, 0.4875, 0.4253, 0.3030, 0.4961, 0.4267, 0.2988, 0.5029, 0.4255, 0.3054, 0.5132, 0.4248, 0.3222]), + ("cuda", 7): np.array([0.5435, 0.4673, 0.5732, 0.4438, 0.3557, 0.4912, 0.4331, 0.3491, 0.4915, 0.4287, 0.347, 0.4849, 0.4355, 0.3469, 0.4871, 0.4431, 0.3538, 0.4912, 0.4521, 0.3643, 0.5059, 0.4587, 0.373, 0.5166, 0.4685, 0.3845, 0.5264, 0.4746, 0.3914, 0.5342]), + ("cuda", 8): np.array([0.5146, 0.4385, 0.3826, 0.5098, 0.4150, 0.3218, 0.5142, 0.4312, 0.3298, 0.5127, 0.4431, 0.3411, 0.5171, 0.4424, 0.3374, 0.5088, 0.4348, 0.3242, 0.5073, 0.4380, 0.3174, 0.5132, 0.4397, 0.3115, 0.5132, 0.4343, 0.3118, 0.5219, 0.4328, 0.3256]), + } + ) + # fmt: on + + expected_slice = expected_slices.get_expectation() + + max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten()) + + assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..628930340294c7f54dd7813e1d921f0a65dc7ad2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_inpaint.py @@ -0,0 +1,154 @@ +import random +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, CLIPTextConfig, 
CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + SD3Transformer2DModel, + StableDiffusion3InpaintPipeline, +) + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusion3InpaintPipelineFastTests(PipelineLatentTesterMixin, unittest.TestCase, PipelineTesterMixin): + pipeline_class = StableDiffusion3InpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + required_optional_params = PipelineTesterMixin.required_optional_params + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset( + [] + ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = SD3Transformer2DModel( + sample_size=32, + patch_size=1, + in_channels=16, + num_layers=1, + attention_head_dim=8, + num_attention_heads=4, + joint_attention_dim=32, + caption_projection_dim=32, + pooled_projection_dim=64, + out_channels=16, + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config) + + text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=16, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "text_encoder_3": text_encoder_3, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "tokenizer_3": tokenizer_3, + "transformer": transformer, + "vae": vae, + "image_encoder": None, + "feature_extractor": None, + } + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + mask_image = torch.ones((1, 1, 32, 32)).to(device) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "mask_image": mask_image, + "height": 32, + "width": 32, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + 
"output_type": "np", + "strength": 0.8, + } + return inputs + + def test_inference(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images[0] + generated_slice = image.flatten() + generated_slice = np.concatenate([generated_slice[:8], generated_slice[-8:]]) + + # fmt: off + expected_slice = np.array([0.5035, 0.6661, 0.5859, 0.413, 0.4224, 0.4234, 0.7181, 0.5062, 0.5183, 0.6877, 0.5074, 0.585, 0.6111, 0.5422, 0.5306, 0.5891]) + # fmt: on + + self.assertTrue( + np.allclose(generated_slice, expected_slice, atol=1e-3), "Output does not match expected slice." + ) + + @unittest.skip("Skip for now.") + def test_multi_vae(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_adapter/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_adapter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..79b38d1cad1c54912dbdaab9c3acb1317a0ddf1b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_adapter/test_stable_diffusion_adapter.py @@ -0,0 +1,727 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import random +import unittest + +import numpy as np +import torch +from parameterized import parameterized +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AutoencoderKL, + LCMScheduler, + MultiAdapter, + PNDMScheduler, + StableDiffusionAdapterPipeline, + T2IAdapter, + UNet2DConditionModel, +) +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS +from ..test_pipelines_common import PipelineFromPipeTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference + + +enable_full_determinism() + + +class AdapterTests: + pipeline_class = StableDiffusionAdapterPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + + def get_dummy_components(self, adapter_type, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + time_cond_proj_dim=time_cond_proj_dim, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + + if adapter_type == "full_adapter" or adapter_type == "light_adapter": + adapter = T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=2, + adapter_type=adapter_type, + ) + elif adapter_type == "multi_adapter": + adapter = MultiAdapter( + [ + T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=2, + adapter_type="full_adapter", + ), + T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=2, + adapter_type="full_adapter", + ), + ] + ) + else: + raise ValueError( + f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''" + ) + + components = { + "adapter": adapter, + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_components_with_full_downscaling(self, adapter_type): + """Get dummy components with x8 VAE downscaling and 4 UNet down blocks. + These dummy components are intended to fully-exercise the T2I-Adapter + downscaling behavior. 
+ """ + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 32, 32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 32, 32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + + if adapter_type == "full_adapter" or adapter_type == "light_adapter": + adapter = T2IAdapter( + in_channels=3, + channels=[32, 32, 32, 64], + num_res_blocks=2, + downscale_factor=8, + adapter_type=adapter_type, + ) + elif adapter_type == "multi_adapter": + adapter = MultiAdapter( + [ + T2IAdapter( + in_channels=3, + channels=[32, 32, 32, 64], + num_res_blocks=2, + downscale_factor=8, + adapter_type="full_adapter", + ), + T2IAdapter( + in_channels=3, + channels=[32, 32, 32, 64], + num_res_blocks=2, + downscale_factor=8, + adapter_type="full_adapter", + ), + ] + ) + else: + raise ValueError( + f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''" + ) + + components = { + "adapter": adapter, + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1): + if num_images == 1: + image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) + else: + image = [ + floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images) + ] + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_attention_slicing_forward_pass(self): + return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + @parameterized.expand( + [ + # (dim=264) The internal feature map will be 33x33 after initial pixel unshuffling (downscaled x8). 
+ (((4 * 8 + 1) * 8),), + # (dim=272) The internal feature map will be 17x17 after the first T2I down block (downscaled x16). + (((4 * 4 + 1) * 16),), + # (dim=288) The internal feature map will be 9x9 after the second T2I down block (downscaled x32). + (((4 * 2 + 1) * 32),), + # (dim=320) The internal feature map will be 5x5 after the third T2I down block (downscaled x64). + (((4 * 1 + 1) * 64),), + ] + ) + def test_multiple_image_dimensions(self, dim): + """Test that the T2I-Adapter pipeline supports any input dimension that + is divisible by the adapter's `downscale_factor`. This test was added in + response to an issue where the T2I Adapter's downscaling padding + behavior did not match the UNet's behavior. + + Note that we have selected `dim` values to produce odd resolutions at + each downscaling level. + """ + components = self.get_dummy_components_with_full_downscaling() + sd_pipe = StableDiffusionAdapterPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim) + image = sd_pipe(**inputs).images + + assert image.shape == (1, dim, dim, 3) + + def test_adapter_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionAdapterPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_adapter_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionAdapterPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_encode_prompt_works_in_isolation(self): + extra_required_param_value_dict = { + "device": torch.device(torch_device).type, + "do_classifier_free_guidance": self.get_dummy_inputs(device=torch_device).get("guidance_scale", 1.0) > 1.0, + } + return super().test_encode_prompt_works_in_isolation(extra_required_param_value_dict) + + +class StableDiffusionFullAdapterPipelineFastTests( + AdapterTests, PipelineTesterMixin, PipelineFromPipeTesterMixin, unittest.TestCase +): + def get_dummy_components(self, time_cond_proj_dim=None): + return super().get_dummy_components("full_adapter", time_cond_proj_dim=time_cond_proj_dim) + + def get_dummy_components_with_full_downscaling(self): + return super().get_dummy_components_with_full_downscaling("full_adapter") + + def 
test_stable_diffusion_adapter_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionAdapterPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4858, 0.5500, 0.4278, 0.4669, 0.6184, 0.4322, 0.5010, 0.5033, 0.4746]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 + + def test_from_pipe_consistent_forward_pass_cpu_offload(self): + super().test_from_pipe_consistent_forward_pass_cpu_offload(expected_max_diff=6e-3) + + +class StableDiffusionLightAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): + def get_dummy_components(self, time_cond_proj_dim=None): + return super().get_dummy_components("light_adapter", time_cond_proj_dim=time_cond_proj_dim) + + def get_dummy_components_with_full_downscaling(self): + return super().get_dummy_components_with_full_downscaling("light_adapter") + + def test_stable_diffusion_adapter_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionAdapterPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4965, 0.5548, 0.4330, 0.4771, 0.6226, 0.4382, 0.5037, 0.5071, 0.4782]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 + + +class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): + supports_dduf = False + + def get_dummy_components(self, time_cond_proj_dim=None): + return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim) + + def get_dummy_components_with_full_downscaling(self): + return super().get_dummy_components_with_full_downscaling("multi_adapter") + + def get_dummy_inputs(self, device, height=64, width=64, seed=0): + inputs = super().get_dummy_inputs(device, seed, height=height, width=width, num_images=2) + inputs["adapter_conditioning_scale"] = [0.5, 0.5] + return inputs + + def test_stable_diffusion_adapter_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionAdapterPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4902, 0.5539, 0.4317, 0.4682, 0.6190, 0.4351, 0.5018, 0.5046, 0.4772]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 + + def test_inference_batch_consistent( + self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"] + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + logger = logging.get_logger(pipe.__module__) + 
logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + for batch_size in batch_sizes: + batched_inputs = {} + for name, value in inputs.items(): + if name in self.batch_params: + # prompt is string + if name == "prompt": + len_prompt = len(value) + # make unequal batch sizes + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + + # make last batch super long + batched_inputs[name][-1] = 100 * "very long" + elif name == "image": + batched_images = [] + + for image in value: + batched_images.append(batch_size * [image]) + + batched_inputs[name] = batched_images + else: + batched_inputs[name] = batch_size * [value] + + elif name == "batch_size": + batched_inputs[name] = batch_size + else: + batched_inputs[name] = value + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + batched_inputs["output_type"] = "np" + + if self.pipeline_class.__name__ == "DanceDiffusionPipeline": + batched_inputs.pop("output_type") + + output = pipe(**batched_inputs) + + assert len(output[0]) == batch_size + + batched_inputs["output_type"] = "np" + + if self.pipeline_class.__name__ == "DanceDiffusionPipeline": + batched_inputs.pop("output_type") + + output = pipe(**batched_inputs)[0] + + assert output.shape[0] == batch_size + + logger.setLevel(level=diffusers.logging.WARNING) + + def test_num_images_per_prompt(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + batch_sizes = [1, 2] + num_images_per_prompts = [1, 2] + + for batch_size in batch_sizes: + for num_images_per_prompt in num_images_per_prompts: + inputs = self.get_dummy_inputs(torch_device) + + for key in inputs.keys(): + if key in self.batch_params: + if key == "image": + batched_images = [] + + for image in inputs[key]: + batched_images.append(batch_size * [image]) + + inputs[key] = batched_images + else: + inputs[key] = batch_size * [inputs[key]] + + images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] + + assert images.shape[0] == batch_size * num_images_per_prompt + + def test_inference_batch_single_identical( + self, + batch_size=3, + test_max_difference=None, + test_mean_pixel_difference=None, + relax_max_difference=False, + expected_max_diff=2e-3, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + if test_max_difference is None: + # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems + # make sure that batched and non-batched is identical + test_max_difference = torch_device != "mps" + + if test_mean_pixel_difference is None: + # TODO same as above + test_mean_pixel_difference = torch_device != "mps" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + for name, value in inputs.items(): + if name in self.batch_params: + # prompt is string + if name == "prompt": + len_prompt = len(value) + # make unequal batch sizes + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + + # make last batch super long + batched_inputs[name][-1] = 100 * "very long" + elif name == "image": + batched_images = [] + + for image in value: + batched_images.append(batch_size 
* [image]) + + batched_inputs[name] = batched_images + else: + batched_inputs[name] = batch_size * [value] + elif name == "batch_size": + batched_inputs[name] = batch_size + elif name == "generator": + batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)] + else: + batched_inputs[name] = value + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + if self.pipeline_class.__name__ != "DanceDiffusionPipeline": + batched_inputs["output_type"] = "np" + + output_batch = pipe(**batched_inputs) + assert output_batch[0].shape[0] == batch_size + + inputs["generator"] = self.get_generator(0) + + output = pipe(**inputs) + + logger.setLevel(level=diffusers.logging.WARNING) + if test_max_difference: + if relax_max_difference: + # Taking the median of the largest differences + # is resilient to outliers + diff = np.abs(output_batch[0][0] - output[0][0]) + diff = diff.flatten() + diff.sort() + max_diff = np.median(diff[-5:]) + else: + max_diff = np.abs(output_batch[0][0] - output[0][0]).max() + assert max_diff < expected_max_diff + + if test_mean_pixel_difference: + assert_mean_pixel_difference(output_batch[0][0], output[0][0]) + + +@slow +@require_torch_accelerator +class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_diffusion_adapter_depth_sd_v15(self): + adapter_model = "TencentARC/t2iadapter_depth_sd15v2" + sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5" + prompt = "desk" + image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png" + input_channels = 3 + out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd15v2.npy" + out_url = "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_adapter/sd_adapter_v15_zoe_depth.npy" + + image = load_image(image_url) + expected_out = load_numpy(out_url) + if input_channels == 1: + image = image.convert("L") + + adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) + + pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + generator = torch.Generator(device="cpu").manual_seed(0) + out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images + + max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) + assert max_diff < 1e-2 + + def test_stable_diffusion_adapter_zoedepth_sd_v15(self): + adapter_model = "TencentARC/t2iadapter_zoedepth_sd15v1" + sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5" + prompt = "motorcycle" + image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motorcycle.png" + input_channels = 3 + out_url = "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_adapter/sd_adapter_v15_zoe_depth.npy" + + image = load_image(image_url) + expected_out = load_numpy(out_url) + if input_channels == 1: + image = image.convert("L") + + adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) + + pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, 
adapter=adapter, safety_checker=None) + pipe.set_progress_bar_config(disable=None) + pipe.enable_model_cpu_offload() + generator = torch.Generator(device="cpu").manual_seed(0) + out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images + + max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) + assert max_diff < 1e-2 + + def test_stable_diffusion_adapter_canny_sd_v15(self): + adapter_model = "TencentARC/t2iadapter_canny_sd15v2" + sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5" + prompt = "toy" + image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" + input_channels = 1 + out_url = "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_adapter/sd_adapter_v15_zoe_depth.npy" + + image = load_image(image_url) + expected_out = load_numpy(out_url) + if input_channels == 1: + image = image.convert("L") + + adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) + + pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + generator = torch.Generator(device="cpu").manual_seed(0) + + out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images + + max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) + assert max_diff < 1e-2 + + def test_stable_diffusion_adapter_sketch_sd15(self): + adapter_model = "TencentARC/t2iadapter_sketch_sd15v2" + sd_model = "stable-diffusion-v1-5/stable-diffusion-v1-5" + prompt = "cat" + image_url = ( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png" + ) + input_channels = 1 + out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd15v2.npy" + + image = load_image(image_url) + expected_out = load_numpy(out_url) + if input_channels == 1: + image = image.convert("L") + + adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) + + pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + + generator = torch.Generator(device="cpu").manual_seed(0) + + out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images + + max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) + assert max_diff < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_image_variation/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_image_variation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py new file mode 100644 index 0000000000000000000000000000000000000000..dbf5a7b68eae3a593cce010f9ef742ff89cc1b90 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_image_variation/test_stable_diffusion_image_variation.py @@ -0,0 +1,345 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection + +from diffusers import ( + AutoencoderKL, + DPMSolverMultistepScheduler, + PNDMScheduler, + StableDiffusionImageVariationPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS +from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusionImageVariationPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionImageVariationPipeline + params = IMAGE_VARIATION_PARAMS + batch_params = IMAGE_VARIATION_BATCH_PARAMS + image_params = frozenset([]) + # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + scheduler = PNDMScheduler(skip_prk_steps=True) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + projection_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + image_size=32, + patch_size=4, + ) + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + feature_extractor = CLIPImageProcessor(crop_size=32, size=32) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + "safety_checker": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) + image = image.cpu().permute(0, 2, 3, 
1)[0] + image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_img_variation_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_stable_diffusion_img_variation_multiple_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionImageVariationPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["image"] = 2 * [inputs["image"]] + output = sd_pipe(**inputs) + + image = output.images + + image_slice = image[-1, -3:, -3:, -1] + + assert image.shape == (2, 64, 64, 3) + expected_slice = np.array([0.6647, 0.5557, 0.5723, 0.5567, 0.5869, 0.6044, 0.5502, 0.5439, 0.5189]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + +@slow +@require_torch_accelerator +class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_imgvar/input_image_vermeer.png" + ) + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "image": init_image, + "latents": latents, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_img_variation_pipeline_default(self): + sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained( + "lambdalabs/sd-image-variations-diffusers", safety_checker=None + ) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_inputs(generator_device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 512, 512, 3) + expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851]) + + max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) + assert max_diff < 1e-4 + + def 
test_stable_diffusion_img_variation_intermediate_state(self): + number_of_steps = 0 + + def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None: + callback_fn.has_been_called = True + nonlocal number_of_steps + number_of_steps += 1 + if step == 1: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851]) + max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice) + + assert max_diff < 1e-3 + + elif step == 2: + latents = latents.detach().cpu().numpy() + assert latents.shape == (1, 4, 64, 64) + latents_slice = latents[0, -3:, -3:, -1] + expected_slice = np.array([0.5348, 0.5924, 0.4798, 0.5237, 0.5741, 0.4651, 0.5344, 0.4942, 0.4851]) + max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice) + + assert max_diff < 1e-3 + + callback_fn.has_been_called = False + + pipe = StableDiffusionImageVariationPipeline.from_pretrained( + "lambdalabs/sd-image-variations-diffusers", + safety_checker=None, + torch_dtype=torch.float16, + ) + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + generator_device = "cpu" + inputs = self.get_inputs(generator_device, dtype=torch.float16) + pipe(**inputs, callback=callback_fn, callback_steps=1) + assert callback_fn.has_been_called + assert number_of_steps == inputs["num_inference_steps"] + + def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableDiffusionImageVariationPipeline.from_pretrained( + "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing(1) + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device, dtype=torch.float16) + _ = pipe(**inputs) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 2.6 GB is allocated + assert mem_bytes < 2.6 * 10**9 + + +@nightly +@require_torch_accelerator +class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_imgvar/input_image_vermeer.png" + ) + latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) + latents = torch.from_numpy(latents).to(device=device, dtype=dtype) + inputs = { + "image": init_image, + "latents": latents, + "generator": generator, + "num_inference_steps": 50, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_img_variation_pndm(self): + sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers") + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = 
self.get_inputs(torch_device) + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_imgvar/lambdalabs_variations_pndm.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 + + def test_img_variation_dpm(self): + sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers") + sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + inputs["num_inference_steps"] = 25 + image = sd_pipe(**inputs).images[0] + + expected_image = load_numpy( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_imgvar/lambdalabs_variations_dpm_multi.npy" + ) + max_diff = np.abs(expected_image - image).max() + assert max_diff < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py new file mode 100644 index 0000000000000000000000000000000000000000..b318a505e9db90e5c5d875cbdf6155fc087fab72 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py @@ -0,0 +1,976 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
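+
+# A minimal usage sketch of the pipeline exercised by these tests (illustrative only,
+# kept as a comment so it is not executed; assumes network access to the public SDXL
+# base checkpoint "stabilityai/stable-diffusion-xl-base-1.0"):
+#
+#   import torch
+#   from diffusers import StableDiffusionXLPipeline
+#
+#   pipe = StableDiffusionXLPipeline.from_pretrained(
+#       "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+#   ).to("cuda")
+#   image = pipe("A painting of a squirrel eating a burger", num_inference_steps=30).images[0]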
+ +import copy +import gc +import tempfile +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LCMScheduler, + StableDiffusionXLImg2ImgPipeline, + StableDiffusionXLPipeline, + UNet2DConditionModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_image, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_TO_IMAGE_BATCH_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, + TEXT_TO_IMAGE_IMAGE_PARAMS, + TEXT_TO_IMAGE_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, + SDFunctionTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLPipelineFastTests( + SDFunctionTesterMixin, + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) + test_layerwise_casting = True + test_group_offloading = True + + def get_dummy_components(self, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(2, 4), + layers_per_block=2, + time_cond_proj_dim=time_cond_proj_dim, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + norm_num_groups=1, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": None, + "feature_extractor": None, + } + return 
components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + } + return inputs + + def test_stable_diffusion_xl_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5388, 0.5452, 0.4694, 0.4583, 0.5253, 0.4832, 0.5288, 0.5035, 0.47]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_euler_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_euler_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_ays(self): + from diffusers.schedulers import AysSchedules + + timestep_schedule = AysSchedules["StableDiffusionXLTimesteps"] + sigma_schedule = AysSchedules["StableDiffusionXLSigmas"] + + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 10 + output = sd_pipe(**inputs).images + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = None + inputs["timesteps"] = timestep_schedule + output_ts = sd_pipe(**inputs).images + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = None + inputs["sigmas"] = sigma_schedule + 
output_sigmas = sd_pipe(**inputs).images + + assert np.abs(output_sigmas.flatten() - output_ts.flatten()).max() < 1e-3, ( + "ays timesteps and ays sigmas should have the same outputs" + ) + assert np.abs(output.flatten() - output_ts.flatten()).max() > 1e-3, ( + "use ays timesteps should have different outputs" + ) + assert np.abs(output.flatten() - output_sigmas.flatten()).max() > 1e-3, ( + "use ays sigmas should have different outputs" + ) + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.5388, 0.5452, 0.4694, 0.4583, 0.5253, 0.4832, 0.5288, 0.5035, 0.4766]) + + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) + pipe_2.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, + split, + scheduler_cls_orig, + expected_tss, + num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) + expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) + expected_steps = expected_steps_1 + expected_steps_2 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) + expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, 
*args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = { + **inputs, + **{ + "denoising_end": 1.0 - (split / num_train_timesteps), + "output_type": "latent", + }, + } + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + inputs_2 = { + **inputs, + **{ + "denoising_start": 1.0 - (split / num_train_timesteps), + "image": latents, + }, + } + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + steps = 10 + for split in [300, 700]: + for scheduler_cls_timesteps in [ + (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), + ( + HeunDiscreteScheduler, + [ + 901.0, + 801.0, + 801.0, + 701.0, + 701.0, + 601.0, + 601.0, + 501.0, + 501.0, + 401.0, + 401.0, + 301.0, + 301.0, + 201.0, + 201.0, + 101.0, + 101.0, + 1.0, + 1.0, + ], + ), + ]: + assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) + + @slow + def test_stable_diffusion_two_xl_mixture_of_denoiser(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) + pipe_2.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, + split, + scheduler_cls_orig, + expected_tss, + num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) + expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) + expected_steps = expected_steps_1 + expected_steps_2 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) + expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, *args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = { + **inputs, + **{ + "denoising_end": 1.0 - (split / num_train_timesteps), + "output_type": "latent", + }, + } + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + inputs_2 = { + **inputs, + **{ + "denoising_start": 1.0 - (split / num_train_timesteps), + "image": latents, + }, + } + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + 
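+
+ # Worked example of the split arithmetic used above (illustrative): with
+ # num_train_timesteps == 1000 and split == 700, pipe_1 receives
+ # denoising_end == 1.0 - 700 / 1000 == 0.3 and is expected to run only the
+ # timesteps >= 700, while pipe_2, started with denoising_start == 0.3, picks up
+ # the remaining timesteps < 700.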
steps = 10 + for split in [300, 500, 700]: + for scheduler_cls_timesteps in [ + (DDIMScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), + (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), + (DPMSolverMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), + (UniPCMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), + ( + HeunDiscreteScheduler, + [ + 901.0, + 801.0, + 801.0, + 701.0, + 701.0, + 601.0, + 601.0, + 501.0, + 501.0, + 401.0, + 401.0, + 301.0, + 301.0, + 201.0, + 201.0, + 101.0, + 101.0, + 1.0, + 1.0, + ], + ), + ]: + assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) + + steps = 25 + for split in [300, 500, 700]: + for scheduler_cls_timesteps in [ + ( + DDIMScheduler, + [ + 961, + 921, + 881, + 841, + 801, + 761, + 721, + 681, + 641, + 601, + 561, + 521, + 481, + 441, + 401, + 361, + 321, + 281, + 241, + 201, + 161, + 121, + 81, + 41, + 1, + ], + ), + ( + EulerDiscreteScheduler, + [ + 961.0, + 921.0, + 881.0, + 841.0, + 801.0, + 761.0, + 721.0, + 681.0, + 641.0, + 601.0, + 561.0, + 521.0, + 481.0, + 441.0, + 401.0, + 361.0, + 321.0, + 281.0, + 241.0, + 201.0, + 161.0, + 121.0, + 81.0, + 41.0, + 1.0, + ], + ), + ( + DPMSolverMultistepScheduler, + [ + 951, + 913, + 875, + 837, + 799, + 761, + 723, + 685, + 647, + 609, + 571, + 533, + 495, + 457, + 419, + 381, + 343, + 305, + 267, + 229, + 191, + 153, + 115, + 77, + 39, + ], + ), + ( + UniPCMultistepScheduler, + [ + 951, + 913, + 875, + 837, + 799, + 761, + 723, + 685, + 647, + 609, + 571, + 533, + 495, + 457, + 419, + 381, + 343, + 305, + 267, + 229, + 191, + 153, + 115, + 77, + 39, + ], + ), + ( + HeunDiscreteScheduler, + [ + 961.0, + 921.0, + 921.0, + 881.0, + 881.0, + 841.0, + 841.0, + 801.0, + 801.0, + 761.0, + 761.0, + 721.0, + 721.0, + 681.0, + 681.0, + 641.0, + 641.0, + 601.0, + 601.0, + 561.0, + 561.0, + 521.0, + 521.0, + 481.0, + 481.0, + 441.0, + 441.0, + 401.0, + 401.0, + 361.0, + 361.0, + 321.0, + 321.0, + 281.0, + 281.0, + 241.0, + 241.0, + 201.0, + 201.0, + 161.0, + 161.0, + 121.0, + 121.0, + 81.0, + 81.0, + 41.0, + 41.0, + 1.0, + 1.0, + ], + ), + ]: + assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) + + @slow + def test_stable_diffusion_three_xl_mixture_of_denoiser(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) + pipe_2.unet.set_default_attn_processor() + pipe_3 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) + pipe_3.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, + split_1, + split_2, + scheduler_cls_orig, + num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) + split_2_ts = num_train_timesteps - int(round(num_train_timesteps * 
split_2)) + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) + expected_steps_2 = expected_steps_1[-1:] + list( + filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) + ) + expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) + expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) + expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) + expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, *args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, ( + f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" + ) + + with self.assertRaises(ValueError) as cm: + inputs_2 = { + **inputs, + **{ + "denoising_start": split_2, + "denoising_end": split_1, + "image": latents, + "output_type": "latent", + }, + } + pipe_2(**inputs_2).images[0] + assert "cannot be larger than or equal to `denoising_end`" in str(cm.exception) + + inputs_2 = { + **inputs, + **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, + } + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + + inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} + pipe_3(**inputs_3).images[0] + + assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] + assert expected_steps == done_steps, ( + f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" + ) + + for steps in [7, 11, 20]: + for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): + for scheduler_cls in [ + DDIMScheduler, + EulerDiscreteScheduler, + DPMSolverMultistepScheduler, + UniPCMultistepScheduler, + HeunDiscreteScheduler, + ]: + assert_run_mixture(steps, split_1, split_2, scheduler_cls) + + def test_stable_diffusion_xl_multi_prompts(self): + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + + # forward with single prompt + inputs = self.get_dummy_inputs(torch_device) + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = inputs["prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["prompt_2"] = "different prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + # manually set a negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = 
"negative prompt" + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same negative_prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = inputs["negative_prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = "different negative prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + def test_stable_diffusion_xl_negative_conditions(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice_with_no_neg_cond = image[0, -3:, -3:, -1] + + image = sd_pipe( + **inputs, + negative_original_size=(512, 512), + negative_crops_coords_top_left=(0, 0), + negative_target_size=(1024, 1024), + ).images + image_slice_with_neg_cond = image[0, -3:, -3:, -1] + + self.assertTrue(np.abs(image_slice_with_no_neg_cond - image_slice_with_neg_cond).max() > 1e-2) + + def test_stable_diffusion_xl_save_from_pretrained(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) + pipes.append(sd_pipe) + + with tempfile.TemporaryDirectory() as tmpdirname: + sd_pipe.save_pretrained(tmpdirname) + sd_pipe = StableDiffusionXLPipeline.from_pretrained(tmpdirname).to(torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + + def test_pipeline_interrupt(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "hey" + num_inference_steps = 3 + + # store intermediate latents from the generation process + class PipelineState: + def __init__(self): + self.state = [] + + def apply(self, pipe, i, t, callback_kwargs): + self.state.append(callback_kwargs["latents"]) + return callback_kwargs + + pipe_state = PipelineState() + sd_pipe( + prompt, + num_inference_steps=num_inference_steps, + output_type="np", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=pipe_state.apply, + ).images + + # interrupt generation at step index + interrupt_step_idx = 1 + + def callback_on_step_end(pipe, i, t, callback_kwargs): + if i == interrupt_step_idx: + pipe._interrupt = True + + return callback_kwargs + + output_interrupted = sd_pipe( + prompt, + num_inference_steps=num_inference_steps, + output_type="latent", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=callback_on_step_end, + ).images + + # fetch intermediate latents at the interrupted 
step + # from the completed generation process + intermediate_latent = pipe_state.state[interrupt_step_idx] + + # compare the intermediate latent to the output of the interrupted process + # they should be the same + assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) + + +@slow +class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_diffusion_lcm(self): + torch.manual_seed(0) + unet = UNet2DConditionModel.from_pretrained( + "latent-consistency/lcm-ssd-1b", torch_dtype=torch.float16, variant="fp16" + ) + sd_pipe = StableDiffusionXLPipeline.from_pretrained( + "segmind/SSD-1B", unet=unet, torch_dtype=torch.float16, variant="fp16" + ).to(torch_device) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "a red car standing on the side of the street" + + image = sd_pipe( + prompt, num_inference_steps=4, guidance_scale=8.0, generator=torch.Generator("cpu").manual_seed(0) + ).images[0] + expected_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_ssd_1b_lcm.png" + ) + + image = sd_pipe.image_processor.pil_to_numpy(image) + expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) + + max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) + + assert max_diff < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py new file mode 100644 index 0000000000000000000000000000000000000000..3d72270dda5c7e89cae1ad75bc0ebaff038d86cb --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_adapter.py @@ -0,0 +1,671 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
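+
+# A minimal usage sketch of the adapter pipeline exercised by these tests (illustrative
+# only, kept as a comment; the checkpoint ids and the conditioning image are assumptions,
+# any SDXL base model paired with an SDXL T2I-Adapter would do):
+#
+#   import torch
+#   from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter
+#
+#   adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-canny-sdxl-1.0", torch_dtype=torch.float16)
+#   pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
+#       "stabilityai/stable-diffusion-xl-base-1.0", adapter=adapter, torch_dtype=torch.float16
+#   ).to("cuda")
+#   image = pipe("a photo of a house", image=canny_edge_image).images[0]  # canny_edge_image: placeholder PIL image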
+ +import random +import unittest + +import numpy as np +import torch +from parameterized import parameterized +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +import diffusers +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + LCMScheduler, + MultiAdapter, + StableDiffusionXLAdapterPipeline, + T2IAdapter, + UNet2DConditionModel, +) +from diffusers.utils import logging + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + torch_device, +) +from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineTesterMixin, + assert_mean_pixel_difference, +) + + +enable_full_determinism() + + +class StableDiffusionXLAdapterPipelineFastTests(IPAdapterTesterMixin, PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableDiffusionXLAdapterPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + + def get_dummy_components(self, adapter_type="full_adapter_xl", time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + time_cond_proj_dim=time_cond_proj_dim, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + if adapter_type == "full_adapter_xl": + adapter = T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=4, + adapter_type=adapter_type, + ) + elif adapter_type == "multi_adapter": + adapter = MultiAdapter( + [ + T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=4, + adapter_type="full_adapter_xl", + ), + T2IAdapter( + in_channels=3, + channels=[32, 64], + num_res_blocks=2, + downscale_factor=4, + adapter_type="full_adapter_xl", + ), + ] + ) + else: + raise ValueError( + f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''" + ) + + components = { + "adapter": adapter, + "unet": unet, + "scheduler": scheduler, 
+ "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + # "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_components_with_full_downscaling(self, adapter_type="full_adapter_xl"): + """Get dummy components with x8 VAE downscaling and 3 UNet down blocks. + These dummy components are intended to fully-exercise the T2I-Adapter + downscaling behavior. + """ + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=2, + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=1, + projection_class_embeddings_input_dim=80, # 6 * 8 + 32 + cross_attention_dim=64, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 32, 32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + if adapter_type == "full_adapter_xl": + adapter = T2IAdapter( + in_channels=3, + channels=[32, 32, 64], + num_res_blocks=2, + downscale_factor=16, + adapter_type=adapter_type, + ) + elif adapter_type == "multi_adapter": + adapter = MultiAdapter( + [ + T2IAdapter( + in_channels=3, + channels=[32, 32, 64], + num_res_blocks=2, + downscale_factor=16, + adapter_type="full_adapter_xl", + ), + T2IAdapter( + in_channels=3, + channels=[32, 32, 64], + num_res_blocks=2, + downscale_factor=16, + adapter_type="full_adapter_xl", + ), + ] + ) + else: + raise ValueError( + f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter_xl', or 'multi_adapter''" + ) + + components = { + "adapter": adapter, + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + # "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1): + if num_images == 1: + image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) + else: + image = [ + floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in 
range(num_images) + ] + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + } + return inputs + + def test_ip_adapter(self, from_multi=False, expected_pipe_slice=None): + if not from_multi: + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array( + [0.5752, 0.6155, 0.4826, 0.5111, 0.5741, 0.4678, 0.5199, 0.5231, 0.4794] + ) + + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + def test_stable_diffusion_adapter_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([00.5752, 0.6155, 0.4826, 0.5111, 0.5741, 0.4678, 0.5199, 0.5231, 0.4794]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 + + @parameterized.expand( + [ + # (dim=144) The internal feature map will be 9x9 after initial pixel unshuffling (downscaled x16). + (((4 * 2 + 1) * 16),), + # (dim=160) The internal feature map will be 5x5 after the first T2I down block (downscaled x32). + (((4 * 1 + 1) * 32),), + ] + ) + def test_multiple_image_dimensions(self, dim): + """Test that the T2I-Adapter pipeline supports any input dimension that + is divisible by the adapter's `downscale_factor`. This test was added in + response to an issue where the T2I Adapter's downscaling padding + behavior did not match the UNet's behavior. + + Note that we have selected `dim` values to produce odd resolutions at + each downscaling level. + """ + components = self.get_dummy_components_with_full_downscaling() + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim) + image = sd_pipe(**inputs).images + + assert image.shape == (1, dim, dim, 3) + + @parameterized.expand(["full_adapter", "full_adapter_xl", "light_adapter"]) + def test_total_downscale_factor(self, adapter_type): + """Test that the T2IAdapter correctly reports its total_downscale_factor.""" + batch_size = 1 + in_channels = 3 + out_channels = [320, 640, 1280, 1280] + in_image_size = 512 + + adapter = T2IAdapter( + in_channels=in_channels, + channels=out_channels, + num_res_blocks=2, + downscale_factor=8, + adapter_type=adapter_type, + ) + adapter.to(torch_device) + + in_image = floats_tensor((batch_size, in_channels, in_image_size, in_image_size)).to(torch_device) + + adapter_state = adapter(in_image) + + # Assume that the last element in `adapter_state` has been downsampled the most, and check + # that it matches the `total_downscale_factor`. 
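+ # Illustrative arithmetic for the check below: if the adapter reports a
+ # total_downscale_factor of, say, 64, then for a 512-pixel input the coarsest
+ # adapter feature map is expected to be 512 // 64 == 8 pixels per side.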
+ expected_out_image_size = in_image_size // adapter.total_downscale_factor + assert adapter_state[-1].shape == ( + batch_size, + out_channels[-1], + expected_out_image_size, + expected_out_image_size, + ) + + def test_adapter_sdxl_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_adapter_sdxl_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5425, 0.5385, 0.4964, 0.5045, 0.6149, 0.4974, 0.5469, 0.5332, 0.5426]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + +class StableDiffusionXLMultiAdapterPipelineFastTests( + StableDiffusionXLAdapterPipelineFastTests, PipelineTesterMixin, unittest.TestCase +): + supports_dduf = False + + def get_dummy_components(self, time_cond_proj_dim=None): + return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim) + + def get_dummy_components_with_full_downscaling(self): + return super().get_dummy_components_with_full_downscaling("multi_adapter") + + def get_dummy_inputs(self, device, seed=0, height=64, width=64): + inputs = super().get_dummy_inputs(device, seed, height, width, num_images=2) + inputs["adapter_conditioning_scale"] = [0.5, 0.5] + return inputs + + def test_stable_diffusion_adapter_default_case(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5617, 0.6081, 0.4807, 0.5071, 0.5665, 0.4614, 0.5165, 0.5164, 0.4786]) + assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.5617, 0.6081, 0.4807, 0.5071, 0.5665, 0.4614, 0.5165, 0.5164, 0.4786]) + + return super().test_ip_adapter(from_multi=True, expected_pipe_slice=expected_pipe_slice) + + def test_inference_batch_consistent( + self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"] + ): + 
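+ # Overridden from the shared tester mixin, presumably because MultiAdapter takes a
+ # list of conditioning images: each batchable input is replicated batch_size times,
+ # prompts are truncated to unequal lengths, and the pipeline is expected to return
+ # batch_size outputs per call.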
components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + for batch_size in batch_sizes: + batched_inputs = {} + for name, value in inputs.items(): + if name in self.batch_params: + # prompt is string + if name == "prompt": + len_prompt = len(value) + # make unequal batch sizes + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + + # make last batch super long + batched_inputs[name][-1] = 100 * "very long" + elif name == "image": + batched_images = [] + + for image in value: + batched_images.append(batch_size * [image]) + + batched_inputs[name] = batched_images + else: + batched_inputs[name] = batch_size * [value] + + elif name == "batch_size": + batched_inputs[name] = batch_size + else: + batched_inputs[name] = value + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + batched_inputs["output_type"] = "np" + + output = pipe(**batched_inputs) + + assert len(output[0]) == batch_size + + batched_inputs["output_type"] = "np" + + output = pipe(**batched_inputs)[0] + + assert output.shape[0] == batch_size + + logger.setLevel(level=diffusers.logging.WARNING) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + def test_num_images_per_prompt(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + batch_sizes = [1, 2] + num_images_per_prompts = [1, 2] + + for batch_size in batch_sizes: + for num_images_per_prompt in num_images_per_prompts: + inputs = self.get_dummy_inputs(torch_device) + + for key in inputs.keys(): + if key in self.batch_params: + if key == "image": + batched_images = [] + + for image in inputs[key]: + batched_images.append(batch_size * [image]) + + inputs[key] = batched_images + else: + inputs[key] = batch_size * [inputs[key]] + + images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] + + assert images.shape[0] == batch_size * num_images_per_prompt + + def test_inference_batch_single_identical( + self, + batch_size=3, + test_max_difference=None, + test_mean_pixel_difference=None, + relax_max_difference=False, + expected_max_diff=2e-3, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + if test_max_difference is None: + # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems + # make sure that batched and non-batched is identical + test_max_difference = torch_device != "mps" + + if test_mean_pixel_difference is None: + # TODO same as above + test_mean_pixel_difference = torch_device != "mps" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + for name, value in inputs.items(): + if name in self.batch_params: + # prompt is string + if name == "prompt": + len_prompt = len(value) + # make unequal batch sizes + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + + # make 
last batch super long + batched_inputs[name][-1] = 100 * "very long" + elif name == "image": + batched_images = [] + + for image in value: + batched_images.append(batch_size * [image]) + + batched_inputs[name] = batched_images + else: + batched_inputs[name] = batch_size * [value] + elif name == "batch_size": + batched_inputs[name] = batch_size + elif name == "generator": + batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)] + else: + batched_inputs[name] = value + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output_batch = pipe(**batched_inputs) + assert output_batch[0].shape[0] == batch_size + + inputs["generator"] = self.get_generator(0) + + output = pipe(**inputs) + + logger.setLevel(level=diffusers.logging.WARNING) + if test_max_difference: + if relax_max_difference: + # Taking the median of the largest differences + # is resilient to outliers + diff = np.abs(output_batch[0][0] - output[0][0]) + diff = diff.flatten() + diff.sort() + max_diff = np.median(diff[-5:]) + else: + max_diff = np.abs(output_batch[0][0] - output[0][0]).max() + assert max_diff < expected_max_diff + + if test_mean_pixel_difference: + assert_mean_pixel_difference(output_batch[0][0], output[0][0]) + + def test_adapter_sdxl_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_adapter_sdxl_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLAdapterPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + output = sd_pipe(**inputs) + image = output.images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + expected_slice = np.array([0.5313, 0.5375, 0.4942, 0.5021, 0.6142, 0.4968, 0.5434, 0.5311, 0.5448]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..c5499847069fcae0337abc69d1e3aba63cbba050 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_img2img.py @@ -0,0 +1,717 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + AutoencoderKL, + AutoencoderTiny, + EDMDPMSolverMultistepScheduler, + EulerDiscreteScheduler, + LCMScheduler, + StableDiffusionXLImg2ImgPipeline, + UNet2DConditionModel, +) + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + floats_tensor, + load_image, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import ( + IPAdapterTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLImg2ImgPipelineFastTests( + IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionXLImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + {"add_text_embeds", "add_time_ids", "add_neg_time_ids"} + ) + + supports_dduf = False + + def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + time_cond_proj_dim=time_cond_proj_dim, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=72, # 5 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + image_size=224, + projection_dim=32, + intermediate_size=37, + num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + feature_extractor 
= CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "requires_aesthetics_score": True, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + } + return components + + def get_dummy_tiny_autoencoder(self): + return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components.pop("requires_aesthetics_score") + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "strength": 0.8, + } + return inputs + + def test_stable_diffusion_xl_img2img_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + expected_slice = np.array([0.4664, 0.4886, 0.4403, 0.6902, 0.5592, 0.4534, 0.5931, 0.5951, 0.5224]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_img2img_euler_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) + + assert 
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_img2img_euler_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + expected_slice = np.array([0.5604, 0.4352, 0.4717, 0.5844, 0.5101, 0.6704, 0.6290, 0.5460, 0.5286]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + @unittest.skip("Skip for now.") + def test_save_load_optional_components(self): + pass + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.5133, 0.4626, 0.4970, 0.6273, 0.5160, 0.6891, 0.6639, 0.5892, 0.5709]) + + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_stable_diffusion_xl_img2img_tiny_autoencoder(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.vae = self.get_dummy_tiny_autoencoder() + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1].flatten() + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.0, 0.0, 0.0106, 0.0, 0.0, 0.0087, 0.0052, 0.0062, 0.0177]) + + assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_stable_diffusion_xl_multi_prompts(self): + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + + # forward with single prompt + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["num_inference_steps"] = 5 + output = sd_pipe(**inputs) + image_slice_1 = 
output.images[0, -3:, -3:, -1] + + # forward with same prompt duplicated + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["num_inference_steps"] = 5 + inputs["prompt_2"] = inputs["prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different prompt + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["num_inference_steps"] = 5 + inputs["prompt_2"] = "different prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + # manually set a negative_prompt + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["num_inference_steps"] = 5 + inputs["negative_prompt"] = "negative prompt" + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same negative_prompt duplicated + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["num_inference_steps"] = 5 + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = inputs["negative_prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different negative_prompt + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["num_inference_steps"] = 5 + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = "different negative prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + def test_stable_diffusion_xl_img2img_negative_conditions(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] + + image = sd_pipe( + **inputs, + negative_original_size=(512, 512), + negative_crops_coords_top_left=( + 0, + 0, + ), + negative_target_size=(1024, 1024), + ).images + image_slice_with_neg_conditions = image[0, -3:, -3:, -1] + + assert ( + np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() + > 1e-4 + ) + + def test_pipeline_interrupt(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = "hey" + num_inference_steps = 5 + + # store intermediate latents from the generation process + class PipelineState: + def __init__(self): + self.state = [] + + def apply(self, pipe, i, t, callback_kwargs): + self.state.append(callback_kwargs["latents"]) + return callback_kwargs + + pipe_state = PipelineState() + sd_pipe( + prompt, + image=inputs["image"], + strength=0.8, + num_inference_steps=num_inference_steps, + output_type="np", 
+ generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=pipe_state.apply, + ).images + + # interrupt generation at step index + interrupt_step_idx = 1 + + def callback_on_step_end(pipe, i, t, callback_kwargs): + if i == interrupt_step_idx: + pipe._interrupt = True + + return callback_kwargs + + output_interrupted = sd_pipe( + prompt, + image=inputs["image"], + strength=0.8, + num_inference_steps=num_inference_steps, + output_type="latent", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=callback_on_step_end, + ).images + + # fetch intermediate latents at the interrupted step + # from the completed generation process + intermediate_latent = pipe_state.state[interrupt_step_idx] + + # compare the intermediate latent to the output of the interrupted process + # they should be the same + assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) + + +class StableDiffusionXLImg2ImgRefinerOnlyPipelineFastTests( + PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionXLImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} + required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=72, # 5 * 8 + 32 + cross_attention_dim=32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "tokenizer": None, + "text_encoder": None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "requires_aesthetics_score": True, + "image_encoder": None, + "feature_extractor": None, + } + return components + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components.pop("requires_aesthetics_score") + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def 
get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "output_type": "np", + "strength": 0.8, + } + return inputs + + def test_stable_diffusion_xl_img2img_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + + expected_slice = np.array([0.4745, 0.4924, 0.4338, 0.6468, 0.5547, 0.4419, 0.5646, 0.5897, 0.5146]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLImg2ImgPipeline(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_stable_diffusion_xl_img2img_negative_conditions(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] + + image = sd_pipe( + **inputs, + negative_original_size=(512, 512), + negative_crops_coords_top_left=( + 0, + 0, + ), + negative_target_size=(1024, 1024), + ).images + image_slice_with_neg_conditions = image[0, -3:, -3:, -1] + + assert ( + np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() + > 1e-4 + ) + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + @unittest.skip("We test this functionality elsewhere already.") + def test_save_load_optional_components(self): + pass + + +@slow +class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + 
super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_diffusion_xl_img2img_playground(self): + torch.manual_seed(0) + model_path = "playgroundai/playground-v2.5-1024px-aesthetic" + + sd_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( + model_path, torch_dtype=torch.float16, variant="fp16", add_watermarker=False + ) + + sd_pipe.enable_model_cpu_offload() + sd_pipe.scheduler = EDMDPMSolverMultistepScheduler.from_config( + sd_pipe.scheduler.config, use_karras_sigmas=True + ) + sd_pipe.set_progress_bar_config(disable=None) + + prompt = "a photo of an astronaut riding a horse on mars" + + url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png" + + init_image = load_image(url).convert("RGB") + + image = sd_pipe( + prompt, + num_inference_steps=30, + guidance_scale=8.0, + image=init_image, + height=1024, + width=1024, + output_type="np", + ).images + + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 1024, 1024, 3) + + expected_slice = np.array([0.3519, 0.3149, 0.3364, 0.3505, 0.3402, 0.3371, 0.3554, 0.3495, 0.3333]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py new file mode 100644 index 0000000000000000000000000000000000000000..d3f5779c7633eedb0ddeacc6286c5adcda8f1a37 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_inpaint.py @@ -0,0 +1,826 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import random +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTextModelWithProjection, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + DPMSolverMultistepScheduler, + EulerDiscreteScheduler, + HeunDiscreteScheduler, + LCMScheduler, + StableDiffusionXLInpaintPipeline, + UNet2DConditionModel, + UniPCMultistepScheduler, +) + +from ...testing_utils import ( + enable_full_determinism, + floats_tensor, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import ( + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, + TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, +) +from ..test_pipelines_common import IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin + + +enable_full_determinism() + + +class StableDiffusionXLInpaintPipelineFastTests( + IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableDiffusionXLInpaintPipeline + params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = frozenset([]) + # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union( + { + "add_text_embeds", + "add_time_ids", + "mask", + "masked_image_latents", + } + ) + + supports_dduf = False + + def get_dummy_components(self, skip_first_text_encoder=False, time_cond_proj_dim=None): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + time_cond_proj_dim=time_cond_proj_dim, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=72, # 5 * 8 + 32 + cross_attention_dim=64 if not skip_first_text_encoder else 32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=32, + image_size=224, + projection_dim=32, + intermediate_size=37, 
+ num_attention_heads=4, + num_channels=3, + num_hidden_layers=5, + patch_size=14, + ) + + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + feature_extractor = CLIPImageProcessor( + crop_size=224, + do_center_crop=True, + do_normalize=True, + do_resize=True, + image_mean=[0.48145466, 0.4578275, 0.40821073], + image_std=[0.26862954, 0.26130258, 0.27577711], + resample=3, + size=224, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder if not skip_first_text_encoder else None, + "tokenizer": tokenizer if not skip_first_text_encoder else None, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + "image_encoder": image_encoder, + "feature_extractor": feature_extractor, + "requires_aesthetics_score": True, + } + return components + + def get_dummy_inputs(self, device, seed=0): + # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched + image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + image = image.cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) + # create mask + image[8:, 8:, :] = 255 + mask_image = Image.fromarray(np.uint8(image)).convert("L").resize((64, 64)) + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "strength": 1.0, + "output_type": "np", + } + return inputs + + def get_dummy_inputs_2images(self, device, seed=0, img_res=64): + # Get random floats in [0, 1] as image with spatial size (img_res, img_res) + image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) + image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) + # Convert images to [-1, 1] + init_image1 = 2.0 * image1 - 1.0 + init_image2 = 2.0 * image2 - 1.0 + + # empty mask + mask_image = torch.zeros((1, 1, img_res, img_res), device=device) + + if str(device).startswith("mps"): + generator1 = torch.manual_seed(seed) + generator2 = torch.manual_seed(seed) + else: + generator1 = torch.Generator(device=device).manual_seed(seed) + generator2 = torch.Generator(device=device).manual_seed(seed) + + inputs = { + "prompt": ["A painting of a squirrel eating a burger"] * 2, + "image": [init_image1, init_image2], + "mask_image": [mask_image] * 2, + "generator": [generator1, generator2], + "num_inference_steps": 2, + "guidance_scale": 6.0, + "output_type": "np", + } + return inputs + + def test_ip_adapter(self): + expected_pipe_slice = None + if torch_device == "cpu": + expected_pipe_slice = np.array([0.8274, 0.5538, 0.6141, 0.5843, 0.6865, 0.7082, 0.5861, 0.6123, 0.5344]) + + return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice) + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components.pop("requires_aesthetics_score") + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_stable_diffusion_xl_inpaint_euler(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = 
StableDiffusionXLInpaintPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.8279, 0.5673, 0.6088, 0.6156, 0.6923, 0.7347, 0.6547, 0.6108, 0.5198]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_inpaint_euler_lcm(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLInpaintPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_xl_inpaint_euler_lcm_custom_timesteps(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(time_cond_proj_dim=256) + sd_pipe = StableDiffusionXLInpaintPipeline(**components) + sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.config) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + del inputs["num_inference_steps"] + inputs["timesteps"] = [999, 499] + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.6611, 0.5569, 0.5531, 0.5471, 0.5918, 0.6393, 0.5074, 0.5468, 0.5185]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + @unittest.skip("Skip for now.") + def test_save_load_optional_components(self): + pass + + @require_torch_accelerator + def test_stable_diffusion_xl_inpaint_negative_prompt_embeds(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLInpaintPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + # forward without prompt embeds + inputs = self.get_dummy_inputs(torch_device) + negative_prompt = 3 * ["this is a negative prompt"] + inputs["negative_prompt"] = negative_prompt + inputs["prompt"] = 3 * [inputs["prompt"]] + + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with prompt embeds + inputs = self.get_dummy_inputs(torch_device) + negative_prompt = 3 * ["this is a negative prompt"] + prompt = 3 * [inputs.pop("prompt")] + + ( + prompt_embeds, + negative_prompt_embeds, + pooled_prompt_embeds, + negative_pooled_prompt_embeds, + ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) + + output = sd_pipe( + **inputs, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + 
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, + ) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # make sure that it's equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + @require_torch_accelerator + def test_stable_diffusion_xl_offloads(self): + pipes = [] + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLInpaintPipeline(**components) + sd_pipe.enable_model_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLInpaintPipeline(**components) + sd_pipe.enable_sequential_cpu_offload(device=torch_device) + pipes.append(sd_pipe) + + image_slices = [] + for pipe in pipes: + pipe.unet.set_default_attn_processor() + + inputs = self.get_dummy_inputs(torch_device) + image = pipe(**inputs).images + + image_slices.append(image[0, -3:, -3:, -1].flatten()) + + assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 + assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 + + def test_stable_diffusion_xl_refiner(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components(skip_first_text_encoder=True) + + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 64, 64, 3) + + expected_slice = np.array([0.7540, 0.5231, 0.5833, 0.6217, 0.6339, 0.7067, 0.6507, 0.5672, 0.5030]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 + + def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_2.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) + expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) + expected_steps = expected_steps_1 + expected_steps_2 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) + expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, *args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + 
return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + for steps in [7, 20]: + assert_run_mixture(steps, 0.33, EulerDiscreteScheduler) + assert_run_mixture(steps, 0.33, HeunDiscreteScheduler) + + @slow + def test_stable_diffusion_two_xl_mixture_of_denoiser(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + pipe_2 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_2.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, split, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + split_ts = num_train_timesteps - int(round(num_train_timesteps * split)) + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) + expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split_ts, expected_steps)) + expected_steps = expected_steps_1 + expected_steps_2 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split_ts, expected_steps)) + expected_steps_2 = list(filter(lambda ts: ts < split_ts, expected_steps)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, *args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = {**inputs, **{"denoising_end": split, "output_type": "latent"}} + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + inputs_2 = {**inputs, **{"denoising_start": split, "image": latents}} + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" + + for steps in [5, 8, 20]: + for split in [0.33, 0.49, 0.71]: + for scheduler_cls in [ + DDIMScheduler, + EulerDiscreteScheduler, + DPMSolverMultistepScheduler, + UniPCMultistepScheduler, + HeunDiscreteScheduler, + ]: + assert_run_mixture(steps, split, scheduler_cls) + + @slow + def test_stable_diffusion_three_xl_mixture_of_denoiser(self): + components = self.get_dummy_components() + pipe_1 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_1.unet.set_default_attn_processor() + pipe_2 = 
StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_2.unet.set_default_attn_processor() + pipe_3 = StableDiffusionXLInpaintPipeline(**components).to(torch_device) + pipe_3.unet.set_default_attn_processor() + + def assert_run_mixture( + num_steps, + split_1, + split_2, + scheduler_cls_orig, + num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, + ): + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = num_steps + + class scheduler_cls(scheduler_cls_orig): + pass + + pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) + pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) + pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) + + # Let's retrieve the number of timesteps we want to use + pipe_1.scheduler.set_timesteps(num_steps) + expected_steps = pipe_1.scheduler.timesteps.tolist() + + split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) + split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) + + if pipe_1.scheduler.order == 2: + expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) + expected_steps_2 = expected_steps_1[-1:] + list( + filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) + ) + expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) + expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 + else: + expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) + expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) + expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) + + # now we monkey patch step `done_steps` + # list into the step function for testing + done_steps = [] + old_step = copy.copy(scheduler_cls.step) + + def new_step(self, *args, **kwargs): + done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` + return old_step(self, *args, **kwargs) + + scheduler_cls.step = new_step + + inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} + latents = pipe_1(**inputs_1).images[0] + + assert expected_steps_1 == done_steps, ( + f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" + ) + + inputs_2 = { + **inputs, + **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, + } + pipe_2(**inputs_2).images[0] + + assert expected_steps_2 == done_steps[len(expected_steps_1) :] + + inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} + pipe_3(**inputs_3).images[0] + + assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] + assert expected_steps == done_steps, ( + f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" + ) + + for steps in [7, 11, 20]: + for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): + for scheduler_cls in [ + DDIMScheduler, + EulerDiscreteScheduler, + DPMSolverMultistepScheduler, + UniPCMultistepScheduler, + HeunDiscreteScheduler, + ]: + assert_run_mixture(steps, split_1, split_2, scheduler_cls) + + def test_stable_diffusion_xl_multi_prompts(self): + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(torch_device) + + # forward with single prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 5 + output = sd_pipe(**inputs) + image_slice_1 = 
output.images[0, -3:, -3:, -1] + + # forward with same prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 5 + inputs["prompt_2"] = inputs["prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 5 + inputs["prompt_2"] = "different prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + # manually set a negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 5 + inputs["negative_prompt"] = "negative prompt" + output = sd_pipe(**inputs) + image_slice_1 = output.images[0, -3:, -3:, -1] + + # forward with same negative_prompt duplicated + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 5 + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = inputs["negative_prompt"] + output = sd_pipe(**inputs) + image_slice_2 = output.images[0, -3:, -3:, -1] + + # ensure the results are equal + assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 + + # forward with different negative_prompt + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 5 + inputs["negative_prompt"] = "negative prompt" + inputs["negative_prompt_2"] = "different negative prompt" + output = sd_pipe(**inputs) + image_slice_3 = output.images[0, -3:, -3:, -1] + + # ensure the results are not equal + assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 + + def test_stable_diffusion_xl_img2img_negative_conditions(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + image = sd_pipe(**inputs).images + image_slice_with_no_neg_conditions = image[0, -3:, -3:, -1] + + image = sd_pipe( + **inputs, + negative_original_size=(512, 512), + negative_crops_coords_top_left=( + 0, + 0, + ), + negative_target_size=(1024, 1024), + ).images + image_slice_with_neg_conditions = image[0, -3:, -3:, -1] + + assert ( + np.abs(image_slice_with_no_neg_conditions.flatten() - image_slice_with_neg_conditions.flatten()).max() + > 1e-4 + ) + + def test_stable_diffusion_xl_inpaint_mask_latents(self): + device = "cpu" + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components).to(device) + sd_pipe.set_progress_bar_config(disable=None) + + # normal mask + normal image + ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None + inputs = self.get_dummy_inputs(device) + inputs["strength"] = 0.9 + out_0 = sd_pipe(**inputs).images + + # image latents + mask latents + inputs = self.get_dummy_inputs(device) + image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) + mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) + masked_image = image * (mask < 0.5) + + generator = torch.Generator(device=device).manual_seed(0) + image_latents = sd_pipe._encode_vae_image(image, generator=generator) + torch.randn((1, 4, 32, 32), 
generator=generator) + mask_latents = sd_pipe._encode_vae_image(masked_image, generator=generator) + inputs["image"] = image_latents + inputs["masked_image_latents"] = mask_latents + inputs["mask_image"] = mask + inputs["strength"] = 0.9 + generator = torch.Generator(device=device).manual_seed(0) + torch.randn((1, 4, 32, 32), generator=generator) + inputs["generator"] = generator + out_1 = sd_pipe(**inputs).images + assert np.abs(out_0 - out_1).max() < 1e-2 + + def test_stable_diffusion_xl_inpaint_2_images(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = self.pipeline_class(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + # test to confirm if we pass two same image, we will get same output + inputs = self.get_dummy_inputs(device) + gen1 = torch.Generator(device=device).manual_seed(0) + gen2 = torch.Generator(device=device).manual_seed(0) + for name in ["prompt", "image", "mask_image"]: + inputs[name] = [inputs[name]] * 2 + inputs["generator"] = [gen1, gen2] + images = sd_pipe(**inputs).images + + assert images.shape == (2, 64, 64, 3) + + image_slice1 = images[0, -3:, -3:, -1] + image_slice2 = images[1, -3:, -3:, -1] + assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 + + # test to confirm that if we pass two different images, we will get different output + inputs = self.get_dummy_inputs_2images(device) + images = sd_pipe(**inputs).images + assert images.shape == (2, 64, 64, 3) + + image_slice1 = images[0, -3:, -3:, -1] + image_slice2 = images[1, -3:, -3:, -1] + assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 + + def test_pipeline_interrupt(self): + components = self.get_dummy_components() + sd_pipe = StableDiffusionXLInpaintPipeline(**components) + sd_pipe = sd_pipe.to(torch_device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + prompt = "hey" + num_inference_steps = 5 + + # store intermediate latents from the generation process + class PipelineState: + def __init__(self): + self.state = [] + + def apply(self, pipe, i, t, callback_kwargs): + self.state.append(callback_kwargs["latents"]) + return callback_kwargs + + pipe_state = PipelineState() + sd_pipe( + prompt, + image=inputs["image"], + mask_image=inputs["mask_image"], + strength=0.8, + num_inference_steps=num_inference_steps, + output_type="np", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=pipe_state.apply, + ).images + + # interrupt generation at step index + interrupt_step_idx = 1 + + def callback_on_step_end(pipe, i, t, callback_kwargs): + if i == interrupt_step_idx: + pipe._interrupt = True + + return callback_kwargs + + output_interrupted = sd_pipe( + prompt, + image=inputs["image"], + mask_image=inputs["mask_image"], + strength=0.8, + num_inference_steps=num_inference_steps, + output_type="latent", + generator=torch.Generator("cpu").manual_seed(0), + callback_on_step_end=callback_on_step_end, + ).images + + # fetch intermediate latents at the interrupted step + # from the completed generation process + intermediate_latent = pipe_state.state[interrupt_step_idx] + + # compare the intermediate latent to the output of the interrupted process + # they should be the same + assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) diff --git 
a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py new file mode 100644 index 0000000000000000000000000000000000000000..20a03583e7a965fae46c9655313e0844edbc4b35 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl_instruction_pix2pix.py @@ -0,0 +1,189 @@ +# coding=utf-8 +# Copyright 2025 Harutatsu Akiyama and HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import unittest + +import numpy as np +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + EulerDiscreteScheduler, + UNet2DConditionModel, +) +from diffusers.image_processor import VaeImageProcessor +from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_instruct_pix2pix import ( + StableDiffusionXLInstructPix2PixPipeline, +) + +from ...testing_utils import enable_full_determinism, floats_tensor, torch_device +from ..pipeline_params import ( + IMAGE_TO_IMAGE_IMAGE_PARAMS, + TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, + TEXT_GUIDED_IMAGE_VARIATION_PARAMS, +) +from ..test_pipelines_common import ( + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class StableDiffusionXLInstructPix2PixPipelineFastTests( + PipelineLatentTesterMixin, + PipelineKarrasSchedulerTesterMixin, + PipelineTesterMixin, + unittest.TestCase, +): + pipeline_class = StableDiffusionXLInstructPix2PixPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} + batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS + image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=8, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + # SD2-specific config below + attention_head_dim=(2, 4), + use_linear_projection=True, + addition_embed_type="text_time", + addition_time_embed_dim=8, + transformer_layers_per_block=(1, 2), + projection_class_embeddings_input_dim=80, # 5 * 8 + 32 + cross_attention_dim=64, + ) + + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + steps_offset=1, + beta_schedule="scaled_linear", + timestep_spacing="leading", + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + sample_size=128, + ) + 
torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + # SD2-specific config below + hidden_act="gelu", + projection_dim=32, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) + tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "text_encoder_2": text_encoder_2, + "tokenizer_2": tokenizer_2, + } + return components + + def get_dummy_inputs(self, device, seed=0): + image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) + image = image / 2 + 0.5 + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "image_guidance_scale": 1, + "output_type": "np", + } + return inputs + + def test_components_function(self): + init_components = self.get_dummy_components() + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_inference_batch_single_identical(self): + super().test_inference_batch_single_identical(expected_max_diff=3e-3) + + def test_attention_slicing_forward_pass(self): + super().test_attention_slicing_forward_pass(expected_max_diff=2e-3) + + # Overwrite the default test_latents_inputs because pix2pix encode the image differently + def test_latents_input(self): + components = self.get_dummy_components() + pipe = StableDiffusionXLInstructPix2PixPipeline(**components) + pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] + + vae = components["vae"] + inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") + + for image_param in self.image_latents_params: + if image_param in inputs.keys(): + inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode() + + out_latents_inputs = pipe(**inputs)[0] + + max_diff = np.abs(out - out_latents_inputs).max() + self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") + + @unittest.skip("Test not supported at the moment.") + def test_cfg(self): + pass + + @unittest.skip("Functionality is tested elsewhere.") + def test_save_load_optional_components(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/test_stable_unclip.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/test_stable_unclip.py new file mode 100644 index 
0000000000000000000000000000000000000000..8923c2f63ceee0c50198c4c1051de0c557150c1d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/test_stable_unclip.py @@ -0,0 +1,257 @@ +import gc +import unittest + +import torch +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + DDIMScheduler, + DDPMScheduler, + PriorTransformer, + StableUnCLIPPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + load_numpy, + nightly, + require_torch_accelerator, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, + assert_mean_pixel_difference, +) + + +enable_full_determinism() + + +class StableUnCLIPPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableUnCLIPPipeline + params = TEXT_TO_IMAGE_PARAMS + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + + # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false + test_xformers_attention = False + + def get_dummy_components(self): + embedder_hidden_size = 32 + embedder_projection_dim = embedder_hidden_size + + # prior components + + torch.manual_seed(0) + prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + prior_text_encoder = CLIPTextModelWithProjection( + CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=embedder_hidden_size, + projection_dim=embedder_projection_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + ) + + torch.manual_seed(0) + prior = PriorTransformer( + num_attention_heads=2, + attention_head_dim=12, + embedding_dim=embedder_projection_dim, + num_layers=1, + ) + + torch.manual_seed(0) + prior_scheduler = DDPMScheduler( + variance_type="fixed_small_log", + prediction_type="sample", + num_train_timesteps=1000, + clip_sample=True, + clip_sample_range=5.0, + beta_schedule="squaredcos_cap_v2", + ) + + # regular denoising components + + torch.manual_seed(0) + image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size) + image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2") + + torch.manual_seed(0) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + text_encoder = CLIPTextModel( + CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=embedder_hidden_size, + projection_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + ) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), + block_out_channels=(32, 64), + 
attention_head_dim=(2, 4), + class_embed_type="projection", + # The class embeddings are the noise augmented image embeddings. + # I.e. the image embeddings concated with the noised embeddings of the same dimension + projection_class_embeddings_input_dim=embedder_projection_dim * 2, + cross_attention_dim=embedder_hidden_size, + layers_per_block=1, + upcast_attention=True, + use_linear_projection=True, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_schedule="scaled_linear", + beta_start=0.00085, + beta_end=0.012, + prediction_type="v_prediction", + set_alpha_to_one=False, + steps_offset=1, + ) + + torch.manual_seed(0) + vae = AutoencoderKL() + + components = { + # prior components + "prior_tokenizer": prior_tokenizer, + "prior_text_encoder": prior_text_encoder, + "prior": prior, + "prior_scheduler": prior_scheduler, + # image noising components + "image_normalizer": image_normalizer, + "image_noising_scheduler": image_noising_scheduler, + # regular denoising components + "tokenizer": tokenizer, + "text_encoder": text_encoder, + "unet": unet, + "scheduler": scheduler, + "vae": vae, + } + + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "A painting of a squirrel eating a burger", + "generator": generator, + "num_inference_steps": 2, + "prior_num_inference_steps": 2, + "output_type": "np", + } + return inputs + + # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass + # because UnCLIP GPU undeterminism requires a looser check. + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device == "cpu" + + self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference) + + # Overriding PipelineTesterMixin::test_inference_batch_single_identical + # because UnCLIP undeterminism requires a looser check. 
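+    # The override below tolerates a max element-wise difference of 1e-3 instead of the mixin's tighter default.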
+ def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + @unittest.skip("Test not supported because of the use of `_encode_prior_prompt()`.") + def test_encode_prompt_works_in_isolation(self): + pass + + +@nightly +@require_torch_accelerator +class StableUnCLIPPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_unclip(self): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" + ) + + pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16) + pipe.set_progress_bar_config(disable=None) + # stable unclip will oom when integration tests are run on a V100, + # so turn on memory savings + pipe.enable_attention_slicing() + pipe.enable_sequential_cpu_offload() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe("anime turtle", generator=generator, output_type="np") + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + assert_mean_pixel_difference(image, expected_image) + + def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self): + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + pipe.enable_sequential_cpu_offload() + + _ = pipe( + "anime turtle", + prior_num_inference_steps=2, + num_inference_steps=2, + output_type="np", + ) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 7 GB is allocated + assert mem_bytes < 7 * 10**9 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py new file mode 100644 index 0000000000000000000000000000000000000000..e7a0fbccef67bc7e06f20b8e6a4b8a1579d7c9dd --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py @@ -0,0 +1,313 @@ +import gc +import random +import unittest + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPTextConfig, + CLIPTextModel, + CLIPTokenizer, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + floats_tensor, + load_image, + load_numpy, + nightly, + require_torch_accelerator, + skip_mps, + torch_device, +) +from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, 
TEXT_GUIDED_IMAGE_VARIATION_PARAMS +from ..test_pipelines_common import ( + PipelineKarrasSchedulerTesterMixin, + PipelineLatentTesterMixin, + PipelineTesterMixin, + assert_mean_pixel_difference, +) + + +enable_full_determinism() + + +class StableUnCLIPImg2ImgPipelineFastTests( + PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase +): + pipeline_class = StableUnCLIPImg2ImgPipeline + params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS + batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS + image_params = frozenset( + [] + ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess + image_latents_params = frozenset([]) + + supports_dduf = False + + def get_dummy_components(self): + embedder_hidden_size = 32 + embedder_projection_dim = embedder_hidden_size + + # image encoding components + + feature_extractor = CLIPImageProcessor(crop_size=32, size=32) + + torch.manual_seed(0) + image_encoder = CLIPVisionModelWithProjection( + CLIPVisionConfig( + hidden_size=embedder_hidden_size, + projection_dim=embedder_projection_dim, + num_hidden_layers=5, + num_attention_heads=4, + image_size=32, + intermediate_size=37, + patch_size=1, + ) + ) + + # regular denoising components + + torch.manual_seed(0) + image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size) + image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2") + + torch.manual_seed(0) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + torch.manual_seed(0) + text_encoder = CLIPTextModel( + CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=embedder_hidden_size, + projection_dim=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + ) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), + block_out_channels=(32, 64), + attention_head_dim=(2, 4), + class_embed_type="projection", + # The class embeddings are the noise augmented image embeddings. + # I.e. 
the image embeddings concated with the noised embeddings of the same dimension + projection_class_embeddings_input_dim=embedder_projection_dim * 2, + cross_attention_dim=embedder_hidden_size, + layers_per_block=1, + upcast_attention=True, + use_linear_projection=True, + ) + + torch.manual_seed(0) + scheduler = DDIMScheduler( + beta_schedule="scaled_linear", + beta_start=0.00085, + beta_end=0.012, + prediction_type="v_prediction", + set_alpha_to_one=False, + steps_offset=1, + ) + + torch.manual_seed(0) + vae = AutoencoderKL() + + components = { + # image encoding components + "feature_extractor": feature_extractor, + "image_encoder": image_encoder.eval(), + # image noising components + "image_normalizer": image_normalizer.eval(), + "image_noising_scheduler": image_noising_scheduler, + # regular denoising components + "tokenizer": tokenizer, + "text_encoder": text_encoder.eval(), + "unet": unet.eval(), + "scheduler": scheduler, + "vae": vae.eval(), + } + + return components + + def get_dummy_inputs(self, device, seed=0, pil_image=True): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) + + if pil_image: + input_image = input_image * 0.5 + 0.5 + input_image = input_image.clamp(0, 1) + input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy() + input_image = DiffusionPipeline.numpy_to_pil(input_image)[0] + + return { + "prompt": "An anime racoon running a marathon", + "image": input_image, + "generator": generator, + "num_inference_steps": 2, + "output_type": "np", + } + + @skip_mps + def test_image_embeds_none(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + sd_pipe = StableUnCLIPImg2ImgPipeline(**components) + sd_pipe = sd_pipe.to(device) + sd_pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs.update({"image_embeds": None}) + image = sd_pipe(**inputs).images + image_slice = image[0, -3:, -3:, -1] + + assert image.shape == (1, 32, 32, 3) + expected_slice = np.array([0.4397, 0.7080, 0.5590, 0.4255, 0.7181, 0.5938, 0.4051, 0.3720, 0.5116]) + + assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 + + # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass + # because GPU undeterminism requires a looser check. + def test_attention_slicing_forward_pass(self): + test_max_difference = torch_device in ["cpu", "mps"] + + self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference) + + # Overriding PipelineTesterMixin::test_inference_batch_single_identical + # because undeterminism requires a looser check. 
+ def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False) + + @unittest.skip("Test not supported at the moment.") + def test_encode_prompt_works_in_isolation(self): + pass + + +@nightly +@require_torch_accelerator +class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_stable_unclip_l_img2img(self): + input_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" + ) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" + ) + + pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( + "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + # stable unclip will oom when integration tests are run on a V100, + # so turn on memory savings + pipe.enable_attention_slicing() + pipe.enable_sequential_cpu_offload() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe(input_image, "anime turtle", generator=generator, output_type="np") + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + assert_mean_pixel_difference(image, expected_image) + + def test_stable_unclip_h_img2img(self): + input_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" + ) + + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" + ) + + pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( + "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + # stable unclip will oom when integration tests are run on a V100, + # so turn on memory savings + pipe.enable_attention_slicing() + pipe.enable_sequential_cpu_offload() + + generator = torch.Generator(device="cpu").manual_seed(0) + output = pipe(input_image, "anime turtle", generator=generator, output_type="np") + + image = output.images[0] + + assert image.shape == (768, 768, 3) + + assert_mean_pixel_difference(image, expected_image) + + def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self): + input_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" + ) + + backend_empty_cache(torch_device) + backend_reset_max_memory_allocated(torch_device) + backend_reset_peak_memory_stats(torch_device) + + pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( + "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 + ) + pipe.set_progress_bar_config(disable=None) + pipe.enable_attention_slicing() + pipe.enable_sequential_cpu_offload() + + _ = pipe( + input_image, + "anime turtle", + 
num_inference_steps=2, + output_type="np", + ) + + mem_bytes = backend_max_memory_allocated(torch_device) + # make sure that less than 7 GB is allocated + assert mem_bytes < 7 * 10**9 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_video_diffusion/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_video_diffusion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..52595f7a8cd931b98a5cfa44ba7516eef8618b9e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/stable_video_diffusion/test_stable_video_diffusion.py @@ -0,0 +1,561 @@ +import gc +import random +import tempfile +import unittest + +import numpy as np +import torch +from transformers import ( + CLIPImageProcessor, + CLIPVisionConfig, + CLIPVisionModelWithProjection, +) + +import diffusers +from diffusers import ( + AutoencoderKLTemporalDecoder, + EulerDiscreteScheduler, + StableVideoDiffusionPipeline, + UNetSpatioTemporalConditionModel, +) +from diffusers.utils import load_image, logging +from diffusers.utils.import_utils import is_xformers_available + +from ...testing_utils import ( + CaptureLogger, + backend_empty_cache, + enable_full_determinism, + floats_tensor, + numpy_cosine_similarity_distance, + require_accelerate_version_greater, + require_accelerator, + require_torch_accelerator, + slow, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +class StableVideoDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = StableVideoDiffusionPipeline + params = frozenset(["image"]) + batch_params = frozenset(["image", "generator"]) + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + ] + ) + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + unet = UNetSpatioTemporalConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=8, + out_channels=4, + down_block_types=( + "CrossAttnDownBlockSpatioTemporal", + "DownBlockSpatioTemporal", + ), + up_block_types=("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal"), + cross_attention_dim=32, + num_attention_heads=8, + projection_class_embeddings_input_dim=96, + addition_time_embed_dim=32, + ) + scheduler = EulerDiscreteScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + interpolation_type="linear", + num_train_timesteps=1000, + prediction_type="v_prediction", + sigma_max=700.0, + sigma_min=0.002, + steps_offset=1, + timestep_spacing="leading", + timestep_type="continuous", + trained_betas=None, + use_karras_sigmas=True, + ) + + torch.manual_seed(0) + vae = AutoencoderKLTemporalDecoder( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + latent_channels=4, + ) + + torch.manual_seed(0) + config = CLIPVisionConfig( + hidden_size=32, + projection_dim=32, + num_hidden_layers=5, + num_attention_heads=4, + 
image_size=32, + intermediate_size=37, + patch_size=1, + ) + image_encoder = CLIPVisionModelWithProjection(config) + + torch.manual_seed(0) + feature_extractor = CLIPImageProcessor(crop_size=32, size=32) + components = { + "unet": unet, + "image_encoder": image_encoder, + "scheduler": scheduler, + "vae": vae, + "feature_extractor": feature_extractor, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + image = floats_tensor((1, 3, 32, 32), rng=random.Random(0)).to(device) + inputs = { + "generator": generator, + "image": image, + "num_inference_steps": 2, + "output_type": "pt", + "min_guidance_scale": 1.0, + "max_guidance_scale": 2.5, + "num_frames": 2, + "height": 32, + "width": 32, + } + return inputs + + @unittest.skip("Deprecated functionality") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("Batched inference works and outputs look correct, but the test is failing") + def test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + pipe.to(torch_device) + + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = torch.Generator("cpu").manual_seed(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + batched_inputs["generator"] = [torch.Generator("cpu").manual_seed(0) for i in range(batch_size)] + batched_inputs["image"] = torch.cat([inputs["image"]] * batch_size, dim=0) + + output = pipe(**inputs).frames + output_batch = pipe(**batched_inputs).frames + + assert len(output_batch) == batch_size + + max_diff = np.abs(to_np(output_batch[0]) - to_np(output[0])).max() + assert max_diff < expected_max_diff + + @unittest.skip("Test is similar to test_inference_batch_single_identical") + def test_inference_batch_consistent(self): + pass + + def test_np_output_type(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["output_type"] = "np" + output = pipe(**inputs).frames + self.assertTrue(isinstance(output, np.ndarray)) + self.assertEqual(len(output.shape), 5) + + def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + output = pipe(**self.get_dummy_inputs(generator_device)).frames[0] + output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] + + max_diff = np.abs(to_np(output) - 
to_np(output_tuple)).max() + self.assertLess(max_diff, expected_max_difference) + + @unittest.skip("Test is currently failing") + def test_float16_inference(self, expected_max_diff=5e-2): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + components = self.get_dummy_components() + pipe_fp16 = self.pipeline_class(**components) + for component in pipe_fp16.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe_fp16.to(torch_device, torch.float16) + pipe_fp16.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs).frames[0] + + fp16_inputs = self.get_dummy_inputs(torch_device) + output_fp16 = pipe_fp16(**fp16_inputs).frames[0] + + max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() + self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self, expected_max_diff=1e-2): + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs).frames[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs).frames[0] + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess( + max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." 
+ ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + if not hasattr(self.pipeline_class, "_optional_components"): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output = pipe(**inputs).frames[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + output_loaded = pipe_loaded(**inputs).frames[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + def test_save_load_local(self, expected_max_difference=9e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs).frames[0] + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(diffusers.logging.INFO) + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + + with CaptureLogger(logger) as cap_logger: + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + + for name in pipe_loaded.components.keys(): + if name not in pipe_loaded._optional_components: + assert name in str(cap_logger) + + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs).frames[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu")).frames[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [ + component.device.type for component in pipe.components.values() if hasattr(component, "device") + ] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device)).frames[0] + 
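+        # A NaN-free output serves as a cheap sanity check that inference still runs correctly after the move to the accelerator.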
self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + @require_accelerator + @require_accelerate_version_greater("0.14.0") + def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_offload = pipe(**inputs).frames[0] + + pipe.enable_sequential_cpu_offload(device=torch_device) + + inputs = self.get_dummy_inputs(generator_device) + output_with_offload = pipe(**inputs).frames[0] + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") + + @require_accelerator + @require_accelerate_version_greater("0.17.0") + def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): + generator_device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(generator_device) + output_without_offload = pipe(**inputs).frames[0] + + pipe.enable_model_cpu_offload(device=torch_device) + inputs = self.get_dummy_inputs(generator_device) + output_with_offload = pipe(**inputs).frames[0] + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") + offloaded_modules = [ + v + for k, v in pipe.components.items() + if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload + ] + ( + self.assertTrue(all(v.device.type == "cpu" for v in offloaded_modules)), + f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", + ) + + @unittest.skipIf( + torch_device != "cuda" or not is_xformers_available(), + reason="XFormers attention is only available with CUDA and `xformers` installed", + ) + def test_xformers_attention_forwardGenerator_pass(self): + expected_max_diff = 9e-4 + + if not self.test_xformers_attention: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_without_offload = pipe(**inputs).frames[0] + output_without_offload = ( + 
output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload + ) + + pipe.enable_xformers_memory_efficient_attention() + inputs = self.get_dummy_inputs(torch_device) + output_with_offload = pipe(**inputs).frames[0] + output_with_offload = ( + output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload + ) + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results") + + def test_disable_cfg(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + inputs["max_guidance_scale"] = 1.0 + output = pipe(**inputs).frames + self.assertEqual(len(output.shape), 5) + + +@slow +@require_torch_accelerator +class StableVideoDiffusionPipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_sd_video(self): + pipe = StableVideoDiffusionPipeline.from_pretrained( + "stabilityai/stable-video-diffusion-img2vid", + variant="fp16", + torch_dtype=torch.float16, + ) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png?download=true" + ) + + generator = torch.Generator("cpu").manual_seed(0) + num_frames = 3 + + output = pipe( + image=image, + num_frames=num_frames, + generator=generator, + num_inference_steps=3, + output_type="np", + ) + + image = output.frames[0] + assert image.shape == (num_frames, 576, 1024, 3) + + image_slice = image[0, -3:, -3:, -1] + expected_slice = np.array([0.8592, 0.8645, 0.8499, 0.8722, 0.8769, 0.8421, 0.8557, 0.8528, 0.8285]) + assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipeline_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipeline_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6d9e681979762239bb59fce967bb5bb5462a8d79 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipeline_utils.py @@ -0,0 +1,952 @@ +import contextlib +import io +import re +import unittest + +import torch +from PIL import Image +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AnimateDiffPipeline, + AnimateDiffVideoToVideoPipeline, + AutoencoderKL, + DDIMScheduler, + MotionAdapter, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipeline, + StableDiffusionPipeline, + UNet2DConditionModel, +) +from diffusers.pipelines.pipeline_loading_utils import is_safetensors_compatible, variant_compatible_siblings + +from ..testing_utils import require_torch_accelerator, torch_device + + +class IsSafetensorsCompatibleTests(unittest.TestCase): + def test_all_is_compatible(self): + filenames = [ + 
"safety_checker/pytorch_model.bin", + "safety_checker/model.safetensors", + "vae/diffusion_pytorch_model.bin", + "vae/diffusion_pytorch_model.safetensors", + "text_encoder/pytorch_model.bin", + "text_encoder/model.safetensors", + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model.safetensors", + ] + self.assertTrue(is_safetensors_compatible(filenames)) + + def test_diffusers_model_is_compatible(self): + filenames = [ + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model.safetensors", + ] + self.assertTrue(is_safetensors_compatible(filenames)) + + def test_diffusers_model_is_not_compatible(self): + filenames = [ + "safety_checker/pytorch_model.bin", + "safety_checker/model.safetensors", + "vae/diffusion_pytorch_model.bin", + "vae/diffusion_pytorch_model.safetensors", + "text_encoder/pytorch_model.bin", + "text_encoder/model.safetensors", + "unet/diffusion_pytorch_model.bin", + # Removed: 'unet/diffusion_pytorch_model.safetensors', + ] + self.assertFalse(is_safetensors_compatible(filenames)) + + def test_transformer_model_is_compatible(self): + filenames = [ + "text_encoder/pytorch_model.bin", + "text_encoder/model.safetensors", + ] + self.assertTrue(is_safetensors_compatible(filenames)) + + def test_transformer_model_is_not_compatible(self): + filenames = [ + "safety_checker/pytorch_model.bin", + "safety_checker/model.safetensors", + "vae/diffusion_pytorch_model.bin", + "vae/diffusion_pytorch_model.safetensors", + "text_encoder/pytorch_model.bin", + # Removed: 'text_encoder/model.safetensors', + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + + def test_all_is_compatible_variant(self): + filenames = [ + "safety_checker/pytorch_model.fp16.bin", + "safety_checker/model.fp16.safetensors", + "vae/diffusion_pytorch_model.fp16.bin", + "vae/diffusion_pytorch_model.fp16.safetensors", + "text_encoder/pytorch_model.fp16.bin", + "text_encoder/model.fp16.safetensors", + "unet/diffusion_pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_diffusers_model_is_compatible_variant(self): + filenames = [ + "unet/diffusion_pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_diffusers_model_is_compatible_variant_mixed(self): + filenames = [ + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_diffusers_model_is_not_compatible_variant(self): + filenames = [ + "safety_checker/pytorch_model.fp16.bin", + "safety_checker/model.fp16.safetensors", + "vae/diffusion_pytorch_model.fp16.bin", + "vae/diffusion_pytorch_model.fp16.safetensors", + "text_encoder/pytorch_model.fp16.bin", + "text_encoder/model.fp16.safetensors", + "unet/diffusion_pytorch_model.fp16.bin", + # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', + ] + self.assertFalse(is_safetensors_compatible(filenames)) + + def test_transformer_model_is_compatible_variant(self): + filenames = [ + "text_encoder/pytorch_model.fp16.bin", + "text_encoder/model.fp16.safetensors", + ] + 
self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_transformer_model_is_not_compatible_variant(self): + filenames = [ + "safety_checker/pytorch_model.fp16.bin", + "safety_checker/model.fp16.safetensors", + "vae/diffusion_pytorch_model.fp16.bin", + "vae/diffusion_pytorch_model.fp16.safetensors", + "text_encoder/pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + + def test_transformer_model_is_compatible_variant_extra_folder(self): + filenames = [ + "safety_checker/pytorch_model.fp16.bin", + "safety_checker/model.fp16.safetensors", + "vae/diffusion_pytorch_model.fp16.bin", + "vae/diffusion_pytorch_model.fp16.safetensors", + "text_encoder/pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames, folder_names={"vae", "unet"})) + self.assertTrue(is_safetensors_compatible(filenames, folder_names={"vae", "unet"}, variant="fp16")) + + def test_transformer_model_is_not_compatible_variant_extra_folder(self): + filenames = [ + "safety_checker/pytorch_model.fp16.bin", + "safety_checker/model.fp16.safetensors", + "vae/diffusion_pytorch_model.fp16.bin", + "vae/diffusion_pytorch_model.fp16.safetensors", + "text_encoder/pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.bin", + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames, folder_names={"text_encoder"})) + + def test_transformers_is_compatible_sharded(self): + filenames = [ + "text_encoder/pytorch_model.bin", + "text_encoder/model-00001-of-00002.safetensors", + "text_encoder/model-00002-of-00002.safetensors", + ] + self.assertTrue(is_safetensors_compatible(filenames)) + + def test_transformers_is_compatible_variant_sharded(self): + filenames = [ + "text_encoder/pytorch_model.bin", + "text_encoder/model.fp16-00001-of-00002.safetensors", + "text_encoder/model.fp16-00001-of-00002.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_diffusers_is_compatible_sharded(self): + filenames = [ + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model-00001-of-00002.safetensors", + "unet/diffusion_pytorch_model-00002-of-00002.safetensors", + ] + self.assertTrue(is_safetensors_compatible(filenames)) + + def test_diffusers_is_compatible_variant_sharded(self): + filenames = [ + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model.fp16-00001-of-00002.safetensors", + "unet/diffusion_pytorch_model.fp16-00001-of-00002.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_diffusers_is_compatible_only_variants(self): + filenames = [ + "unet/diffusion_pytorch_model.fp16.safetensors", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_diffusers_is_compatible_no_components(self): + filenames = [ + "diffusion_pytorch_model.bin", + ] + self.assertFalse(is_safetensors_compatible(filenames)) + + def test_diffusers_is_compatible_no_components_only_variants(self): + filenames = [ + "diffusion_pytorch_model.fp16.bin", + ] + 
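+        # Only a variant .bin exists at the repo root, so there is no safetensors file to satisfy compatibility.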
self.assertFalse(is_safetensors_compatible(filenames)) + + def test_is_compatible_mixed_variants(self): + filenames = [ + "unet/diffusion_pytorch_model.fp16.safetensors", + "vae/diffusion_pytorch_model.safetensors", + ] + self.assertTrue(is_safetensors_compatible(filenames, variant="fp16")) + + def test_is_compatible_variant_and_non_safetensors(self): + filenames = [ + "unet/diffusion_pytorch_model.fp16.safetensors", + "vae/diffusion_pytorch_model.bin", + ] + self.assertFalse(is_safetensors_compatible(filenames, variant="fp16")) + + +class VariantCompatibleSiblingsTest(unittest.TestCase): + def test_only_non_variants_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"vae/diffusion_pytorch_model.{variant}.safetensors", + "vae/diffusion_pytorch_model.safetensors", + f"text_encoder/model.{variant}.safetensors", + "text_encoder/model.safetensors", + f"unet/diffusion_pytorch_model.{variant}.safetensors", + "unet/diffusion_pytorch_model.safetensors", + ] + + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + assert all(variant not in f for f in model_filenames) + + def test_only_variants_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"vae/diffusion_pytorch_model.{variant}.safetensors", + "vae/diffusion_pytorch_model.safetensors", + f"text_encoder/model.{variant}.safetensors", + "text_encoder/model.safetensors", + f"unet/diffusion_pytorch_model.{variant}.safetensors", + "unet/diffusion_pytorch_model.safetensors", + ] + + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + + def test_mixed_variants_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + non_variant_file = "text_encoder/model.safetensors" + filenames = [ + f"vae/diffusion_pytorch_model.{variant}.safetensors", + "vae/diffusion_pytorch_model.safetensors", + "text_encoder/model.safetensors", + f"unet/diffusion_pytorch_model.{variant}.safetensors", + "unet/diffusion_pytorch_model.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f if f != non_variant_file else variant not in f for f in model_filenames) + + def test_non_variants_in_main_dir_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"diffusion_pytorch_model.{variant}.safetensors", + "diffusion_pytorch_model.safetensors", + "model.safetensors", + f"model.{variant}.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + assert all(variant not in f for f in model_filenames) + + def test_variants_in_main_dir_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"diffusion_pytorch_model.{variant}.safetensors", + "diffusion_pytorch_model.safetensors", + "model.safetensors", + f"model.{variant}.safetensors", + f"diffusion_pytorch_model.{variant}.safetensors", + "diffusion_pytorch_model.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + + def test_mixed_variants_in_main_dir_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + 
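+        # `model.safetensors` has no fp16 counterpart below, so the resolver is expected to fall back to the non-variant file for that component only.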
non_variant_file = "model.safetensors" + filenames = [ + f"diffusion_pytorch_model.{variant}.safetensors", + "diffusion_pytorch_model.safetensors", + "model.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f if f != non_variant_file else variant not in f for f in model_filenames) + + def test_sharded_variants_in_main_dir_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + "diffusion_pytorch_model.safetensors.index.json", + "diffusion_pytorch_model-00001-of-00003.safetensors", + "diffusion_pytorch_model-00002-of-00003.safetensors", + "diffusion_pytorch_model-00003-of-00003.safetensors", + f"diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", + f"diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", + f"diffusion_pytorch_model.safetensors.index.{variant}.json", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + + def test_mixed_sharded_and_variant_in_main_dir_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + "diffusion_pytorch_model.safetensors.index.json", + "diffusion_pytorch_model-00001-of-00003.safetensors", + "diffusion_pytorch_model-00002-of-00003.safetensors", + "diffusion_pytorch_model-00003-of-00003.safetensors", + f"diffusion_pytorch_model.{variant}.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + + def test_mixed_sharded_non_variants_in_main_dir_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"diffusion_pytorch_model.safetensors.index.{variant}.json", + "diffusion_pytorch_model.safetensors.index.json", + "diffusion_pytorch_model-00001-of-00003.safetensors", + "diffusion_pytorch_model-00002-of-00003.safetensors", + "diffusion_pytorch_model-00003-of-00003.safetensors", + f"diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", + f"diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + assert all(variant not in f for f in model_filenames) + + def test_sharded_non_variants_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"unet/diffusion_pytorch_model.safetensors.index.{variant}.json", + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + "unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + f"unet/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", + f"unet/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + assert all(variant not in f for f in model_filenames) + + def test_sharded_variants_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"unet/diffusion_pytorch_model.safetensors.index.{variant}.json", + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + 
"unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + f"unet/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", + f"unet/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + assert model_filenames == variant_filenames + + def test_single_variant_with_sharded_non_variant_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + "unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + f"unet/diffusion_pytorch_model.{variant}.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + + def test_mixed_single_variant_with_sharded_non_variant_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + allowed_non_variant = "unet" + filenames = [ + "vae/diffusion_pytorch_model.safetensors.index.json", + "vae/diffusion_pytorch_model-00001-of-00003.safetensors", + "vae/diffusion_pytorch_model-00002-of-00003.safetensors", + "vae/diffusion_pytorch_model-00003-of-00003.safetensors", + f"vae/diffusion_pytorch_model.{variant}.safetensors", + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + "unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) + + def test_sharded_mixed_variants_downloaded(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + allowed_non_variant = "unet" + filenames = [ + f"vae/diffusion_pytorch_model.safetensors.index.{variant}.json", + "vae/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + "unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors", + f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors", + "vae/diffusion_pytorch_model-00001-of-00003.safetensors", + "vae/diffusion_pytorch_model-00002-of-00003.safetensors", + "vae/diffusion_pytorch_model-00003-of-00003.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) + + def test_downloading_when_no_variant_exists(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = ["model.safetensors", "diffusion_pytorch_model.safetensors"] + with self.assertRaisesRegex(ValueError, "but no such modeling files are available. 
"): + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + + def test_downloading_use_safetensors_false(self): + ignore_patterns = ["*.safetensors"] + filenames = [ + "text_encoder/model.bin", + "unet/diffusion_pytorch_model.bin", + "unet/diffusion_pytorch_model.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + + assert all(".safetensors" not in f for f in model_filenames) + + def test_non_variant_in_main_dir_with_variant_in_subfolder(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + allowed_non_variant = "diffusion_pytorch_model.safetensors" + filenames = [ + f"unet/diffusion_pytorch_model.{variant}.safetensors", + "diffusion_pytorch_model.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) + + def test_download_variants_when_component_has_no_safetensors_variant(self): + ignore_patterns = None + variant = "fp16" + filenames = [ + f"unet/diffusion_pytorch_model.{variant}.bin", + "vae/diffusion_pytorch_model.safetensors", + f"vae/diffusion_pytorch_model.{variant}.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert { + f"unet/diffusion_pytorch_model.{variant}.bin", + f"vae/diffusion_pytorch_model.{variant}.safetensors", + } == model_filenames + + def test_error_when_download_sharded_variants_when_component_has_no_safetensors_variant(self): + ignore_patterns = ["*.bin"] + variant = "fp16" + filenames = [ + f"vae/diffusion_pytorch_model.bin.index.{variant}.json", + "vae/diffusion_pytorch_model.safetensors.index.json", + f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.bin", + "vae/diffusion_pytorch_model-00001-of-00003.safetensors", + "vae/diffusion_pytorch_model-00002-of-00003.safetensors", + "vae/diffusion_pytorch_model-00003-of-00003.safetensors", + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + "unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.bin", + ] + with self.assertRaisesRegex(ValueError, "but no such modeling files are available. 
"): + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + + def test_download_sharded_variants_when_component_has_no_safetensors_variant_and_safetensors_false(self): + ignore_patterns = ["*.safetensors"] + allowed_non_variant = "unet" + variant = "fp16" + filenames = [ + f"vae/diffusion_pytorch_model.bin.index.{variant}.json", + "vae/diffusion_pytorch_model.safetensors.index.json", + f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.bin", + "vae/diffusion_pytorch_model-00001-of-00003.safetensors", + "vae/diffusion_pytorch_model-00002-of-00003.safetensors", + "vae/diffusion_pytorch_model-00003-of-00003.safetensors", + "unet/diffusion_pytorch_model.safetensors.index.json", + "unet/diffusion_pytorch_model-00001-of-00003.safetensors", + "unet/diffusion_pytorch_model-00002-of-00003.safetensors", + "unet/diffusion_pytorch_model-00003-of-00003.safetensors", + f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.bin", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames) + + def test_download_sharded_legacy_variants(self): + ignore_patterns = None + variant = "fp16" + filenames = [ + f"vae/transformer/diffusion_pytorch_model.safetensors.{variant}.index.json", + "vae/diffusion_pytorch_model.safetensors.index.json", + f"vae/diffusion_pytorch_model-00002-of-00002.{variant}.safetensors", + "vae/diffusion_pytorch_model-00001-of-00003.safetensors", + "vae/diffusion_pytorch_model-00002-of-00003.safetensors", + "vae/diffusion_pytorch_model-00003-of-00003.safetensors", + f"vae/diffusion_pytorch_model-00001-of-00002.{variant}.safetensors", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=variant, ignore_patterns=ignore_patterns + ) + assert all(variant in f for f in model_filenames) + + def test_download_onnx_models(self): + ignore_patterns = ["*.safetensors"] + filenames = [ + "vae/model.onnx", + "unet/model.onnx", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + assert model_filenames == set(filenames) + + def test_download_flax_models(self): + ignore_patterns = ["*.safetensors", "*.bin"] + filenames = [ + "vae/diffusion_flax_model.msgpack", + "unet/diffusion_flax_model.msgpack", + ] + model_filenames, variant_filenames = variant_compatible_siblings( + filenames, variant=None, ignore_patterns=ignore_patterns + ) + assert model_filenames == set(filenames) + + +class ProgressBarTests(unittest.TestCase): + def get_dummy_components_image_generation(self): + cross_attention_dim = 8 + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=1, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + 
latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=16, + layer_norm_eps=1e-05, + num_attention_heads=2, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def get_dummy_components_video_generation(self): + cross_attention_dim = 8 + block_out_channels = (8, 8) + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=block_out_channels, + layers_per_block=2, + sample_size=8, + in_channels=4, + out_channels=4, + down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="linear", + clip_sample=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=block_out_channels, + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + torch.manual_seed(0) + motion_adapter = MotionAdapter( + block_out_channels=block_out_channels, + motion_layers_per_block=2, + motion_norm_num_groups=2, + motion_num_attention_heads=4, + ) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "motion_adapter": motion_adapter, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def test_text_to_image(self): + components = self.get_dummy_components_image_generation() + pipe = StableDiffusionPipeline(**components) + pipe.to(torch_device) + + inputs = {"prompt": "a cute cat", "num_inference_steps": 2} + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + stderr = stderr.getvalue() + # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, + # so we just match "5" in "#####| 1/5 [00:01<00:00]" + max_steps = re.search("/(.*?) 
", stderr).group(1) + self.assertTrue(max_steps is not None and len(max_steps) > 0) + self.assertTrue( + f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" + ) + + pipe.set_progress_bar_config(disable=True) + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") + + def test_image_to_image(self): + components = self.get_dummy_components_image_generation() + pipe = StableDiffusionImg2ImgPipeline(**components) + pipe.to(torch_device) + + image = Image.new("RGB", (32, 32)) + inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "strength": 0.5, "image": image} + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + stderr = stderr.getvalue() + # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, + # so we just match "5" in "#####| 1/5 [00:01<00:00]" + max_steps = re.search("/(.*?) ", stderr).group(1) + self.assertTrue(max_steps is not None and len(max_steps) > 0) + self.assertTrue( + f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" + ) + + pipe.set_progress_bar_config(disable=True) + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") + + def test_inpainting(self): + components = self.get_dummy_components_image_generation() + pipe = StableDiffusionInpaintPipeline(**components) + pipe.to(torch_device) + + image = Image.new("RGB", (32, 32)) + mask = Image.new("RGB", (32, 32)) + inputs = { + "prompt": "a cute cat", + "num_inference_steps": 2, + "strength": 0.5, + "image": image, + "mask_image": mask, + } + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + stderr = stderr.getvalue() + # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, + # so we just match "5" in "#####| 1/5 [00:01<00:00]" + max_steps = re.search("/(.*?) ", stderr).group(1) + self.assertTrue(max_steps is not None and len(max_steps) > 0) + self.assertTrue( + f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" + ) + + pipe.set_progress_bar_config(disable=True) + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") + + def test_text_to_video(self): + components = self.get_dummy_components_video_generation() + pipe = AnimateDiffPipeline(**components) + pipe.to(torch_device) + + inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "num_frames": 2} + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + stderr = stderr.getvalue() + # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, + # so we just match "5" in "#####| 1/5 [00:01<00:00]" + max_steps = re.search("/(.*?) 
", stderr).group(1) + self.assertTrue(max_steps is not None and len(max_steps) > 0) + self.assertTrue( + f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" + ) + + pipe.set_progress_bar_config(disable=True) + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") + + def test_video_to_video(self): + components = self.get_dummy_components_video_generation() + pipe = AnimateDiffVideoToVideoPipeline(**components) + pipe.to(torch_device) + + num_frames = 2 + video = [Image.new("RGB", (32, 32))] * num_frames + inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "video": video} + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + stderr = stderr.getvalue() + # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, + # so we just match "5" in "#####| 1/5 [00:01<00:00]" + max_steps = re.search("/(.*?) ", stderr).group(1) + self.assertTrue(max_steps is not None and len(max_steps) > 0) + self.assertTrue( + f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" + ) + + pipe.set_progress_bar_config(disable=True) + with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): + _ = pipe(**inputs) + self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") + + +@require_torch_accelerator +class PipelineDeviceAndDtypeStabilityTests(unittest.TestCase): + expected_pipe_device = torch.device(f"{torch_device}:0") + expected_pipe_dtype = torch.float64 + + def get_dummy_components_image_generation(self): + cross_attention_dim = 8 + + torch.manual_seed(0) + unet = UNet2DConditionModel( + block_out_channels=(4, 8), + layers_per_block=1, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=cross_attention_dim, + norm_num_groups=2, + ) + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + torch.manual_seed(0) + vae = AutoencoderKL( + block_out_channels=[4, 8], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + norm_num_groups=2, + ) + torch.manual_seed(0) + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=cross_attention_dim, + intermediate_size=16, + layer_norm_eps=1e-05, + num_attention_heads=2, + num_hidden_layers=2, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + "image_encoder": None, + } + return components + + def test_deterministic_device(self): + components = self.get_dummy_components_image_generation() + + pipe = StableDiffusionPipeline(**components) + pipe.to(device=torch_device, dtype=torch.float32) + + pipe.unet.to(device="cpu") + pipe.vae.to(device=torch_device) + pipe.text_encoder.to(device=f"{torch_device}:0") + + pipe_device = pipe.device + + self.assertEqual( + 
self.expected_pipe_device, + pipe_device, + f"Wrong expected device. Expected {self.expected_pipe_device}. Got {pipe_device}.", + ) + + def test_deterministic_dtype(self): + components = self.get_dummy_components_image_generation() + + pipe = StableDiffusionPipeline(**components) + pipe.to(device=torch_device, dtype=torch.float32) + + pipe.unet.to(dtype=torch.float16) + pipe.vae.to(dtype=torch.float32) + pipe.text_encoder.to(dtype=torch.float64) + + pipe_dtype = pipe.dtype + + self.assertEqual( + self.expected_pipe_dtype, + pipe_dtype, + f"Wrong expected dtype. Expected {self.expected_pipe_dtype}. Got {pipe_dtype}.", + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines.py new file mode 100644 index 0000000000000000000000000000000000000000..09df140f1af8721e9555eba75ed32a14c7e5091b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines.py @@ -0,0 +1,2462 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import json +import os +import random +import re +import shutil +import sys +import tempfile +import traceback +import unittest +import unittest.mock as mock +import warnings + +import numpy as np +import PIL.Image +import requests_mock +import safetensors.torch +import torch +import torch.nn as nn +from huggingface_hub import snapshot_download +from parameterized import parameterized +from PIL import Image +from requests.exceptions import HTTPError +from transformers import CLIPImageProcessor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +from diffusers import ( + AutoencoderKL, + ConfigMixin, + DDIMPipeline, + DDIMScheduler, + DDPMPipeline, + DDPMScheduler, + DiffusionPipeline, + DPMSolverMultistepScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + LMSDiscreteScheduler, + ModelMixin, + PNDMScheduler, + StableDiffusionImg2ImgPipeline, + StableDiffusionInpaintPipelineLegacy, + StableDiffusionPipeline, + UNet2DConditionModel, + UNet2DModel, + UniPCMultistepScheduler, + logging, +) +from diffusers.pipelines.pipeline_utils import _get_pipeline_class +from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME +from diffusers.utils import ( + CONFIG_NAME, + WEIGHTS_NAME, +) +from diffusers.utils.torch_utils import is_compiled_module + +from ..testing_utils import ( + CaptureLogger, + backend_empty_cache, + enable_full_determinism, + floats_tensor, + get_python_version, + get_tests_dir, + is_torch_compile, + load_numpy, + nightly, + require_compel, + require_flax, + require_hf_hub_version_greater, + require_onnxruntime, + require_peft_backend, + require_peft_version_greater, + require_torch_2, + require_torch_accelerator, + require_transformers_version_greater, + run_test_in_subprocess, + slow, + torch_device, +) + + +enable_full_determinism() + + +# Will be run via run_test_in_subprocess +def _test_from_save_pretrained_dynamo(in_queue, out_queue, 
timeout): + error = None + try: + # 1. Load models + model = UNet2DModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=3, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + model = torch.compile(model) + scheduler = DDPMScheduler(num_train_timesteps=10) + + ddpm = DDPMPipeline(model, scheduler) + + # previous diffusers versions stripped compilation off + # compiled modules + assert is_compiled_module(ddpm.unet) + + ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + with tempfile.TemporaryDirectory() as tmpdirname: + ddpm.save_pretrained(tmpdirname) + new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) + new_ddpm.to(torch_device) + + generator = torch.Generator(device=torch_device).manual_seed(0) + image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images + + generator = torch.Generator(device=torch_device).manual_seed(0) + new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images + + assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" + except Exception: + error = f"{traceback.format_exc()}" + + results = {"error": error} + out_queue.put(results, timeout=timeout) + out_queue.join() + + +class CustomEncoder(ModelMixin, ConfigMixin): + def __init__(self): + super().__init__() + self.linear = nn.Linear(3, 3) + + +class CustomPipeline(DiffusionPipeline): + def __init__(self, encoder: CustomEncoder, scheduler: DDIMScheduler): + super().__init__() + self.register_modules(encoder=encoder, scheduler=scheduler) + + +class DownloadTests(unittest.TestCase): + @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners") + def test_one_request_upon_cached(self): + # TODO: For some reason this test fails on MPS where no HEAD call is made. 
+ if torch_device == "mps": + return + + with tempfile.TemporaryDirectory() as tmpdirname: + with requests_mock.mock(real_http=True) as m: + DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe", cache_dir=tmpdirname) + + download_requests = [r.method for r in m.request_history] + assert download_requests.count("HEAD") == 15, "15 calls to files" + assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json" + assert len(download_requests) == 32, ( + "2 calls per file (15 files) + send_telemetry, model_info and model_index.json" + ) + + with requests_mock.mock(real_http=True) as m: + DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname + ) + + cache_requests = [r.method for r in m.request_history] + assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD" + assert cache_requests.count("GET") == 1, "model info is only GET" + assert len(cache_requests) == 2, ( + "We should call only `model_info` to check for _commit hash and `send_telemetry`" + ) + + def test_less_downloads_passed_object(self): + with tempfile.TemporaryDirectory() as tmpdirname: + cached_folder = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname + ) + + # make sure safety checker is not downloaded + assert "safety_checker" not in os.listdir(cached_folder) + + # make sure rest is downloaded + assert "unet" in os.listdir(cached_folder) + assert "tokenizer" in os.listdir(cached_folder) + assert "vae" in os.listdir(cached_folder) + assert "model_index.json" in os.listdir(cached_folder) + assert "scheduler" in os.listdir(cached_folder) + assert "feature_extractor" in os.listdir(cached_folder) + + @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners") + def test_less_downloads_passed_object_calls(self): + # TODO: For some reason this test fails on MPS where no HEAD call is made. 
+ if torch_device == "mps": + return + + with tempfile.TemporaryDirectory() as tmpdirname: + with requests_mock.mock(real_http=True) as m: + DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname + ) + + download_requests = [r.method for r in m.request_history] + # 15 - 2 because no call to config or model file for `safety_checker` + assert download_requests.count("HEAD") == 13, "13 calls to files" + # 17 - 2 because no call to config or model file for `safety_checker` + assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json" + assert len(download_requests) == 28, ( + "2 calls per file (13 files) + send_telemetry, model_info and model_index.json" + ) + + with requests_mock.mock(real_http=True) as m: + DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname + ) + + cache_requests = [r.method for r in m.request_history] + assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD" + assert cache_requests.count("GET") == 1, "model info is only GET" + assert len(cache_requests) == 2, ( + "We should call only `model_info` to check for _commit hash and `send_telemetry`" + ) + + def test_download_only_pytorch(self): + with tempfile.TemporaryDirectory() as tmpdirname: + # pipeline has Flax weights + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a flax file even if we have some here: + # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack + assert not any(f.endswith(".msgpack") for f in files) + # We need to never convert this tiny model to safetensors for this test to pass + assert not any(f.endswith(".safetensors") for f in files) + + def test_force_safetensors_error(self): + with tempfile.TemporaryDirectory() as tmpdirname: + # pipeline has Flax weights + with self.assertRaises(EnvironmentError): + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe-no-safetensors", + safety_checker=None, + cache_dir=tmpdirname, + use_safetensors=True, + ) + + def test_download_safetensors(self): + with tempfile.TemporaryDirectory() as tmpdirname: + # pipeline has Flax weights + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe-safetensors", + safety_checker=None, + cache_dir=tmpdirname, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a pytorch file even if we have some here: + # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack + assert not any(f.endswith(".bin") for f in files) + + def test_download_safetensors_index(self): + for variant in ["fp16", None]: + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", + cache_dir=tmpdirname, + use_safetensors=True, + variant=variant, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] 
+ + # None of the downloaded files should be a safetensors file even if we have some here: + # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder + if variant is None: + assert not any("fp16" in f for f in files) + else: + model_files = [f for f in files if "safetensors" in f] + assert all("fp16" in f for f in model_files) + + assert len([f for f in files if ".safetensors" in f]) == 8 + assert not any(".bin" in f for f in files) + + def test_download_bin_index(self): + for variant in ["fp16", None]: + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", + cache_dir=tmpdirname, + use_safetensors=False, + variant=variant, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a safetensors file even if we have some here: + # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder + if variant is None: + assert not any("fp16" in f for f in files) + else: + model_files = [f for f in files if "bin" in f] + assert all("fp16" in f for f in model_files) + + assert len([f for f in files if ".bin" in f]) == 8 + assert not any(".safetensors" in f for f in files) + + def test_download_no_openvino_by_default(self): + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-open-vino", + cache_dir=tmpdirname, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # make sure that by default no openvino weights are downloaded + assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) + assert not any("openvino_" in f for f in files) + + def test_download_no_onnx_by_default(self): + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-xl-pipe", + cache_dir=tmpdirname, + use_safetensors=False, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # make sure that by default no onnx weights are downloaded for non-ONNX pipelines + assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) + assert not any((f.endswith(".onnx") or f.endswith(".pb")) for f in files) + + @require_onnxruntime + def test_download_onnx_by_default_for_onnx_pipelines(self): + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", + cache_dir=tmpdirname, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # make sure that by default onnx weights are downloaded for ONNX pipelines + assert any((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files) + assert any((f.endswith(".onnx")) for f in files) + assert any((f.endswith(".pb")) for f in files) + + def test_download_no_safety_checker(self): + prompt = "hello" + pipe = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe = pipe.to(torch_device) + generator = 
torch.manual_seed(0) + out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") + pipe_2 = pipe_2.to(torch_device) + generator = torch.manual_seed(0) + out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + assert np.max(np.abs(out - out_2)) < 1e-3 + + def test_load_no_safety_checker_explicit_locally(self): + prompt = "hello" + pipe = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe = pipe.to(torch_device) + generator = torch.manual_seed(0) + out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None) + pipe_2 = pipe_2.to(torch_device) + + generator = torch.manual_seed(0) + + out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + assert np.max(np.abs(out - out_2)) < 1e-3 + + def test_load_no_safety_checker_default_locally(self): + prompt = "hello" + pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") + pipe = pipe.to(torch_device) + + generator = torch.manual_seed(0) + out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname) + pipe_2 = pipe_2.to(torch_device) + + generator = torch.manual_seed(0) + + out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + assert np.max(np.abs(out - out_2)) < 1e-3 + + def test_cached_files_are_used_when_no_internet(self): + # A mock response for an HTTP head request to emulate server down + response_mock = mock.Mock() + response_mock.status_code = 500 + response_mock.headers = {} + response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} + + # Download this model to make sure it's in the cache. + orig_pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")} + + # Under the mock environment we get a 500 error when trying to reach the model. + with mock.patch("requests.request", return_value=response_mock): + # Download this model to make sure it's in the cache. + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")} + + for m1, m2 in zip(orig_comps.values(), comps.values()): + for p1, p2 in zip(m1.parameters(), m2.parameters()): + if p1.data.ne(p2.data).sum() > 0: + assert False, "Parameters not the same!" 
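
A condensed sketch of the offline-fallback pattern exercised by this test and the one that follows (illustrative only; it reuses the tiny test repo id from the tests above and assumes the weights were already cached on a prior online run — it is not part of the test file itself):

import unittest.mock as mock

import torch
from requests.exceptions import HTTPError

from diffusers import DiffusionPipeline

repo_id = "hf-internal-testing/tiny-stable-diffusion-torch"

# Warm the cache while the network is available.
online_pipe = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None)

# Simulate a Hub outage: every HTTP request "fails" with a 500 response.
response_mock = mock.Mock(status_code=500, headers={})
response_mock.raise_for_status.side_effect = HTTPError
response_mock.json.return_value = {}

with mock.patch("requests.request", return_value=response_mock):
    # from_pretrained must now fall back to the locally cached files.
    offline_pipe = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None)

# The cached weights should be identical to the ones loaded online.
for name, module in offline_pipe.components.items():
    if hasattr(module, "parameters"):
        for p_off, p_on in zip(module.parameters(), online_pipe.components[name].parameters()):
            assert torch.equal(p_off, p_on), f"{name} parameters differ"
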
+ + def test_local_files_only_are_used_when_no_internet(self): + # A mock response for an HTTP head request to emulate server down + response_mock = mock.Mock() + response_mock.status_code = 500 + response_mock.headers = {} + response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} + + # first check that with local files only the pipeline can only be used if cached + with self.assertRaises(FileNotFoundError): + with tempfile.TemporaryDirectory() as tmpdirname: + orig_pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True, cache_dir=tmpdirname + ) + + # now download + orig_pipe = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-torch") + + # make sure it can be loaded with local_files_only + orig_pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True + ) + orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")} + + # Under the mock environment we get a 500 error when trying to connect to the internet. + # Make sure it works local_files_only only works here! + with mock.patch("requests.request", return_value=response_mock): + # Download this model to make sure it's in the cache. + pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") + comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")} + + for m1, m2 in zip(orig_comps.values(), comps.values()): + for p1, p2 in zip(m1.parameters(), m2.parameters()): + if p1.data.ne(p2.data).sum() > 0: + assert False, "Parameters not the same!" + + def test_download_from_variant_folder(self): + for use_safetensors in [False, True]: + other_format = ".bin" if use_safetensors else ".safetensors" + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = StableDiffusionPipeline.download( + "hf-internal-testing/stable-diffusion-all-variants", + cache_dir=tmpdirname, + use_safetensors=use_safetensors, + ) + all_root_files = [t[-1] for t in os.walk(tmpdirname)] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a variant file even if we have some here: + # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet + assert len(files) == 15, f"We should only download 15 files, not {len(files)}" + assert not any(f.endswith(other_format) for f in files) + # no variants + assert not any(len(f.split(".")) == 3 for f in files) + + def test_download_variant_all(self): + for use_safetensors in [False, True]: + other_format = ".bin" if use_safetensors else ".safetensors" + this_format = ".safetensors" if use_safetensors else ".bin" + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = StableDiffusionPipeline.download( + "hf-internal-testing/stable-diffusion-all-variants", + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + all_root_files = [t[-1] for t in os.walk(tmpdirname)] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a non-variant file even if we have some here: + # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet + assert len(files) == 15, f"We should only download 15 files, not {len(files)}" + # unet, vae, text_encoder, safety_checker + assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 4 + # all 
checkpoints should have variant ending + assert not any(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) + assert not any(f.endswith(other_format) for f in files) + + def test_download_variant_partly(self): + for use_safetensors in [False, True]: + other_format = ".bin" if use_safetensors else ".safetensors" + this_format = ".safetensors" if use_safetensors else ".bin" + variant = "no_ema" + + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = StableDiffusionPipeline.download( + "hf-internal-testing/stable-diffusion-all-variants", + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + all_root_files = [t[-1] for t in os.walk(tmpdirname)] + files = [item for sublist in all_root_files for item in sublist] + + unet_files = os.listdir(os.path.join(tmpdirname, "unet")) + + # Some of the downloaded files should be a non-variant file, check: + # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet + assert len(files) == 15, f"We should only download 15 files, not {len(files)}" + # only unet has "no_ema" variant + assert f"diffusion_pytorch_model.{variant}{this_format}" in unet_files + assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 1 + # vae, safety_checker and text_encoder should have no variant + assert sum(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) == 3 + assert not any(f.endswith(other_format) for f in files) + + def test_download_variants_with_sharded_checkpoints(self): + # Here we test for downloading of "variant" files belonging to the `unet` and + # the `text_encoder`. Their checkpoints can be sharded. + for use_safetensors in [True, False]: + for variant in ["fp16", None]: + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = DiffusionPipeline.download( + "hf-internal-testing/tiny-stable-diffusion-pipe-variants-right-format", + safety_checker=None, + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # Check for `model_ext` and `variant`. 
+ model_ext = ".safetensors" if use_safetensors else ".bin" + unexpected_ext = ".bin" if use_safetensors else ".safetensors" + model_files = [f for f in files if f.endswith(model_ext)] + assert not any(f.endswith(unexpected_ext) for f in files) + assert all(variant in f for f in model_files if f.endswith(model_ext) and variant is not None) + + def test_download_legacy_variants_with_sharded_ckpts_raises_warning(self): + repo_id = "hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds" + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + deprecated_warning_msg = "Warning: The repository contains sharded checkpoints for variant" + + with CaptureLogger(logger) as cap_logger: + with tempfile.TemporaryDirectory() as tmpdirname: + local_repo_id = snapshot_download(repo_id, cache_dir=tmpdirname) + + _ = DiffusionPipeline.from_pretrained( + local_repo_id, + safety_checker=None, + variant="fp16", + use_safetensors=True, + ) + assert deprecated_warning_msg in str(cap_logger), "Deprecation warning not found in logs" + + def test_download_safetensors_only_variant_exists_for_model(self): + variant = None + use_safetensors = True + + # text encoder is missing no variant weights, so the following can't work + with tempfile.TemporaryDirectory() as tmpdirname: + with self.assertRaises(OSError) as error_context: + tmpdirname = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/stable-diffusion-broken-variants", + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + assert "Could not find the necessary `safetensors` weights" in str(error_context.exception) + + # text encoder has fp16 variants so we can load it + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = StableDiffusionPipeline.download( + "hf-internal-testing/stable-diffusion-broken-variants", + use_safetensors=use_safetensors, + cache_dir=tmpdirname, + variant="fp16", + ) + all_root_files = [t[-1] for t in os.walk(tmpdirname)] + files = [item for sublist in all_root_files for item in sublist] + # None of the downloaded files should be a non-variant file even if we have some here: + # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet + assert len(files) == 15, f"We should only download 15 files, not {len(files)}" + + def test_download_bin_only_variant_exists_for_model(self): + variant = None + use_safetensors = False + + # text encoder is missing Non-variant weights, so the following can't work + with tempfile.TemporaryDirectory() as tmpdirname: + with self.assertRaises(OSError) as error_context: + tmpdirname = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/stable-diffusion-broken-variants", + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + assert "Error no file name" in str(error_context.exception) + + # text encoder has fp16 variants so we can load it + with tempfile.TemporaryDirectory() as tmpdirname: + tmpdirname = StableDiffusionPipeline.download( + "hf-internal-testing/stable-diffusion-broken-variants", + use_safetensors=use_safetensors, + cache_dir=tmpdirname, + variant="fp16", + ) + all_root_files = [t[-1] for t in os.walk(tmpdirname)] + files = [item for sublist in all_root_files for item in sublist] + # None of the downloaded files should be a non-variant file even if we have some here: + # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet + assert len(files) == 15, f"We should only download 15 files, not {len(files)}" + + def 
test_download_safetensors_variant_does_not_exist_for_model(self): + variant = "no_ema" + use_safetensors = True + + # text encoder is missing no_ema variant weights, so the following can't work + with tempfile.TemporaryDirectory() as tmpdirname: + with self.assertRaises(OSError) as error_context: + tmpdirname = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/stable-diffusion-broken-variants", + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + + assert "Could not find the necessary `safetensors` weights" in str(error_context.exception) + + def test_download_bin_variant_does_not_exist_for_model(self): + variant = "no_ema" + use_safetensors = False + + # text encoder is missing no_ema variant weights, so the following can't work + with tempfile.TemporaryDirectory() as tmpdirname: + with self.assertRaises(OSError) as error_context: + tmpdirname = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/stable-diffusion-broken-variants", + cache_dir=tmpdirname, + variant=variant, + use_safetensors=use_safetensors, + ) + assert "Error no file name" in str(error_context.exception) + + def test_local_save_load_index(self): + prompt = "hello" + for variant in [None, "fp16"]: + for use_safe in [True, False]: + pipe = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe-indexes", + variant=variant, + use_safetensors=use_safe, + safety_checker=None, + ) + pipe = pipe.to(torch_device) + generator = torch.manual_seed(0) + out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname, variant=variant, safe_serialization=use_safe) + pipe_2 = StableDiffusionPipeline.from_pretrained( + tmpdirname, safe_serialization=use_safe, variant=variant + ) + pipe_2 = pipe_2.to(torch_device) + + generator = torch.manual_seed(0) + + out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images + + assert np.max(np.abs(out - out_2)) < 1e-3 + + def test_text_inversion_download(self): + pipe = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe = pipe.to(torch_device) + + num_tokens = len(pipe.tokenizer) + + # single token load local + with tempfile.TemporaryDirectory() as tmpdirname: + ten = {"<*>": torch.ones((32,))} + torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) + + pipe.load_textual_inversion(tmpdirname) + + token = pipe.tokenizer.convert_tokens_to_ids("<*>") + assert token == num_tokens, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32 + assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>" + + prompt = "hey <*>" + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # single token load local with weight name + with tempfile.TemporaryDirectory() as tmpdirname: + ten = {"<**>": 2 * torch.ones((1, 32))} + torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) + + pipe.load_textual_inversion(tmpdirname, weight_name="learned_embeds.bin") + + token = pipe.tokenizer.convert_tokens_to_ids("<**>") + assert token == num_tokens + 1, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 + assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>" + + prompt = "hey <**>" + out = 
pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # multi token load + with tempfile.TemporaryDirectory() as tmpdirname: + ten = {"<***>": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])} + torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin")) + + pipe.load_textual_inversion(tmpdirname) + + token = pipe.tokenizer.convert_tokens_to_ids("<***>") + token_1 = pipe.tokenizer.convert_tokens_to_ids("<***>_1") + token_2 = pipe.tokenizer.convert_tokens_to_ids("<***>_2") + + assert token == num_tokens + 2, "Added token must be at spot `num_tokens`" + assert token_1 == num_tokens + 3, "Added token must be at spot `num_tokens`" + assert token_2 == num_tokens + 4, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 + assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 + assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2" + + prompt = "hey <***>" + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # multi token load a1111 + with tempfile.TemporaryDirectory() as tmpdirname: + ten = { + "string_to_param": { + "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))]) + }, + "name": "<****>", + } + torch.save(ten, os.path.join(tmpdirname, "a1111.bin")) + + pipe.load_textual_inversion(tmpdirname, weight_name="a1111.bin") + + token = pipe.tokenizer.convert_tokens_to_ids("<****>") + token_1 = pipe.tokenizer.convert_tokens_to_ids("<****>_1") + token_2 = pipe.tokenizer.convert_tokens_to_ids("<****>_2") + + assert token == num_tokens + 5, "Added token must be at spot `num_tokens`" + assert token_1 == num_tokens + 6, "Added token must be at spot `num_tokens`" + assert token_2 == num_tokens + 7, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 + assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 + assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2" + + prompt = "hey <****>" + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # multi embedding load + with tempfile.TemporaryDirectory() as tmpdirname1: + with tempfile.TemporaryDirectory() as tmpdirname2: + ten = {"<*****>": torch.ones((32,))} + torch.save(ten, os.path.join(tmpdirname1, "learned_embeds.bin")) + + ten = {"<******>": 2 * torch.ones((1, 32))} + torch.save(ten, os.path.join(tmpdirname2, "learned_embeds.bin")) + + pipe.load_textual_inversion([tmpdirname1, tmpdirname2]) + + token = pipe.tokenizer.convert_tokens_to_ids("<*****>") + assert token == num_tokens + 8, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32 + assert pipe._maybe_convert_prompt("<*****>", pipe.tokenizer) == "<*****>" + + token = pipe.tokenizer.convert_tokens_to_ids("<******>") + assert token == num_tokens + 9, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 + assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>" + + prompt = "hey <*****> 
<******>" + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # single token state dict load + ten = {"": torch.ones((32,))} + pipe.load_textual_inversion(ten) + + token = pipe.tokenizer.convert_tokens_to_ids("") + assert token == num_tokens + 10, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32 + assert pipe._maybe_convert_prompt("", pipe.tokenizer) == "" + + prompt = "hey " + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # multi embedding state dict load + ten1 = {"": torch.ones((32,))} + ten2 = {"": 2 * torch.ones((1, 32))} + + pipe.load_textual_inversion([ten1, ten2]) + + token = pipe.tokenizer.convert_tokens_to_ids("") + assert token == num_tokens + 11, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32 + assert pipe._maybe_convert_prompt("", pipe.tokenizer) == "" + + token = pipe.tokenizer.convert_tokens_to_ids("") + assert token == num_tokens + 12, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64 + assert pipe._maybe_convert_prompt("", pipe.tokenizer) == "" + + prompt = "hey " + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # auto1111 multi-token state dict load + ten = { + "string_to_param": { + "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))]) + }, + "name": "", + } + + pipe.load_textual_inversion(ten) + + token = pipe.tokenizer.convert_tokens_to_ids("") + token_1 = pipe.tokenizer.convert_tokens_to_ids("_1") + token_2 = pipe.tokenizer.convert_tokens_to_ids("_2") + + assert token == num_tokens + 13, "Added token must be at spot `num_tokens`" + assert token_1 == num_tokens + 14, "Added token must be at spot `num_tokens`" + assert token_2 == num_tokens + 15, "Added token must be at spot `num_tokens`" + assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96 + assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128 + assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160 + assert pipe._maybe_convert_prompt("", pipe.tokenizer) == " _1 _2" + + prompt = "hey " + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + # multiple references to multi embedding + ten = {"": torch.ones(3, 32)} + pipe.load_textual_inversion(ten) + + assert ( + pipe._maybe_convert_prompt(" ", pipe.tokenizer) == " _1 _2 _1 _2" + ) + + prompt = "hey " + out = pipe(prompt, num_inference_steps=1, output_type="np").images + assert out.shape == (1, 128, 128, 3) + + def test_text_inversion_multi_tokens(self): + pipe1 = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe1 = pipe1.to(torch_device) + + token1, token2 = "<*>", "<**>" + ten1 = torch.ones((32,)) + ten2 = torch.ones((32,)) * 2 + + num_tokens = len(pipe1.tokenizer) + + pipe1.load_textual_inversion(ten1, token=token1) + pipe1.load_textual_inversion(ten2, token=token2) + emb1 = pipe1.text_encoder.get_input_embeddings().weight + + pipe2 = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe2 = pipe2.to(torch_device) + pipe2.load_textual_inversion([ten1, 
ten2], token=[token1, token2]) + emb2 = pipe2.text_encoder.get_input_embeddings().weight + + pipe3 = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe3 = pipe3.to(torch_device) + pipe3.load_textual_inversion(torch.stack([ten1, ten2], dim=0), token=[token1, token2]) + emb3 = pipe3.text_encoder.get_input_embeddings().weight + + assert len(pipe1.tokenizer) == len(pipe2.tokenizer) == len(pipe3.tokenizer) == num_tokens + 2 + assert ( + pipe1.tokenizer.convert_tokens_to_ids(token1) + == pipe2.tokenizer.convert_tokens_to_ids(token1) + == pipe3.tokenizer.convert_tokens_to_ids(token1) + == num_tokens + ) + assert ( + pipe1.tokenizer.convert_tokens_to_ids(token2) + == pipe2.tokenizer.convert_tokens_to_ids(token2) + == pipe3.tokenizer.convert_tokens_to_ids(token2) + == num_tokens + 1 + ) + assert emb1[num_tokens].sum().item() == emb2[num_tokens].sum().item() == emb3[num_tokens].sum().item() + assert ( + emb1[num_tokens + 1].sum().item() == emb2[num_tokens + 1].sum().item() == emb3[num_tokens + 1].sum().item() + ) + + def test_textual_inversion_unload(self): + pipe1 = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe1 = pipe1.to(torch_device) + orig_tokenizer_size = len(pipe1.tokenizer) + orig_emb_size = len(pipe1.text_encoder.get_input_embeddings().weight) + + token = "<*>" + ten = torch.ones((32,)) + pipe1.load_textual_inversion(ten, token=token) + pipe1.unload_textual_inversion() + pipe1.load_textual_inversion(ten, token=token) + pipe1.unload_textual_inversion() + + final_tokenizer_size = len(pipe1.tokenizer) + final_emb_size = len(pipe1.text_encoder.get_input_embeddings().weight) + # both should be restored to original size + assert final_tokenizer_size == orig_tokenizer_size + assert final_emb_size == orig_emb_size + + def test_download_ignore_files(self): + # Check https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files/blob/72f58636e5508a218c6b3f60550dc96445547817/model_index.json#L4 + with tempfile.TemporaryDirectory() as tmpdirname: + # pipeline has Flax weights + tmpdirname = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files") + all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))] + files = [item for sublist in all_root_files for item in sublist] + + # None of the downloaded files should be a pytorch file even if we have some here: + # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack + assert not any(f in ["vae/diffusion_pytorch_model.bin", "text_encoder/config.json"] for f in files) + assert len(files) == 14 + + def test_download_dduf_with_custom_pipeline_raises_error(self): + with self.assertRaises(NotImplementedError): + _ = DiffusionPipeline.download( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", custom_pipeline="my_pipeline" + ) + + def test_download_dduf_with_connected_pipeline_raises_error(self): + with self.assertRaises(NotImplementedError): + _ = DiffusionPipeline.download( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", load_connected_pipeline=True + ) + + def test_get_pipeline_class_from_flax(self): + flax_config = {"_class_name": "FlaxStableDiffusionPipeline"} + config = {"_class_name": "StableDiffusionPipeline"} + + # when loading a PyTorch Pipeline from a FlaxPipeline `model_index.json`, e.g.: 
https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-lms-pipe/blob/7a9063578b325779f0f1967874a6771caa973cad/model_index.json#L2 + # we need to make sure that we don't load the Flax Pipeline class, but instead the PyTorch pipeline class + assert _get_pipeline_class(DiffusionPipeline, flax_config) == _get_pipeline_class(DiffusionPipeline, config) + + +class CustomPipelineTests(unittest.TestCase): + def test_load_custom_pipeline(self): + pipeline = DiffusionPipeline.from_pretrained( + "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline" + ) + pipeline = pipeline.to(torch_device) + # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub + # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24 + assert pipeline.__class__.__name__ == "CustomPipeline" + + def test_load_custom_github(self): + pipeline = DiffusionPipeline.from_pretrained( + "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="main" + ) + + # make sure that on "main" pipeline gives only ones because of: https://github.com/huggingface/diffusers/pull/1690 + with torch.no_grad(): + output = pipeline() + + assert output.numel() == output.sum() + + # hack since Python doesn't like overwriting modules: https://stackoverflow.com/questions/3105801/unload-a-module-in-python + # Could in the future work with hashes instead. + del sys.modules["diffusers_modules.git.one_step_unet"] + + pipeline = DiffusionPipeline.from_pretrained( + "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="0.10.2" + ) + with torch.no_grad(): + output = pipeline() + + assert output.numel() != output.sum() + + assert pipeline.__class__.__name__ == "UnetSchedulerOneForwardPipeline" + + def test_run_custom_pipeline(self): + pipeline = DiffusionPipeline.from_pretrained( + "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline" + ) + pipeline = pipeline.to(torch_device) + images, output_str = pipeline(num_inference_steps=2, output_type="np") + + assert images[0].shape == (1, 32, 32, 3) + + # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102 + assert output_str == "This is a test" + + def test_remote_components(self): + # make sure that trust remote code has to be passed + with self.assertRaises(ValueError): + pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-components") + + # Check that only loading custom components "my_unet", "my_scheduler" works + pipeline = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-sdxl-custom-components", trust_remote_code=True + ) + + assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") + assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler") + assert pipeline.__class__.__name__ == "StableDiffusionXLPipeline" + + pipeline = pipeline.to(torch_device) + images = pipeline("test", num_inference_steps=2, output_type="np")[0] + + assert images.shape == (1, 64, 64, 3) + + # Check that only loading custom components "my_unet", "my_scheduler" and explicit custom pipeline works + pipeline = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-sdxl-custom-components", custom_pipeline="my_pipeline", trust_remote_code=True + ) + + assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") + assert pipeline.config.scheduler 
== ("diffusers_modules.local.my_scheduler", "MyScheduler") + assert pipeline.__class__.__name__ == "MyPipeline" + + pipeline = pipeline.to(torch_device) + images = pipeline("test", num_inference_steps=2, output_type="np")[0] + + assert images.shape == (1, 64, 64, 3) + + def test_remote_auto_custom_pipe(self): + # make sure that trust remote code has to be passed + with self.assertRaises(ValueError): + pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-all") + + # Check that only loading custom components "my_unet", "my_scheduler" and auto custom pipeline works + pipeline = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-sdxl-custom-all", trust_remote_code=True + ) + + assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel") + assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler") + assert pipeline.__class__.__name__ == "MyPipeline" + + pipeline = pipeline.to(torch_device) + images = pipeline("test", num_inference_steps=2, output_type="np")[0] + + assert images.shape == (1, 64, 64, 3) + + def test_remote_custom_pipe_with_dot_in_name(self): + # make sure that trust remote code has to be passed + with self.assertRaises(ValueError): + pipeline = DiffusionPipeline.from_pretrained("akasharidas/ddpm-cifar10-32-dot.in.name") + + pipeline = DiffusionPipeline.from_pretrained("akasharidas/ddpm-cifar10-32-dot.in.name", trust_remote_code=True) + + assert pipeline.__class__.__name__ == "CustomPipeline" + + pipeline = pipeline.to(torch_device) + images, output_str = pipeline(num_inference_steps=2, output_type="np") + + assert images[0].shape == (1, 32, 32, 3) + assert output_str == "This is a test" + + def test_local_custom_pipeline_repo(self): + local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") + pipeline = DiffusionPipeline.from_pretrained( + "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path + ) + pipeline = pipeline.to(torch_device) + images, output_str = pipeline(num_inference_steps=2, output_type="np") + + assert pipeline.__class__.__name__ == "CustomLocalPipeline" + assert images[0].shape == (1, 32, 32, 3) + # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102 + assert output_str == "This is a local test" + + def test_local_custom_pipeline_file(self): + local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline") + local_custom_pipeline_path = os.path.join(local_custom_pipeline_path, "what_ever.py") + pipeline = DiffusionPipeline.from_pretrained( + "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path + ) + pipeline = pipeline.to(torch_device) + images, output_str = pipeline(num_inference_steps=2, output_type="np") + + assert pipeline.__class__.__name__ == "CustomLocalPipeline" + assert images[0].shape == (1, 32, 32, 3) + # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102 + assert output_str == "This is a local test" + + def test_custom_model_and_pipeline(self): + pipe = CustomPipeline( + encoder=CustomEncoder(), + scheduler=DDIMScheduler(), + ) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + + pipe_new = CustomPipeline.from_pretrained(tmpdirname) + pipe_new.save_pretrained(tmpdirname) + + conf_1 = dict(pipe.config) + conf_2 = dict(pipe_new.config) + + del conf_2["_name_or_path"] + + assert conf_1 == conf_2 + + @slow + 
@require_torch_accelerator + def test_download_from_git(self): + # Because adaptive_avg_pool2d_backward_cuda + # does not have a deterministic implementation. + clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" + + feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id) + clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16) + + pipeline = DiffusionPipeline.from_pretrained( + "CompVis/stable-diffusion-v1-4", + custom_pipeline="clip_guided_stable_diffusion", + clip_model=clip_model, + feature_extractor=feature_extractor, + torch_dtype=torch.float16, + ) + pipeline.enable_attention_slicing() + pipeline = pipeline.to(torch_device) + + # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the pypi package of th e library, but solely on the community examples folder of GitHub under: + # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py + assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion" + + image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0] + assert image.shape == (512, 512, 3) + + def test_save_pipeline_change_config(self): + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.save_pretrained(tmpdirname) + pipe = DiffusionPipeline.from_pretrained(tmpdirname) + + assert pipe.scheduler.__class__.__name__ == "PNDMScheduler" + + # let's make sure that changing the scheduler is correctly reflected + with tempfile.TemporaryDirectory() as tmpdirname: + pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) + pipe.save_pretrained(tmpdirname) + pipe = DiffusionPipeline.from_pretrained(tmpdirname) + + assert pipe.scheduler.__class__.__name__ == "DPMSolverMultistepScheduler" + + +class PipelineFastTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def dummy_image(self): + batch_size = 1 + num_channels = 3 + sizes = (32, 32) + + image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) + return image + + def dummy_uncond_unet(self, sample_size=32): + torch.manual_seed(0) + model = UNet2DModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=sample_size, + in_channels=3, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + return model + + def dummy_cond_unet(self, sample_size=32): + torch.manual_seed(0) + model = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=sample_size, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + return model + + @property + def dummy_vae(self): + torch.manual_seed(0) + model = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + return model + + @property + def dummy_text_encoder(self): + torch.manual_seed(0) + config = CLIPTextConfig( + bos_token_id=0, + 
eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + return CLIPTextModel(config) + + @property + def dummy_extractor(self): + def extract(*args, **kwargs): + class Out: + def __init__(self): + self.pixel_values = torch.ones([0]) + + def to(self, device): + self.pixel_values.to(device) + return self + + return Out() + + return extract + + @parameterized.expand( + [ + [DDIMScheduler, DDIMPipeline, 32], + [DDPMScheduler, DDPMPipeline, 32], + [DDIMScheduler, DDIMPipeline, (32, 64)], + [DDPMScheduler, DDPMPipeline, (64, 32)], + ] + ) + def test_uncond_unet_components(self, scheduler_fn=DDPMScheduler, pipeline_fn=DDPMPipeline, sample_size=32): + unet = self.dummy_uncond_unet(sample_size) + scheduler = scheduler_fn() + pipeline = pipeline_fn(unet, scheduler).to(torch_device) + + generator = torch.manual_seed(0) + out_image = pipeline( + generator=generator, + num_inference_steps=2, + output_type="np", + ).images + sample_size = (sample_size, sample_size) if isinstance(sample_size, int) else sample_size + assert out_image.shape == (1, *sample_size, 3) + + def test_stable_diffusion_components(self): + """Test that components property works correctly""" + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + image = self.dummy_image().cpu().permute(0, 2, 3, 1)[0] + init_image = Image.fromarray(np.uint8(image)).convert("RGB") + mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) + + # make sure here that pndm scheduler skips prk + inpaint = StableDiffusionInpaintPipelineLegacy( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ).to(torch_device) + img2img = StableDiffusionImg2ImgPipeline(**inpaint.components, image_encoder=None).to(torch_device) + text2img = StableDiffusionPipeline(**inpaint.components, image_encoder=None).to(torch_device) + + prompt = "A painting of a squirrel eating a burger" + + generator = torch.manual_seed(0) + image_inpaint = inpaint( + [prompt], + generator=generator, + num_inference_steps=2, + output_type="np", + image=init_image, + mask_image=mask_image, + ).images + image_img2img = img2img( + [prompt], + generator=generator, + num_inference_steps=2, + output_type="np", + image=init_image, + ).images + image_text2img = text2img( + [prompt], + generator=generator, + num_inference_steps=2, + output_type="np", + ).images + + assert image_inpaint.shape == (1, 32, 32, 3) + assert image_img2img.shape == (1, 32, 32, 3) + assert image_text2img.shape == (1, 64, 64, 3) + + @require_torch_accelerator + def test_pipe_false_offload_warn(self): + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + sd.enable_model_cpu_offload(device=torch_device) + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + with CaptureLogger(logger) as cap_logger: + sd.to(torch_device) + + assert "It is strongly 
recommended against doing so" in str(cap_logger) + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + def test_set_scheduler(self): + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, DDIMScheduler) + sd.scheduler = DDPMScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, DDPMScheduler) + sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, PNDMScheduler) + sd.scheduler = LMSDiscreteScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, LMSDiscreteScheduler) + sd.scheduler = EulerDiscreteScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, EulerDiscreteScheduler) + sd.scheduler = EulerAncestralDiscreteScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, EulerAncestralDiscreteScheduler) + sd.scheduler = DPMSolverMultistepScheduler.from_config(sd.scheduler.config) + assert isinstance(sd.scheduler, DPMSolverMultistepScheduler) + + def test_set_component_to_none(self): + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + pipeline = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + generator = torch.Generator(device="cpu").manual_seed(0) + + prompt = "This is a flower" + + out_image = pipeline( + prompt=prompt, + generator=generator, + num_inference_steps=1, + output_type="np", + ).images + + pipeline.feature_extractor = None + generator = torch.Generator(device="cpu").manual_seed(0) + out_image_2 = pipeline( + prompt=prompt, + generator=generator, + num_inference_steps=1, + output_type="np", + ).images + + assert out_image.shape == (1, 64, 64, 3) + assert np.abs(out_image - out_image_2).max() < 1e-3 + + def test_optional_components_is_none(self): + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + items = { + "feature_extractor": self.dummy_extractor, + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": bert, + "tokenizer": tokenizer, + "safety_checker": None, + # we don't add an image encoder + } + + pipeline = StableDiffusionPipeline(**items) + + assert sorted(pipeline.components.keys()) == sorted(["image_encoder"] + list(items.keys())) + assert pipeline.image_encoder is None + + def test_set_scheduler_consistency(self): + unet = self.dummy_cond_unet() + pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler") + ddim = DDIMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler") 
+ vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=pndm, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + pndm_config = sd.scheduler.config + sd.scheduler = DDPMScheduler.from_config(pndm_config) + sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config) + pndm_config_2 = sd.scheduler.config + pndm_config_2 = {k: v for k, v in pndm_config_2.items() if k in pndm_config} + + assert dict(pndm_config) == dict(pndm_config_2) + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=ddim, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + ddim_config = sd.scheduler.config + sd.scheduler = LMSDiscreteScheduler.from_config(ddim_config) + sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config) + ddim_config_2 = sd.scheduler.config + ddim_config_2 = {k: v for k, v in ddim_config_2.items() if k in ddim_config} + + assert dict(ddim_config) == dict(ddim_config_2) + + def test_save_safe_serialization(self): + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") + with tempfile.TemporaryDirectory() as tmpdirname: + pipeline.save_pretrained(tmpdirname, safe_serialization=True) + + # Validate that the VAE safetensor exists and are of the correct format + vae_path = os.path.join(tmpdirname, "vae", "diffusion_pytorch_model.safetensors") + assert os.path.exists(vae_path), f"Could not find {vae_path}" + _ = safetensors.torch.load_file(vae_path) + + # Validate that the UNet safetensor exists and are of the correct format + unet_path = os.path.join(tmpdirname, "unet", "diffusion_pytorch_model.safetensors") + assert os.path.exists(unet_path), f"Could not find {unet_path}" + _ = safetensors.torch.load_file(unet_path) + + # Validate that the text encoder safetensor exists and are of the correct format + text_encoder_path = os.path.join(tmpdirname, "text_encoder", "model.safetensors") + assert os.path.exists(text_encoder_path), f"Could not find {text_encoder_path}" + _ = safetensors.torch.load_file(text_encoder_path) + + pipeline = StableDiffusionPipeline.from_pretrained(tmpdirname) + assert pipeline.unet is not None + assert pipeline.vae is not None + assert pipeline.text_encoder is not None + assert pipeline.scheduler is not None + assert pipeline.feature_extractor is not None + + def test_no_pytorch_download_when_doing_safetensors(self): + # by default we don't download + with tempfile.TemporaryDirectory() as tmpdirname: + _ = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname + ) + + path = os.path.join( + tmpdirname, + "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all", + "snapshots", + "07838d72e12f9bcec1375b0482b80c1d399be843", + "unet", + ) + # safetensors exists + assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors")) + # pytorch does not + assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin")) + + def test_no_safetensors_download_when_doing_pytorch(self): + use_safetensors = False + + with tempfile.TemporaryDirectory() as tmpdirname: + _ = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/diffusers-stable-diffusion-tiny-all", + cache_dir=tmpdirname, + use_safetensors=use_safetensors, + ) + + path = 
os.path.join( + tmpdirname, + "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all", + "snapshots", + "07838d72e12f9bcec1375b0482b80c1d399be843", + "unet", + ) + # safetensors does not exists + assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors")) + # pytorch does + assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin")) + + def test_optional_components(self): + unet = self.dummy_cond_unet() + pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler") + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + orig_sd = StableDiffusionPipeline( + unet=unet, + scheduler=pndm, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=unet, + feature_extractor=self.dummy_extractor, + ) + sd = orig_sd + + assert sd.config.requires_safety_checker is True + + with tempfile.TemporaryDirectory() as tmpdirname: + sd.save_pretrained(tmpdirname) + + # Test that passing None works + sd = StableDiffusionPipeline.from_pretrained( + tmpdirname, feature_extractor=None, safety_checker=None, requires_safety_checker=False + ) + + assert sd.config.requires_safety_checker is False + assert sd.config.safety_checker == (None, None) + assert sd.config.feature_extractor == (None, None) + + with tempfile.TemporaryDirectory() as tmpdirname: + sd.save_pretrained(tmpdirname) + + # Test that loading previous None works + sd = StableDiffusionPipeline.from_pretrained(tmpdirname) + + assert sd.config.requires_safety_checker is False + assert sd.config.safety_checker == (None, None) + assert sd.config.feature_extractor == (None, None) + + orig_sd.save_pretrained(tmpdirname) + + # Test that loading without any directory works + shutil.rmtree(os.path.join(tmpdirname, "safety_checker")) + with open(os.path.join(tmpdirname, sd.config_name)) as f: + config = json.load(f) + config["safety_checker"] = [None, None] + with open(os.path.join(tmpdirname, sd.config_name), "w") as f: + json.dump(config, f) + + sd = StableDiffusionPipeline.from_pretrained(tmpdirname, requires_safety_checker=False) + sd.save_pretrained(tmpdirname) + sd = StableDiffusionPipeline.from_pretrained(tmpdirname) + + assert sd.config.requires_safety_checker is False + assert sd.config.safety_checker == (None, None) + assert sd.config.feature_extractor == (None, None) + + # Test that loading from deleted model index works + with open(os.path.join(tmpdirname, sd.config_name)) as f: + config = json.load(f) + del config["safety_checker"] + del config["feature_extractor"] + with open(os.path.join(tmpdirname, sd.config_name), "w") as f: + json.dump(config, f) + + sd = StableDiffusionPipeline.from_pretrained(tmpdirname) + + assert sd.config.requires_safety_checker is False + assert sd.config.safety_checker == (None, None) + assert sd.config.feature_extractor == (None, None) + + with tempfile.TemporaryDirectory() as tmpdirname: + sd.save_pretrained(tmpdirname) + + # Test that partially loading works + sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor) + + assert sd.config.requires_safety_checker is False + assert sd.config.safety_checker == (None, None) + assert sd.config.feature_extractor != (None, None) + + # Test that partially loading works + sd = StableDiffusionPipeline.from_pretrained( + tmpdirname, + feature_extractor=self.dummy_extractor, + safety_checker=unet, + requires_safety_checker=[True, True], + ) + + 
assert sd.config.requires_safety_checker == [True, True] + assert sd.config.safety_checker != (None, None) + assert sd.config.feature_extractor != (None, None) + + with tempfile.TemporaryDirectory() as tmpdirname: + sd.save_pretrained(tmpdirname) + sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor) + + assert sd.config.requires_safety_checker == [True, True] + assert sd.config.safety_checker != (None, None) + assert sd.config.feature_extractor != (None, None) + + def test_name_or_path(self): + model_path = "hf-internal-testing/tiny-stable-diffusion-torch" + sd = DiffusionPipeline.from_pretrained(model_path) + + assert sd.name_or_path == model_path + + with tempfile.TemporaryDirectory() as tmpdirname: + sd.save_pretrained(tmpdirname) + sd = DiffusionPipeline.from_pretrained(tmpdirname) + + assert sd.name_or_path == tmpdirname + + def test_error_no_variant_available(self): + variant = "fp16" + with self.assertRaises(ValueError) as error_context: + _ = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/diffusers-stable-diffusion-tiny-all", variant=variant + ) + + assert "but no such modeling files are available" in str(error_context.exception) + assert variant in str(error_context.exception) + + def test_pipe_to(self): + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + device_type = torch.device(torch_device).type + + sd1 = sd.to(device_type) + sd2 = sd.to(torch.device(device_type)) + sd3 = sd.to(device_type, torch.float32) + sd4 = sd.to(device=device_type) + sd5 = sd.to(torch_device=device_type) + sd6 = sd.to(device_type, dtype=torch.float32) + sd7 = sd.to(device_type, torch_dtype=torch.float32) + + assert sd1.device.type == device_type + assert sd2.device.type == device_type + assert sd3.device.type == device_type + assert sd4.device.type == device_type + assert sd5.device.type == device_type + assert sd6.device.type == device_type + assert sd7.device.type == device_type + + sd1 = sd.to(torch.float16) + sd2 = sd.to(None, torch.float16) + sd3 = sd.to(dtype=torch.float16) + sd4 = sd.to(dtype=torch.float16) + sd5 = sd.to(None, dtype=torch.float16) + sd6 = sd.to(None, torch_dtype=torch.float16) + + assert sd1.dtype == torch.float16 + assert sd2.dtype == torch.float16 + assert sd3.dtype == torch.float16 + assert sd4.dtype == torch.float16 + assert sd5.dtype == torch.float16 + assert sd6.dtype == torch.float16 + + sd1 = sd.to(device=device_type, dtype=torch.float16) + sd2 = sd.to(torch_device=device_type, torch_dtype=torch.float16) + sd3 = sd.to(device_type, torch.float16) + + assert sd1.dtype == torch.float16 + assert sd2.dtype == torch.float16 + assert sd3.dtype == torch.float16 + + assert sd1.device.type == device_type + assert sd2.device.type == device_type + assert sd3.device.type == device_type + + def test_pipe_same_device_id_offload(self): + unet = self.dummy_cond_unet() + scheduler = PNDMScheduler(skip_prk_steps=True) + vae = self.dummy_vae + bert = self.dummy_text_encoder + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + + sd = StableDiffusionPipeline( + unet=unet, + scheduler=scheduler, + vae=vae, + text_encoder=bert, + 
tokenizer=tokenizer, + safety_checker=None, + feature_extractor=self.dummy_extractor, + ) + + # `enable_model_cpu_offload` detects device type when not passed + # `enable_model_cpu_offload` raises ValueError if detected device is `cpu` + # This test only checks whether `_offload_gpu_id` is set correctly + # So the device passed can be any supported `torch.device` type + # This allows us to keep the test under `PipelineFastTests` + sd.enable_model_cpu_offload(gpu_id=5, device="cuda") + assert sd._offload_gpu_id == 5 + sd.maybe_free_model_hooks() + assert sd._offload_gpu_id == 5 + + @parameterized.expand([torch.float32, torch.float16]) + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_load_dduf_from_hub(self, dtype): + with tempfile.TemporaryDirectory() as tmpdir: + pipe = DiffusionPipeline.from_pretrained( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir, torch_dtype=dtype + ).to(torch_device) + out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images + + pipe.save_pretrained(tmpdir) + loaded_pipe = DiffusionPipeline.from_pretrained(tmpdir, torch_dtype=dtype).to(torch_device) + + out_2 = loaded_pipe( + prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np" + ).images + + self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4)) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_load_dduf_from_hub_local_files_only(self): + with tempfile.TemporaryDirectory() as tmpdir: + pipe = DiffusionPipeline.from_pretrained( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir + ).to(torch_device) + out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images + + local_files_pipe = DiffusionPipeline.from_pretrained( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir, local_files_only=True + ).to(torch_device) + out_2 = local_files_pipe( + prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np" + ).images + + self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4)) + + def test_dduf_raises_error_with_custom_pipeline(self): + with self.assertRaises(NotImplementedError): + _ = DiffusionPipeline.from_pretrained( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", custom_pipeline="my_pipeline" + ) + + def test_dduf_raises_error_with_connected_pipeline(self): + with self.assertRaises(NotImplementedError): + _ = DiffusionPipeline.from_pretrained( + "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", load_connected_pipeline=True + ) + + def test_wrong_model(self): + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + with self.assertRaises(ValueError) as error_context: + _ = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/diffusers-stable-diffusion-tiny-all", text_encoder=tokenizer + ) + + assert "is of type" in str(error_context.exception) + assert "but should be" in str(error_context.exception) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_dduf_load_sharded_checkpoint_diffusion_model(self): + with tempfile.TemporaryDirectory() as tmpdir: + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-flux-dev-pipe-sharded-checkpoint-DDUF", + dduf_file="tiny-flux-dev-pipe-sharded-checkpoint.dduf", + 
cache_dir=tmpdir, + ).to(torch_device) + + out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images + + pipe.save_pretrained(tmpdir) + loaded_pipe = DiffusionPipeline.from_pretrained(tmpdir).to(torch_device) + + out_2 = loaded_pipe( + prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np" + ).images + + self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4)) + + +@slow +@require_torch_accelerator +class PipelineSlowTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_smart_download(self): + model_id = "hf-internal-testing/unet-pipeline-dummy" + with tempfile.TemporaryDirectory() as tmpdirname: + _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True) + local_repo_name = "--".join(["models"] + model_id.split("/")) + snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots") + snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0]) + + # inspect all downloaded files to make sure that everything is included + assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name)) + assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME)) + assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME)) + assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME)) + assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME)) + assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME)) + assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME)) + # let's make sure the super large numpy file: + # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy + # is not downloaded, but all the expected ones + assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy")) + + def test_warning_unused_kwargs(self): + model_id = "hf-internal-testing/unet-pipeline-dummy" + logger = logging.get_logger("diffusers.pipelines") + with tempfile.TemporaryDirectory() as tmpdirname: + with CaptureLogger(logger) as cap_logger: + DiffusionPipeline.from_pretrained( + model_id, + not_used=True, + cache_dir=tmpdirname, + force_download=True, + ) + + assert ( + cap_logger.out.strip().split("\n")[-1] + == "Keyword arguments {'not_used': True} are not expected by DDPMPipeline and will be ignored." + ) + + def test_from_save_pretrained(self): + # 1. 
Load models + model = UNet2DModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=3, + out_channels=3, + down_block_types=("DownBlock2D", "AttnDownBlock2D"), + up_block_types=("AttnUpBlock2D", "UpBlock2D"), + ) + scheduler = DDPMScheduler(num_train_timesteps=10) + + ddpm = DDPMPipeline(model, scheduler) + ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + with tempfile.TemporaryDirectory() as tmpdirname: + ddpm.save_pretrained(tmpdirname) + new_ddpm = DDPMPipeline.from_pretrained(tmpdirname) + new_ddpm.to(torch_device) + + generator = torch.Generator(device=torch_device).manual_seed(0) + image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images + + generator = torch.Generator(device=torch_device).manual_seed(0) + new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images + + assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" + + @is_torch_compile + @require_torch_2 + @unittest.skipIf( + get_python_version() == (3, 12), + reason="Torch Dynamo isn't yet supported for Python 3.12.", + ) + def test_from_save_pretrained_dynamo(self): + torch.compiler.reset() + with torch._inductor.utils.fresh_inductor_cache(): + run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None) + + def test_from_pretrained_hub(self): + model_path = "google/ddpm-cifar10-32" + + scheduler = DDPMScheduler(num_train_timesteps=10) + + ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler) + ddpm = ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler) + ddpm_from_hub = ddpm_from_hub.to(torch_device) + ddpm_from_hub.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=torch_device).manual_seed(0) + image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images + + generator = torch.Generator(device=torch_device).manual_seed(0) + new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images + + assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" + + def test_from_pretrained_hub_pass_model(self): + model_path = "google/ddpm-cifar10-32" + + scheduler = DDPMScheduler(num_train_timesteps=10) + + # pass unet into DiffusionPipeline + unet = UNet2DModel.from_pretrained(model_path) + ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler) + ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device) + ddpm_from_hub_custom_model.set_progress_bar_config(disable=None) + + ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler) + ddpm_from_hub = ddpm_from_hub.to(torch_device) + ddpm_from_hub.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=torch_device).manual_seed(0) + image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="np").images + + generator = torch.Generator(device=torch_device).manual_seed(0) + new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images + + assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass" + + def test_output_format(self): + model_path = "google/ddpm-cifar10-32" + + scheduler = DDIMScheduler.from_pretrained(model_path) + pipe = 
DDIMPipeline.from_pretrained(model_path, scheduler=scheduler) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + images = pipe(output_type="np").images + assert images.shape == (1, 32, 32, 3) + assert isinstance(images, np.ndarray) + + images = pipe(output_type="pil", num_inference_steps=4).images + assert isinstance(images, list) + assert len(images) == 1 + assert isinstance(images[0], PIL.Image.Image) + + # use PIL by default + images = pipe(num_inference_steps=4).images + assert isinstance(images, list) + assert isinstance(images[0], PIL.Image.Image) + + @require_flax + def test_from_flax_from_pt(self): + pipe_pt = StableDiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None + ) + pipe_pt.to(torch_device) + + from diffusers import FlaxStableDiffusionPipeline + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe_pt.save_pretrained(tmpdirname) + + pipe_flax, params = FlaxStableDiffusionPipeline.from_pretrained( + tmpdirname, safety_checker=None, from_pt=True + ) + + with tempfile.TemporaryDirectory() as tmpdirname: + pipe_flax.save_pretrained(tmpdirname, params=params) + pipe_pt_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None, from_flax=True) + pipe_pt_2.to(torch_device) + + prompt = "Hello" + + generator = torch.manual_seed(0) + image_0 = pipe_pt( + [prompt], + generator=generator, + num_inference_steps=2, + output_type="np", + ).images[0] + + generator = torch.manual_seed(0) + image_1 = pipe_pt_2( + [prompt], + generator=generator, + num_inference_steps=2, + output_type="np", + ).images[0] + + assert np.abs(image_0 - image_1).sum() < 1e-5, "Models don't give the same forward pass" + + @require_compel + def test_weighted_prompts_compel(self): + from compel import Compel + + pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") + pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + pipe.enable_model_cpu_offload(device=torch_device) + pipe.enable_attention_slicing() + + compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) + + prompt = "a red cat playing with a ball{}" + + prompts = [prompt.format(s) for s in ["", "++", "--"]] + + prompt_embeds = compel(prompts) + + generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])] + + images = pipe( + prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="np" + ).images + + for i, image in enumerate(images): + expected_image = load_numpy( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + f"/compel/forest_{i}.npy" + ) + + assert np.abs(image - expected_image).max() < 3e-1 + + +@nightly +@require_torch_accelerator +class PipelineNightlyTests(unittest.TestCase): + def setUp(self): + # clean up the VRAM before each test + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + # clean up the VRAM after each test + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_ddpm_ddim_equality_batched(self): + seed = 0 + model_id = "google/ddpm-cifar10-32" + + unet = UNet2DModel.from_pretrained(model_id) + ddpm_scheduler = DDPMScheduler() + ddim_scheduler = DDIMScheduler() + + ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler) + ddpm.to(torch_device) + ddpm.set_progress_bar_config(disable=None) + + ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler) + ddim.to(torch_device) + 
ddim.set_progress_bar_config(disable=None) + + generator = torch.Generator(device=torch_device).manual_seed(seed) + ddpm_images = ddpm(batch_size=2, generator=generator, output_type="np").images + + generator = torch.Generator(device=torch_device).manual_seed(seed) + ddim_images = ddim( + batch_size=2, + generator=generator, + num_inference_steps=1000, + eta=1.0, + output_type="np", + use_clipped_model_output=True, # Need this to make DDIM match DDPM + ).images + + # the values aren't exactly equal, but the images look the same visually + assert np.abs(ddpm_images - ddim_images).max() < 1e-1 + + +@slow +@require_torch_2 +@require_torch_accelerator +@require_peft_backend +@require_peft_version_greater("0.14.0") +@is_torch_compile +class TestLoraHotSwappingForPipeline(unittest.TestCase): + """Test that hotswapping does not result in recompilation in a pipeline. + + We're not extensively testing the hotswapping functionality since it is implemented in PEFT and is extensively + tested there. The goal of this test is specifically to ensure that hotswapping with diffusers does not require + recompilation. + + See + https://github.com/huggingface/peft/blob/eaab05e18d51fb4cce20a73c9acd82a00c013b83/tests/test_gpu_examples.py#L4252 + for the analogous PEFT test. + + """ + + def tearDown(self): + # It is critical that the dynamo cache is reset for each test. Otherwise, if the test re-uses the same model, + # there will be recompilation errors, as torch caches the model when run in the same process. + super().tearDown() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def get_unet_lora_config(self, lora_rank, lora_alpha, target_modules): + # from diffusers test_models_unet_2d_condition.py + from peft import LoraConfig + + unet_lora_config = LoraConfig( + r=lora_rank, + lora_alpha=lora_alpha, + target_modules=target_modules, + init_lora_weights=False, + use_dora=False, + ) + return unet_lora_config + + def get_lora_state_dicts(self, modules_to_save, adapter_name): + from peft import get_peft_model_state_dict + + state_dicts = {} + for module_name, module in modules_to_save.items(): + if module is not None: + state_dicts[f"{module_name}_lora_layers"] = get_peft_model_state_dict( + module, adapter_name=adapter_name + ) + return state_dicts + + def get_dummy_input(self): + pipeline_inputs = { + "prompt": "A painting of a squirrel eating a burger", + "num_inference_steps": 5, + "guidance_scale": 6.0, + "output_type": "np", + "return_dict": False, + } + return pipeline_inputs + + def check_pipeline_hotswap(self, do_compile, rank0, rank1, target_modules0, target_modules1=None): + """ + Check that hotswapping works on a pipeline. + + Steps: + - create 2 LoRA adapters and save them + - load the first adapter + - hotswap the second adapter + - check that the outputs are correct + - optionally compile the model + + Note: We set rank == alpha here because save_lora_adapter does not save the alpha scalings, thus the test would + fail if the values are different. Since rank != alpha does not matter for the purpose of this test, this is + fine. 
+ """ + # create 2 adapters with different ranks and alphas + dummy_input = self.get_dummy_input() + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + alpha0, alpha1 = rank0, rank1 + max_rank = max([rank0, rank1]) + if target_modules1 is None: + target_modules1 = target_modules0[:] + lora_config0 = self.get_unet_lora_config(rank0, alpha0, target_modules0) + lora_config1 = self.get_unet_lora_config(rank1, alpha1, target_modules1) + + torch.manual_seed(0) + pipeline.unet.add_adapter(lora_config0, adapter_name="adapter0") + output0_before = pipeline(**dummy_input, generator=torch.manual_seed(0))[0] + + torch.manual_seed(1) + pipeline.unet.add_adapter(lora_config1, adapter_name="adapter1") + pipeline.unet.set_adapter("adapter1") + output1_before = pipeline(**dummy_input, generator=torch.manual_seed(0))[0] + + # sanity check + tol = 1e-3 + assert not np.allclose(output0_before, output1_before, atol=tol, rtol=tol) + assert not (output0_before == 0).all() + assert not (output1_before == 0).all() + + with tempfile.TemporaryDirectory() as tmp_dirname: + # save the adapter checkpoints + lora0_state_dicts = self.get_lora_state_dicts({"unet": pipeline.unet}, adapter_name="adapter0") + StableDiffusionPipeline.save_lora_weights( + save_directory=os.path.join(tmp_dirname, "adapter0"), safe_serialization=True, **lora0_state_dicts + ) + lora1_state_dicts = self.get_lora_state_dicts({"unet": pipeline.unet}, adapter_name="adapter1") + StableDiffusionPipeline.save_lora_weights( + save_directory=os.path.join(tmp_dirname, "adapter1"), safe_serialization=True, **lora1_state_dicts + ) + del pipeline + + # load the first adapter + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + if do_compile or (rank0 != rank1): + # no need to prepare if the model is not compiled or if the ranks are identical + pipeline.enable_lora_hotswap(target_rank=max_rank) + + file_name0 = os.path.join(tmp_dirname, "adapter0", "pytorch_lora_weights.safetensors") + file_name1 = os.path.join(tmp_dirname, "adapter1", "pytorch_lora_weights.safetensors") + + pipeline.load_lora_weights(file_name0) + if do_compile: + pipeline.unet = torch.compile(pipeline.unet, mode="reduce-overhead") + + output0_after = pipeline(**dummy_input, generator=torch.manual_seed(0))[0] + + # sanity check: still same result + assert np.allclose(output0_before, output0_after, atol=tol, rtol=tol) + + # hotswap the 2nd adapter + pipeline.load_lora_weights(file_name1, hotswap=True, adapter_name="default_0") + output1_after = pipeline(**dummy_input, generator=torch.manual_seed(0))[0] + + # sanity check: since it's the same LoRA, the results should be identical + assert np.allclose(output1_before, output1_after, atol=tol, rtol=tol) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_pipeline(self, rank0, rank1): + self.check_pipeline_hotswap( + do_compile=False, rank0=rank0, rank1=rank1, target_modules0=["to_q", "to_k", "to_v", "to_out.0"] + ) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_pipline_linear(self, rank0, rank1): + # It's important to add this context to raise an error on recompilation + target_modules = ["to_q", "to_k", "to_v", "to_out.0"] + with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): + self.check_pipeline_hotswap(do_compile=True, 
rank0=rank0, rank1=rank1, target_modules0=target_modules) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_pipline_conv2d(self, rank0, rank1): + # It's important to add this context to raise an error on recompilation + target_modules = ["conv", "conv1", "conv2"] + with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): + self.check_pipeline_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) + + @parameterized.expand([(11, 11), (7, 13), (13, 7)]) # important to test small to large and vice versa + def test_hotswapping_compiled_pipline_both_linear_and_conv2d(self, rank0, rank1): + # It's important to add this context to raise an error on recompilation + target_modules = ["to_q", "conv"] + with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache(): + self.check_pipeline_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules) + + def test_enable_lora_hotswap_called_after_adapter_added_raises(self): + # ensure that enable_lora_hotswap is called before loading the first adapter + lora_config = self.get_unet_lora_config(8, 8, target_modules=["to_q"]) + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + pipeline.unet.add_adapter(lora_config) + msg = re.escape("Call `enable_lora_hotswap` before loading the first adapter.") + with self.assertRaisesRegex(RuntimeError, msg): + pipeline.enable_lora_hotswap(target_rank=32) + + def test_enable_lora_hotswap_called_after_adapter_added_warns(self): + # ensure that enable_lora_hotswap is called before loading the first adapter + from diffusers.loaders.peft import logger + + lora_config = self.get_unet_lora_config(8, 8, target_modules=["to_q"]) + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + pipeline.unet.add_adapter(lora_config) + msg = ( + "It is recommended to call `enable_lora_hotswap` before loading the first adapter to avoid recompilation." 
+ ) + with self.assertLogs(logger=logger, level="WARNING") as cm: + pipeline.enable_lora_hotswap(target_rank=32, check_compiled="warn") + assert any(msg in log for log in cm.output) + + def test_enable_lora_hotswap_called_after_adapter_added_ignore(self): + # check possibility to ignore the error/warning + lora_config = self.get_unet_lora_config(8, 8, target_modules=["to_q"]) + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + pipeline.unet.add_adapter(lora_config) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always")  # Capture all warnings + pipeline.enable_lora_hotswap(target_rank=32, check_compiled="warn") + self.assertEqual(len(w), 0, f"Expected no warnings, but got: {[str(warn.message) for warn in w]}") + + def test_enable_lora_hotswap_wrong_check_compiled_argument_raises(self): + # check that wrong argument value raises an error + lora_config = self.get_unet_lora_config(8, 8, target_modules=["to_q"]) + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + pipeline.unet.add_adapter(lora_config) + msg = re.escape("check_compiles should be one of 'error', 'warn', or 'ignore', got 'wrong-argument' instead.") + with self.assertRaisesRegex(ValueError, msg): + pipeline.enable_lora_hotswap(target_rank=32, check_compiled="wrong-argument") + + def test_hotswap_second_adapter_targets_more_layers_raises(self): + # check the error and log + from diffusers.loaders.peft import logger + + # at the moment, PEFT requires the 2nd adapter to target the same or a subset of layers + target_modules0 = ["to_q"] + target_modules1 = ["to_q", "to_k"] + with self.assertRaises(RuntimeError):  # peft raises RuntimeError + with self.assertLogs(logger=logger, level="ERROR") as cm: + self.check_pipeline_hotswap( + do_compile=True, rank0=8, rank1=8, target_modules0=target_modules0, target_modules1=target_modules1 + ) + assert any("Hotswapping adapter0 was unsuccessful" in log for log in cm.output) + + def test_hotswap_component_not_supported_raises(self): + # right now, some components don't support hotswapping, e.g. 
the text_encoder + from peft import LoraConfig + + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + lora_config0 = LoraConfig(target_modules=["q_proj"]) + lora_config1 = LoraConfig(target_modules=["q_proj"]) + + pipeline.text_encoder.add_adapter(lora_config0, adapter_name="adapter0") + pipeline.text_encoder.add_adapter(lora_config1, adapter_name="adapter1") + + with tempfile.TemporaryDirectory() as tmp_dirname: + # save the adapter checkpoints + lora0_state_dicts = self.get_lora_state_dicts( + {"text_encoder": pipeline.text_encoder}, adapter_name="adapter0" + ) + StableDiffusionPipeline.save_lora_weights( + save_directory=os.path.join(tmp_dirname, "adapter0"), safe_serialization=True, **lora0_state_dicts + ) + lora1_state_dicts = self.get_lora_state_dicts( + {"text_encoder": pipeline.text_encoder}, adapter_name="adapter1" + ) + StableDiffusionPipeline.save_lora_weights( + save_directory=os.path.join(tmp_dirname, "adapter1"), safe_serialization=True, **lora1_state_dicts + ) + del pipeline + + # load the first adapter + pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sd-pipe").to(torch_device) + file_name0 = os.path.join(tmp_dirname, "adapter0", "pytorch_lora_weights.safetensors") + file_name1 = os.path.join(tmp_dirname, "adapter1", "pytorch_lora_weights.safetensors") + + pipeline.load_lora_weights(file_name0) + msg = re.escape( + "At the moment, hotswapping is not supported for text encoders, please pass `hotswap=False`" + ) + with self.assertRaisesRegex(ValueError, msg): + pipeline.load_lora_weights(file_name1, hotswap=True, adapter_name="default_0") diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_auto.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_auto.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c639c367f73517a1d9f0c2b051d51d17e961c3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_auto.py @@ -0,0 +1,575 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import os +import shutil +import unittest +from collections import OrderedDict +from pathlib import Path + +import torch +from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection + +from diffusers import ( + AutoPipelineForImage2Image, + AutoPipelineForInpainting, + AutoPipelineForText2Image, + ControlNetModel, + DiffusionPipeline, +) +from diffusers.pipelines.auto_pipeline import ( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + AUTO_INPAINT_PIPELINES_MAPPING, + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, +) + +from ..testing_utils import slow + + +PRETRAINED_MODEL_REPO_MAPPING = OrderedDict( + [ + ("stable-diffusion", "stable-diffusion-v1-5/stable-diffusion-v1-5"), + ("if", "DeepFloyd/IF-I-XL-v1.0"), + ("kandinsky", "kandinsky-community/kandinsky-2-1"), + ("kandinsky22", "kandinsky-community/kandinsky-2-2-decoder"), + ] +) + + +class AutoPipelineFastTest(unittest.TestCase): + @property + def dummy_image_encoder(self): + torch.manual_seed(0) + config = CLIPVisionConfig( + hidden_size=1, + projection_dim=1, + num_hidden_layers=1, + num_attention_heads=1, + image_size=1, + intermediate_size=1, + patch_size=1, + ) + return CLIPVisionModelWithProjection(config) + + def test_from_pipe_consistent(self): + pipe = AutoPipelineForText2Image.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False + ) + original_config = dict(pipe.config) + + pipe = AutoPipelineForImage2Image.from_pipe(pipe) + assert dict(pipe.config) == original_config + + pipe = AutoPipelineForText2Image.from_pipe(pipe) + assert dict(pipe.config) == original_config + + def test_from_pipe_override(self): + pipe = AutoPipelineForText2Image.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", requires_safety_checker=False + ) + + pipe = AutoPipelineForImage2Image.from_pipe(pipe, requires_safety_checker=True) + assert pipe.config.requires_safety_checker is True + + pipe = AutoPipelineForText2Image.from_pipe(pipe, requires_safety_checker=True) + assert pipe.config.requires_safety_checker is True + + def test_from_pipe_consistent_sdxl(self): + pipe = AutoPipelineForImage2Image.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-xl-pipe", + requires_aesthetics_score=True, + force_zeros_for_empty_prompt=False, + ) + + original_config = dict(pipe.config) + + pipe = AutoPipelineForText2Image.from_pipe(pipe) + pipe = AutoPipelineForImage2Image.from_pipe(pipe) + + assert dict(pipe.config) == original_config + + def test_kwargs_local_files_only(self): + repo = "hf-internal-testing/tiny-stable-diffusion-torch" + tmpdirname = DiffusionPipeline.download(repo) + tmpdirname = Path(tmpdirname) + + # edit commit_id to so that it's not the latest commit + commit_id = tmpdirname.name + new_commit_id = commit_id + "hug" + + ref_dir = tmpdirname.parent.parent / "refs/main" + with open(ref_dir, "w") as f: + f.write(new_commit_id) + + new_tmpdirname = tmpdirname.parent / new_commit_id + os.rename(tmpdirname, new_tmpdirname) + + try: + AutoPipelineForText2Image.from_pretrained(repo, local_files_only=True) + except OSError: + assert False, "not able to load local files" + + shutil.rmtree(tmpdirname.parent.parent) + + def test_from_pretrained_text2img(self): + repo = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" + pipe = AutoPipelineForText2Image.from_pretrained(repo) + assert pipe.__class__.__name__ == "StableDiffusionXLPipeline" + + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + pipe_control = 
AutoPipelineForText2Image.from_pretrained(repo, controlnet=controlnet) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + + pipe_pag = AutoPipelineForText2Image.from_pretrained(repo, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGPipeline" + + pipe_control_pag = AutoPipelineForText2Image.from_pretrained(repo, controlnet=controlnet, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGPipeline" + + def test_from_pipe_pag_text2img(self): + # test from StableDiffusionXLPipeline + pipe = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-xl-pipe") + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + + # - test `enable_pag` flag + pipe_pag = AutoPipelineForText2Image.from_pipe(pipe, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGPipeline" + assert "controlnet" not in pipe_pag.components + + pipe = AutoPipelineForText2Image.from_pipe(pipe, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLPipeline" + assert "controlnet" not in pipe.components + + # - test `enabe_pag` + `controlnet` flag + pipe_control_pag = AutoPipelineForText2Image.from_pipe(pipe, controlnet=controlnet, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGPipeline" + assert "controlnet" in pipe_control_pag.components + + pipe_control = AutoPipelineForText2Image.from_pipe(pipe, controlnet=controlnet, enable_pag=False) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + assert "controlnet" in pipe_control.components + + pipe_pag = AutoPipelineForText2Image.from_pipe(pipe, controlnet=None, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGPipeline" + assert "controlnet" not in pipe_pag.components + + pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=None, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLPipeline" + assert "controlnet" not in pipe.components + + # test from StableDiffusionXLControlNetPipeline + # - test `enable_pag` flag + pipe_control_pag = AutoPipelineForText2Image.from_pipe(pipe_control, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGPipeline" + assert "controlnet" in pipe_control_pag.components + + pipe_control = AutoPipelineForText2Image.from_pipe(pipe_control, enable_pag=False) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + assert "controlnet" in pipe_control.components + + # - test `enable_pag` + `controlnet` flag + pipe_control_pag = AutoPipelineForText2Image.from_pipe(pipe_control, controlnet=controlnet, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGPipeline" + assert "controlnet" in pipe_control_pag.components + + pipe_control = AutoPipelineForText2Image.from_pipe(pipe_control, controlnet=controlnet, enable_pag=False) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + assert "controlnet" in pipe_control.components + + pipe_pag = AutoPipelineForText2Image.from_pipe(pipe_control, controlnet=None, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGPipeline" + assert "controlnet" not in pipe_pag.components + + pipe = AutoPipelineForText2Image.from_pipe(pipe_control, controlnet=None, enable_pag=False) + assert pipe.__class__.__name__ == 
"StableDiffusionXLPipeline" + assert "controlnet" not in pipe.components + + # test from StableDiffusionXLControlNetPAGPipeline + # - test `enable_pag` flag + pipe_control_pag = AutoPipelineForText2Image.from_pipe(pipe_control_pag, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGPipeline" + assert "controlnet" in pipe_control_pag.components + + pipe_control = AutoPipelineForText2Image.from_pipe(pipe_control_pag, enable_pag=False) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + assert "controlnet" in pipe_control.components + + # - test `enable_pag` + `controlnet` flag + pipe_control_pag = AutoPipelineForText2Image.from_pipe( + pipe_control_pag, controlnet=controlnet, enable_pag=True + ) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGPipeline" + assert "controlnet" in pipe_control_pag.components + + pipe_control = AutoPipelineForText2Image.from_pipe(pipe_control_pag, controlnet=controlnet, enable_pag=False) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + assert "controlnet" in pipe_control.components + + pipe_pag = AutoPipelineForText2Image.from_pipe(pipe_control_pag, controlnet=None, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGPipeline" + assert "controlnet" not in pipe_pag.components + + pipe = AutoPipelineForText2Image.from_pipe(pipe_control_pag, controlnet=None, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLPipeline" + assert "controlnet" not in pipe.components + + pipe = AutoPipelineForText2Image.from_pipe(pipe_control_pag, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLControlNetPipeline" + assert "controlnet" in pipe.components + + def test_from_pretrained_img2img(self): + repo = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" + + pipe = AutoPipelineForImage2Image.from_pretrained(repo) + assert pipe.__class__.__name__ == "StableDiffusionXLImg2ImgPipeline" + + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + pipe_control = AutoPipelineForImage2Image.from_pretrained(repo, controlnet=controlnet) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetImg2ImgPipeline" + + pipe_pag = AutoPipelineForImage2Image.from_pretrained(repo, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGImg2ImgPipeline" + + pipe_control_pag = AutoPipelineForImage2Image.from_pretrained(repo, controlnet=controlnet, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGImg2ImgPipeline" + + def test_from_pretrained_img2img_refiner(self): + repo = "hf-internal-testing/tiny-stable-diffusion-xl-refiner-pipe" + + pipe = AutoPipelineForImage2Image.from_pretrained(repo) + assert pipe.__class__.__name__ == "StableDiffusionXLImg2ImgPipeline" + + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + pipe_control = AutoPipelineForImage2Image.from_pretrained(repo, controlnet=controlnet) + assert pipe_control.__class__.__name__ == "StableDiffusionXLControlNetImg2ImgPipeline" + + pipe_pag = AutoPipelineForImage2Image.from_pretrained(repo, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGImg2ImgPipeline" + + pipe_control_pag = AutoPipelineForImage2Image.from_pretrained(repo, controlnet=controlnet, enable_pag=True) + assert pipe_control_pag.__class__.__name__ == "StableDiffusionXLControlNetPAGImg2ImgPipeline" + + 
def test_from_pipe_pag_img2img(self): + # test from StableDiffusionXLPAGImg2ImgPipeline + pipe = AutoPipelineForImage2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-xl-pipe") + # - test `enable_pag` flag + pipe_pag = AutoPipelineForImage2Image.from_pipe(pipe, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGImg2ImgPipeline" + + pipe = AutoPipelineForImage2Image.from_pipe(pipe, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLImg2ImgPipeline" + + # testing from StableDiffusionXLPAGImg2ImgPipeline + # - test `enable_pag` flag + pipe_pag = AutoPipelineForImage2Image.from_pipe(pipe_pag, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGImg2ImgPipeline" + + pipe = AutoPipelineForImage2Image.from_pipe(pipe_pag, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLImg2ImgPipeline" + + def test_from_pretrained_inpaint(self): + repo = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" + + pipe = AutoPipelineForInpainting.from_pretrained(repo) + assert pipe.__class__.__name__ == "StableDiffusionXLInpaintPipeline" + + pipe_pag = AutoPipelineForInpainting.from_pretrained(repo, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGInpaintPipeline" + + def test_from_pretrained_inpaint_from_inpaint(self): + repo = "hf-internal-testing/tiny-stable-diffusion-xl-inpaint-pipe" + + pipe = AutoPipelineForInpainting.from_pretrained(repo) + assert pipe.__class__.__name__ == "StableDiffusionXLInpaintPipeline" + + # make sure you can use pag with inpaint-specific pipeline + pipe = AutoPipelineForInpainting.from_pretrained(repo, enable_pag=True) + assert pipe.__class__.__name__ == "StableDiffusionXLPAGInpaintPipeline" + + def test_from_pipe_pag_inpaint(self): + # test from StableDiffusionXLPAGInpaintPipeline + pipe = AutoPipelineForInpainting.from_pretrained("hf-internal-testing/tiny-stable-diffusion-xl-pipe") + # - test `enable_pag` flag + pipe_pag = AutoPipelineForInpainting.from_pipe(pipe, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGInpaintPipeline" + + pipe = AutoPipelineForInpainting.from_pipe(pipe, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLInpaintPipeline" + + # testing from StableDiffusionXLPAGInpaintPipeline + # - test `enable_pag` flag + pipe_pag = AutoPipelineForInpainting.from_pipe(pipe_pag, enable_pag=True) + assert pipe_pag.__class__.__name__ == "StableDiffusionXLPAGInpaintPipeline" + + pipe = AutoPipelineForInpainting.from_pipe(pipe_pag, enable_pag=False) + assert pipe.__class__.__name__ == "StableDiffusionXLInpaintPipeline" + + def test_from_pipe_pag_new_task(self): + # for from_pipe_new_task we only need to make sure it can map to the same pipeline from a different task, + # i.e. 
no need to test `enable_pag` + `controlnet` flag because it is already tested in `test_from_pipe_pag_text2img` and `test_from_pipe_pag_inpaint`etc + pipe_pag_text2img = AutoPipelineForText2Image.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-xl-pipe", enable_pag=True + ) + + # text2img pag -> inpaint pag + pipe_pag_inpaint = AutoPipelineForInpainting.from_pipe(pipe_pag_text2img) + assert pipe_pag_inpaint.__class__.__name__ == "StableDiffusionXLPAGInpaintPipeline" + # text2img pag -> img2img pag + pipe_pag_img2img = AutoPipelineForImage2Image.from_pipe(pipe_pag_text2img) + assert pipe_pag_img2img.__class__.__name__ == "StableDiffusionXLPAGImg2ImgPipeline" + + # inpaint pag -> text2img pag + pipe_pag_text2img = AutoPipelineForText2Image.from_pipe(pipe_pag_inpaint) + assert pipe_pag_text2img.__class__.__name__ == "StableDiffusionXLPAGPipeline" + # inpaint pag -> img2img pag + pipe_pag_img2img = AutoPipelineForImage2Image.from_pipe(pipe_pag_inpaint) + assert pipe_pag_img2img.__class__.__name__ == "StableDiffusionXLPAGImg2ImgPipeline" + + # img2img pag -> text2img pag + pipe_pag_text2img = AutoPipelineForText2Image.from_pipe(pipe_pag_img2img) + assert pipe_pag_text2img.__class__.__name__ == "StableDiffusionXLPAGPipeline" + # img2img pag -> inpaint pag + pipe_pag_inpaint = AutoPipelineForInpainting.from_pipe(pipe_pag_img2img) + assert pipe_pag_inpaint.__class__.__name__ == "StableDiffusionXLPAGInpaintPipeline" + + def test_from_pipe_controlnet_text2img(self): + pipe = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe") + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + + pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=controlnet) + assert pipe.__class__.__name__ == "StableDiffusionControlNetPipeline" + assert "controlnet" in pipe.components + + pipe = AutoPipelineForText2Image.from_pipe(pipe, controlnet=None) + assert pipe.__class__.__name__ == "StableDiffusionPipeline" + assert "controlnet" not in pipe.components + + def test_from_pipe_controlnet_img2img(self): + pipe = AutoPipelineForImage2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe") + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + + pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=controlnet) + assert pipe.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" + assert "controlnet" in pipe.components + + pipe = AutoPipelineForImage2Image.from_pipe(pipe, controlnet=None) + assert pipe.__class__.__name__ == "StableDiffusionImg2ImgPipeline" + assert "controlnet" not in pipe.components + + def test_from_pipe_controlnet_inpaint(self): + pipe = AutoPipelineForInpainting.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + + pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=controlnet) + assert pipe.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" + assert "controlnet" in pipe.components + + pipe = AutoPipelineForInpainting.from_pipe(pipe, controlnet=None) + assert pipe.__class__.__name__ == "StableDiffusionInpaintPipeline" + assert "controlnet" not in pipe.components + + def test_from_pipe_controlnet_new_task(self): + pipe_text2img = AutoPipelineForText2Image.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch") + controlnet = ControlNetModel.from_pretrained("hf-internal-testing/tiny-controlnet") + + pipe_control_img2img 
= AutoPipelineForImage2Image.from_pipe(pipe_text2img, controlnet=controlnet) + assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" + assert "controlnet" in pipe_control_img2img.components + + pipe_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=None) + assert pipe_inpaint.__class__.__name__ == "StableDiffusionInpaintPipeline" + assert "controlnet" not in pipe_inpaint.components + + # testing `from_pipe` for text2img controlnet + ## 1. from a different controlnet pipe, without controlnet argument + pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img) + assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" + assert "controlnet" in pipe_control_text2img.components + + ## 2. from a different controlnet pipe, with controlnet argument + pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_img2img, controlnet=controlnet) + assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" + assert "controlnet" in pipe_control_text2img.components + + ## 3. from same controlnet pipeline class, with a different controlnet component + pipe_control_text2img = AutoPipelineForText2Image.from_pipe(pipe_control_text2img, controlnet=controlnet) + assert pipe_control_text2img.__class__.__name__ == "StableDiffusionControlNetPipeline" + assert "controlnet" in pipe_control_text2img.components + + # testing from_pipe for inpainting + ## 1. from a different controlnet pipeline class + pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img) + assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" + assert "controlnet" in pipe_control_inpaint.components + + ## from a different controlnet pipe, with a different controlnet + pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_img2img, controlnet=controlnet) + assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" + assert "controlnet" in pipe_control_inpaint.components + + ## from same controlnet pipe, with a different controlnet + pipe_control_inpaint = AutoPipelineForInpainting.from_pipe(pipe_control_inpaint, controlnet=controlnet) + assert pipe_control_inpaint.__class__.__name__ == "StableDiffusionControlNetInpaintPipeline" + assert "controlnet" in pipe_control_inpaint.components + + # testing from_pipe from img2img controlnet + ## from a different controlnet pipe, without controlnet argument + pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img) + assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" + assert "controlnet" in pipe_control_img2img.components + + # from a different controlnet pipe, with a different controlnet component + pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_text2img, controlnet=controlnet) + assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" + assert "controlnet" in pipe_control_img2img.components + + # from same controlnet pipeline class, with a different controlnet + pipe_control_img2img = AutoPipelineForImage2Image.from_pipe(pipe_control_img2img, controlnet=controlnet) + assert pipe_control_img2img.__class__.__name__ == "StableDiffusionControlNetImg2ImgPipeline" + assert "controlnet" in pipe_control_img2img.components + + def test_from_pipe_optional_components(self): + image_encoder = self.dummy_image_encoder + 
+ pipe = AutoPipelineForText2Image.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", + image_encoder=image_encoder, + ) + + pipe = AutoPipelineForImage2Image.from_pipe(pipe) + assert pipe.image_encoder is not None + + pipe = AutoPipelineForText2Image.from_pipe(pipe, image_encoder=None) + assert pipe.image_encoder is None + + +@slow +class AutoPipelineIntegrationTest(unittest.TestCase): + def test_pipe_auto(self): + for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items(): + # test txt2img + pipe_txt2img = AutoPipelineForText2Image.from_pretrained( + model_repo, variant="fp16", torch_dtype=torch.float16 + ) + self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForText2Image.from_pipe(pipe_txt2img) + self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_txt2img) + self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) + + if "kandinsky" not in model_name: + pipe_to = AutoPipelineForInpainting.from_pipe(pipe_txt2img) + self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) + + del pipe_txt2img, pipe_to + gc.collect() + + # test img2img + + pipe_img2img = AutoPipelineForImage2Image.from_pretrained( + model_repo, variant="fp16", torch_dtype=torch.float16 + ) + self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForText2Image.from_pipe(pipe_img2img) + self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_img2img) + self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) + + if "kandinsky" not in model_name: + pipe_to = AutoPipelineForInpainting.from_pipe(pipe_img2img) + self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) + + del pipe_img2img, pipe_to + gc.collect() + + # test inpaint + + if "kandinsky" not in model_name: + pipe_inpaint = AutoPipelineForInpainting.from_pretrained( + model_repo, variant="fp16", torch_dtype=torch.float16 + ) + self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForText2Image.from_pipe(pipe_inpaint) + self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_inpaint) + self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING[model_name]) + + pipe_to = AutoPipelineForInpainting.from_pipe(pipe_inpaint) + self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING[model_name]) + + del pipe_inpaint, pipe_to + gc.collect() + + def test_from_pipe_consistent(self): + for model_name, model_repo in PRETRAINED_MODEL_REPO_MAPPING.items(): + if model_name in ["kandinsky", "kandinsky22"]: + auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image] + else: + auto_pipes = [AutoPipelineForText2Image, AutoPipelineForImage2Image, AutoPipelineForInpainting] + + # test from_pretrained + for pipe_from_class in auto_pipes: + pipe_from = pipe_from_class.from_pretrained(model_repo, variant="fp16", torch_dtype=torch.float16) + pipe_from_config = dict(pipe_from.config) + + for pipe_to_class in auto_pipes: + pipe_to = pipe_to_class.from_pipe(pipe_from) + self.assertEqual(dict(pipe_to.config), pipe_from_config) + + del pipe_from, pipe_to + gc.collect() + + def test_controlnet(self): + # test from_pretrained + model_repo = 
"stable-diffusion-v1-5/stable-diffusion-v1-5" + controlnet_repo = "lllyasviel/sd-controlnet-canny" + + controlnet = ControlNetModel.from_pretrained(controlnet_repo, torch_dtype=torch.float16) + + pipe_txt2img = AutoPipelineForText2Image.from_pretrained( + model_repo, controlnet=controlnet, torch_dtype=torch.float16 + ) + self.assertIsInstance(pipe_txt2img, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) + + pipe_img2img = AutoPipelineForImage2Image.from_pretrained( + model_repo, controlnet=controlnet, torch_dtype=torch.float16 + ) + self.assertIsInstance(pipe_img2img, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) + + pipe_inpaint = AutoPipelineForInpainting.from_pretrained( + model_repo, controlnet=controlnet, torch_dtype=torch.float16 + ) + self.assertIsInstance(pipe_inpaint, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"]) + + # test from_pipe + for pipe_from in [pipe_txt2img, pipe_img2img, pipe_inpaint]: + pipe_to = AutoPipelineForText2Image.from_pipe(pipe_from) + self.assertIsInstance(pipe_to, AUTO_TEXT2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) + self.assertEqual(dict(pipe_to.config), dict(pipe_txt2img.config)) + + pipe_to = AutoPipelineForImage2Image.from_pipe(pipe_from) + self.assertIsInstance(pipe_to, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["stable-diffusion-controlnet"]) + self.assertEqual(dict(pipe_to.config), dict(pipe_img2img.config)) + + pipe_to = AutoPipelineForInpainting.from_pipe(pipe_from) + self.assertIsInstance(pipe_to, AUTO_INPAINT_PIPELINES_MAPPING["stable-diffusion-controlnet"]) + self.assertEqual(dict(pipe_to.config), dict(pipe_inpaint.config)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_combined.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..fffc053bae3f9466fab4f9938df9cccd58c2b66f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_combined.py @@ -0,0 +1,128 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest + +import torch + +from huggingface_hub import ModelCard + +from diffusers import ( + DDPMScheduler, + DiffusionPipeline, + KandinskyV22CombinedPipeline, + KandinskyV22Pipeline, + KandinskyV22PriorPipeline, +) +from diffusers.pipelines.pipeline_utils import CONNECTED_PIPES_KEYS + + +def state_dicts_almost_equal(sd1, sd2): + sd1 = dict(sorted(sd1.items())) + sd2 = dict(sorted(sd2.items())) + + models_are_equal = True + for ten1, ten2 in zip(sd1.values(), sd2.values()): + if (ten1 - ten2).abs().sum() > 1e-3: + models_are_equal = False + + return models_are_equal + + +class CombinedPipelineFastTest(unittest.TestCase): + def modelcard_has_connected_pipeline(self, model_id): + modelcard = ModelCard.load(model_id) + connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS} + connected_pipes = {k: v for k, v in connected_pipes.items() if v is not None} + + return len(connected_pipes) > 0 + + def test_correct_modelcard_format(self): + # hf-internal-testing/tiny-random-kandinsky-v22-prior has no metadata + assert not self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-prior") + + # see https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L2 + assert self.modelcard_has_connected_pipeline("hf-internal-testing/tiny-random-kandinsky-v22-decoder") + + def test_load_connected_checkpoint_when_specified(self): + pipeline_prior = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior") + pipeline_prior_connected = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-random-kandinsky-v22-prior", load_connected_pipeline=True + ) + + # Passing `load_connected_pipeline` to prior is a no-op as the pipeline has no connected pipeline + assert pipeline_prior.__class__ == pipeline_prior_connected.__class__ + + pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder") + pipeline_connected = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-random-kandinsky-v22-decoder", load_connected_pipeline=True + ) + + # Passing `load_connected_pipeline` to decoder loads the combined pipeline + assert pipeline.__class__ != pipeline_connected.__class__ + assert pipeline.__class__ == KandinskyV22Pipeline + assert pipeline_connected.__class__ == KandinskyV22CombinedPipeline + + # check that loaded components match prior and decoder components + assert set(pipeline_connected.components.keys()) == set( + ["prior_" + k for k in pipeline_prior.components.keys()] + list(pipeline.components.keys()) + ) + + def test_load_connected_checkpoint_default(self): + prior = KandinskyV22PriorPipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-prior") + decoder = KandinskyV22Pipeline.from_pretrained("hf-internal-testing/tiny-random-kandinsky-v22-decoder") + + # check that combined pipeline loads both prior & decoder because of + # https://huggingface.co/hf-internal-testing/tiny-random-kandinsky-v22-decoder/blob/8baff9897c6be017013e21b5c562e5a381646c7e/README.md?code=true#L3 + assert ( + KandinskyV22CombinedPipeline._load_connected_pipes + ) # combined pipelines will download more checkpoints than just the one specified + pipeline = KandinskyV22CombinedPipeline.from_pretrained( + "hf-internal-testing/tiny-random-kandinsky-v22-decoder" + ) + + prior_comps = prior.components + decoder_comps = decoder.components + for k, component in 
pipeline.components.items(): + if k.startswith("prior_"): + k = k[6:] + comp = prior_comps[k] + else: + comp = decoder_comps[k] + + if isinstance(component, torch.nn.Module): + assert state_dicts_almost_equal(component.state_dict(), comp.state_dict()) + elif hasattr(component, "config"): + assert dict(component.config) == dict(comp.config) + else: + assert component.__class__ == comp.__class__ + + def test_load_connected_checkpoint_with_passed_obj(self): + pipeline = KandinskyV22CombinedPipeline.from_pretrained( + "hf-internal-testing/tiny-random-kandinsky-v22-decoder" + ) + prior_scheduler = DDPMScheduler.from_config(pipeline.prior_scheduler.config) + scheduler = DDPMScheduler.from_config(pipeline.scheduler.config) + + # make sure we pass a different scheduler and prior_scheduler + assert pipeline.prior_scheduler.__class__ != prior_scheduler.__class__ + assert pipeline.scheduler.__class__ != scheduler.__class__ + + pipeline_new = KandinskyV22CombinedPipeline.from_pretrained( + "hf-internal-testing/tiny-random-kandinsky-v22-decoder", + prior_scheduler=prior_scheduler, + scheduler=scheduler, + ) + assert dict(pipeline_new.prior_scheduler.config) == dict(prior_scheduler.config) + assert dict(pipeline_new.scheduler.config) == dict(scheduler.config) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_common.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_common.py new file mode 100644 index 0000000000000000000000000000000000000000..dcef33897e6a4875e36440c7ffea3c6b5244ff9f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_common.py @@ -0,0 +1,2853 @@ +import gc +import inspect +import json +import os +import tempfile +import unittest +import uuid +from typing import Any, Callable, Dict, Union + +import numpy as np +import PIL.Image +import torch +import torch.nn as nn +from huggingface_hub import ModelCard, delete_repo +from huggingface_hub.utils import is_jinja_available +from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer + +import diffusers +from diffusers import ( + AsymmetricAutoencoderKL, + AutoencoderKL, + AutoencoderTiny, + ConsistencyDecoderVAE, + DDIMScheduler, + DiffusionPipeline, + FasterCacheConfig, + KolorsPipeline, + PyramidAttentionBroadcastConfig, + StableDiffusionPipeline, + StableDiffusionXLPipeline, + UNet2DConditionModel, + apply_faster_cache, +) +from diffusers.hooks import apply_group_offloading +from diffusers.hooks.faster_cache import FasterCacheBlockHook, FasterCacheDenoiserHook +from diffusers.hooks.first_block_cache import FirstBlockCacheConfig +from diffusers.hooks.pyramid_attention_broadcast import PyramidAttentionBroadcastHook +from diffusers.image_processor import VaeImageProcessor +from diffusers.loaders import FluxIPAdapterMixin, IPAdapterMixin +from diffusers.models.attention import AttentionModuleMixin +from diffusers.models.attention_processor import AttnProcessor +from diffusers.models.controlnets.controlnet_xs import UNetControlNetXSModel +from diffusers.models.unets.unet_3d_condition import UNet3DConditionModel +from diffusers.models.unets.unet_i2vgen_xl import I2VGenXLUNet +from diffusers.models.unets.unet_motion_model import UNetMotionModel +from diffusers.pipelines.pipeline_utils import StableDiffusionMixin +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import logging +from diffusers.utils.import_utils import is_xformers_available +from diffusers.utils.source_code_parsing_utils import ReturnNameVisitor + 
+from ..models.autoencoders.vae import ( + get_asym_autoencoder_kl_config, + get_autoencoder_kl_config, + get_autoencoder_tiny_config, + get_consistency_vae_config, +) +from ..models.transformers.test_models_transformer_flux import create_flux_ip_adapter_state_dict +from ..models.unets.test_models_unet_2d_condition import ( + create_ip_adapter_faceid_state_dict, + create_ip_adapter_state_dict, +) +from ..others.test_utils import TOKEN, USER, is_staging_test +from ..testing_utils import ( + CaptureLogger, + backend_empty_cache, + numpy_cosine_similarity_distance, + require_accelerate_version_greater, + require_accelerator, + require_hf_hub_version_greater, + require_torch, + require_torch_accelerator, + require_transformers_version_greater, + skip_mps, + torch_device, +) + + +def to_np(tensor): + if isinstance(tensor, torch.Tensor): + tensor = tensor.detach().cpu().numpy() + + return tensor + + +def check_same_shape(tensor_list): + shapes = [tensor.shape for tensor in tensor_list] + return all(shape == shapes[0] for shape in shapes[1:]) + + +def check_qkv_fusion_matches_attn_procs_length(model, original_attn_processors): + current_attn_processors = model.attn_processors + return len(current_attn_processors) == len(original_attn_processors) + + +def check_qkv_fusion_processors_exist(model): + current_attn_processors = model.attn_processors + proc_names = [v.__class__.__name__ for _, v in current_attn_processors.items()] + return all(p.startswith("Fused") for p in proc_names) + + +def check_qkv_fused_layers_exist(model, layer_names): + is_fused_submodules = [] + for submodule in model.modules(): + if not isinstance(submodule, AttentionModuleMixin): + continue + is_fused_attribute_set = submodule.fused_projections + is_fused_layer = True + for layer in layer_names: + is_fused_layer = is_fused_layer and getattr(submodule, layer, None) is not None + is_fused = is_fused_attribute_set and is_fused_layer + is_fused_submodules.append(is_fused) + return all(is_fused_submodules) + + +class SDFunctionTesterMixin: + """ + This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. + It provides a set of common tests for PyTorch pipelines that inherit from StableDiffusionMixin, e.g. vae_slicing, vae_tiling, freeu, etc. 
+ """ + + def test_vae_slicing(self, image_count=4): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + # components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + if "image" in inputs: # fix batch size mismatch in I2V_Gen pipeline + inputs["image"] = [inputs["image"]] * image_count + output_1 = pipe(**inputs) + + # make sure sliced vae decode yields the same result + pipe.enable_vae_slicing() + inputs = self.get_dummy_inputs(device) + inputs["prompt"] = [inputs["prompt"]] * image_count + if "image" in inputs: + inputs["image"] = [inputs["image"]] * image_count + inputs["return_dict"] = False + output_2 = pipe(**inputs) + + assert np.abs(output_2[0].flatten() - output_1[0].flatten()).max() < 1e-2 + + def test_vae_tiling(self): + components = self.get_dummy_components() + + # make sure here that pndm scheduler skips prk + if "safety_checker" in components: + components["safety_checker"] = None + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["return_dict"] = False + + # Test that tiled decode at 512x512 yields the same result as the non-tiled decode + output_1 = pipe(**inputs)[0] + + # make sure tiled vae decode yields the same result + pipe.enable_vae_tiling() + inputs = self.get_dummy_inputs(torch_device) + inputs["return_dict"] = False + output_2 = pipe(**inputs)[0] + + assert np.abs(to_np(output_2) - to_np(output_1)).max() < 5e-1 + + # test that tiled decode works with various shapes + shapes = [(1, 4, 73, 97), (1, 4, 65, 49)] + with torch.no_grad(): + for shape in shapes: + zeros = torch.zeros(shape).to(torch_device) + pipe.vae.decode(zeros) + + # MPS currently doesn't support ComplexFloats, which are required for FreeU - see https://github.com/huggingface/diffusers/issues/7569. + @skip_mps + def test_freeu(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # Normal inference + inputs = self.get_dummy_inputs(torch_device) + inputs["return_dict"] = False + inputs["output_type"] = "np" + output = pipe(**inputs)[0] + + # FreeU-enabled inference + pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) + inputs = self.get_dummy_inputs(torch_device) + inputs["return_dict"] = False + inputs["output_type"] = "np" + output_freeu = pipe(**inputs)[0] + + # FreeU-disabled inference + pipe.disable_freeu() + freeu_keys = {"s1", "s2", "b1", "b2"} + for upsample_block in pipe.unet.up_blocks: + for key in freeu_keys: + assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." + + inputs = self.get_dummy_inputs(torch_device) + inputs["return_dict"] = False + inputs["output_type"] = "np" + output_no_freeu = pipe(**inputs)[0] + + assert not np.allclose(output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1]), ( + "Enabling of FreeU should lead to different results." + ) + assert np.allclose(output, output_no_freeu, atol=1e-2), ( + f"Disabling of FreeU should lead to results similar to the default pipeline results but Max Abs Error={np.abs(output_no_freeu - output).max()}." 
+ ) + + def test_fused_qkv_projections(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image = pipe(**inputs)[0] + original_image_slice = image[0, -3:, -3:, -1] + + pipe.fuse_qkv_projections() + for _, component in pipe.components.items(): + if ( + isinstance(component, nn.Module) + and hasattr(component, "original_attn_processors") + and component.original_attn_processors is not None + ): + assert check_qkv_fusion_processors_exist(component), ( + "Something wrong with the fused attention processors. Expected all the attention processors to be fused." + ) + assert check_qkv_fusion_matches_attn_procs_length(component, component.original_attn_processors), ( + "Something wrong with the attention processors concerning the fused QKV projections." + ) + + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image_fused = pipe(**inputs)[0] + image_slice_fused = image_fused[0, -3:, -3:, -1] + + pipe.unfuse_qkv_projections() + inputs = self.get_dummy_inputs(device) + inputs["return_dict"] = False + image_disabled = pipe(**inputs)[0] + image_slice_disabled = image_disabled[0, -3:, -3:, -1] + + assert np.allclose(original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2), ( + "Fusion of QKV projections shouldn't affect the outputs." + ) + assert np.allclose(image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." + ) + assert np.allclose(original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2), ( + "Original outputs should match when fused QKV projections are disabled." + ) + + +class IPAdapterTesterMixin: + """ + This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. + It provides a set of common tests for pipelines that support IP Adapters. + """ + + def test_pipeline_signature(self): + parameters = inspect.signature(self.pipeline_class.__call__).parameters + + assert issubclass(self.pipeline_class, IPAdapterMixin) + self.assertIn( + "ip_adapter_image", + parameters, + "`ip_adapter_image` argument must be supported by the `__call__` method", + ) + self.assertIn( + "ip_adapter_image_embeds", + parameters, + "`ip_adapter_image_embeds` argument must be supported by the `__call__` method", + ) + + def _get_dummy_image_embeds(self, cross_attention_dim: int = 32): + return torch.randn((2, 1, cross_attention_dim), device=torch_device) + + def _get_dummy_faceid_image_embeds(self, cross_attention_dim: int = 32): + return torch.randn((2, 1, 1, cross_attention_dim), device=torch_device) + + def _get_dummy_masks(self, input_size: int = 64): + _masks = torch.zeros((1, 1, input_size, input_size), device=torch_device) + _masks[0, :, :, : int(input_size / 2)] = 1 + return _masks + + def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): + parameters = inspect.signature(self.pipeline_class.__call__).parameters + if "image" in parameters.keys() and "strength" in parameters.keys(): + inputs["num_inference_steps"] = 4 + + inputs["output_type"] = "np" + inputs["return_dict"] = False + return inputs + + def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): + r"""Tests for IP-Adapter. 
+ + The following scenarios are tested: + - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. + - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. + - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. + - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. + """ + # Raising the tolerance for this test when it's run on a CPU because we + # compare against static slices and that can be shaky (with a VVVV low probability). + expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components).to(torch_device) + pipe.set_progress_bar_config(disable=None) + cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) + + # forward pass without ip adapter + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + if expected_pipe_slice is None: + output_without_adapter = pipe(**inputs)[0] + else: + output_without_adapter = expected_pipe_slice + + # 1. Single IP-Adapter test cases + adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights(adapter_state_dict) + + # forward pass with single ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + pipe.set_ip_adapter_scale(0.0) + output_without_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with single ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + pipe.set_ip_adapter_scale(42.0) + output_with_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() + max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() + + self.assertLess( + max_diff_without_adapter_scale, + expected_max_diff, + "Output without ip-adapter must be same as normal inference", + ) + self.assertGreater( + max_diff_with_adapter_scale, 1e-2, "Output with ip-adapter must be different from normal inference" + ) + + # 2. 
Multi IP-Adapter test cases + adapter_state_dict_1 = create_ip_adapter_state_dict(pipe.unet) + adapter_state_dict_2 = create_ip_adapter_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) + + # forward pass with multi ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 + pipe.set_ip_adapter_scale([0.0, 0.0]) + output_without_multi_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with multi ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] * 2 + pipe.set_ip_adapter_scale([42.0, 42.0]) + output_with_multi_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_multi_adapter_scale = np.abs( + output_without_multi_adapter_scale - output_without_adapter + ).max() + max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() + self.assertLess( + max_diff_without_multi_adapter_scale, + expected_max_diff, + "Output without multi-ip-adapter must be same as normal inference", + ) + self.assertGreater( + max_diff_with_multi_adapter_scale, + 1e-2, + "Output with multi-ip-adapter scale must be different from normal inference", + ) + + def test_ip_adapter_cfg(self, expected_max_diff: float = 1e-4): + parameters = inspect.signature(self.pipeline_class.__call__).parameters + + if "guidance_scale" not in parameters: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components).to(torch_device) + pipe.set_progress_bar_config(disable=None) + cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) + + adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights(adapter_state_dict) + pipe.set_ip_adapter_scale(1.0) + + # forward pass with CFG not applied + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)[0].unsqueeze(0)] + inputs["guidance_scale"] = 1.0 + out_no_cfg = pipe(**inputs)[0] + + # forward pass with CFG applied + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + inputs["guidance_scale"] = 7.5 + out_cfg = pipe(**inputs)[0] + + assert out_cfg.shape == out_no_cfg.shape + + def test_ip_adapter_masks(self, expected_max_diff: float = 1e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components).to(torch_device) + pipe.set_progress_bar_config(disable=None) + cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) + sample_size = pipe.unet.config.get("sample_size", 32) + block_out_channels = pipe.vae.config.get("block_out_channels", [128, 256, 512, 512]) + input_size = sample_size * (2 ** (len(block_out_channels) - 1)) + + # forward pass without ip adapter + inputs = 
self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + output_without_adapter = pipe(**inputs)[0] + output_without_adapter = output_without_adapter[0, -3:, -3:, -1].flatten() + + adapter_state_dict = create_ip_adapter_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights(adapter_state_dict) + + # forward pass with single ip adapter and masks, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + inputs["cross_attention_kwargs"] = {"ip_adapter_masks": [self._get_dummy_masks(input_size)]} + pipe.set_ip_adapter_scale(0.0) + output_without_adapter_scale = pipe(**inputs)[0] + output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with single ip adapter and masks, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(cross_attention_dim)] + inputs["cross_attention_kwargs"] = {"ip_adapter_masks": [self._get_dummy_masks(input_size)]} + pipe.set_ip_adapter_scale(42.0) + output_with_adapter_scale = pipe(**inputs)[0] + output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() + max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() + + self.assertLess( + max_diff_without_adapter_scale, + expected_max_diff, + "Output without ip-adapter must be same as normal inference", + ) + self.assertGreater( + max_diff_with_adapter_scale, 1e-3, "Output with ip-adapter must be different from normal inference" + ) + + def test_ip_adapter_faceid(self, expected_max_diff: float = 1e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components).to(torch_device) + pipe.set_progress_bar_config(disable=None) + cross_attention_dim = pipe.unet.config.get("cross_attention_dim", 32) + + # forward pass without ip adapter + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + output_without_adapter = pipe(**inputs)[0] + output_without_adapter = output_without_adapter[0, -3:, -3:, -1].flatten() + + adapter_state_dict = create_ip_adapter_faceid_state_dict(pipe.unet) + pipe.unet._load_ip_adapter_weights(adapter_state_dict) + + # forward pass with single ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_faceid_image_embeds(cross_attention_dim)] + pipe.set_ip_adapter_scale(0.0) + output_without_adapter_scale = pipe(**inputs)[0] + output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with single ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_faceid_image_embeds(cross_attention_dim)] + pipe.set_ip_adapter_scale(42.0) + output_with_adapter_scale = pipe(**inputs)[0] + output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() + max_diff_with_adapter_scale = 
np.abs(output_with_adapter_scale - output_without_adapter).max() + + self.assertLess( + max_diff_without_adapter_scale, + expected_max_diff, + "Output without ip-adapter must be same as normal inference", + ) + self.assertGreater( + max_diff_with_adapter_scale, 1e-3, "Output with ip-adapter must be different from normal inference" + ) + + +class FluxIPAdapterTesterMixin: + """ + This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. + It provides a set of common tests for pipelines that support IP Adapters. + """ + + def test_pipeline_signature(self): + parameters = inspect.signature(self.pipeline_class.__call__).parameters + + assert issubclass(self.pipeline_class, FluxIPAdapterMixin) + self.assertIn( + "ip_adapter_image", + parameters, + "`ip_adapter_image` argument must be supported by the `__call__` method", + ) + self.assertIn( + "ip_adapter_image_embeds", + parameters, + "`ip_adapter_image_embeds` argument must be supported by the `__call__` method", + ) + + def _get_dummy_image_embeds(self, image_embed_dim: int = 768): + return torch.randn((1, 1, image_embed_dim), device=torch_device) + + def _modify_inputs_for_ip_adapter_test(self, inputs: Dict[str, Any]): + inputs["negative_prompt"] = "" + if "true_cfg_scale" in inspect.signature(self.pipeline_class.__call__).parameters: + inputs["true_cfg_scale"] = 4.0 + inputs["output_type"] = "np" + inputs["return_dict"] = False + return inputs + + def test_ip_adapter(self, expected_max_diff: float = 1e-4, expected_pipe_slice=None): + r"""Tests for IP-Adapter. + + The following scenarios are tested: + - Single IP-Adapter with scale=0 should produce same output as no IP-Adapter. + - Multi IP-Adapter with scale=0 should produce same output as no IP-Adapter. + - Single IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. + - Multi IP-Adapter with scale!=0 should produce different output compared to no IP-Adapter. + """ + # Raising the tolerance for this test when it's run on a CPU because we + # compare against static slices and that can be shaky (with a VVVV low probability). + expected_max_diff = 9e-4 if torch_device == "cpu" else expected_max_diff + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components).to(torch_device) + pipe.set_progress_bar_config(disable=None) + image_embed_dim = ( + pipe.transformer.config.pooled_projection_dim + if hasattr(pipe.transformer.config, "pooled_projection_dim") + else 768 + ) + + # forward pass without ip adapter + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + if expected_pipe_slice is None: + output_without_adapter = pipe(**inputs)[0] + else: + output_without_adapter = expected_pipe_slice + + # 1. 
Single IP-Adapter test cases + adapter_state_dict = create_flux_ip_adapter_state_dict(pipe.transformer) + pipe.transformer._load_ip_adapter_weights(adapter_state_dict) + + # forward pass with single ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] + inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] + pipe.set_ip_adapter_scale(0.0) + output_without_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_without_adapter_scale = output_without_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with single ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] + inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] + pipe.set_ip_adapter_scale(42.0) + output_with_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_with_adapter_scale = output_with_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_adapter_scale = np.abs(output_without_adapter_scale - output_without_adapter).max() + max_diff_with_adapter_scale = np.abs(output_with_adapter_scale - output_without_adapter).max() + + self.assertLess( + max_diff_without_adapter_scale, + expected_max_diff, + "Output without ip-adapter must be same as normal inference", + ) + self.assertGreater( + max_diff_with_adapter_scale, 1e-2, "Output with ip-adapter must be different from normal inference" + ) + + # 2. Multi IP-Adapter test cases + adapter_state_dict_1 = create_flux_ip_adapter_state_dict(pipe.transformer) + adapter_state_dict_2 = create_flux_ip_adapter_state_dict(pipe.transformer) + pipe.transformer._load_ip_adapter_weights([adapter_state_dict_1, adapter_state_dict_2]) + + # forward pass with multi ip adapter, but scale=0 which should have no effect + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 + inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 + pipe.set_ip_adapter_scale([0.0, 0.0]) + output_without_multi_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_without_multi_adapter_scale = output_without_multi_adapter_scale[0, -3:, -3:, -1].flatten() + + # forward pass with multi ip adapter, but with scale of adapter weights + inputs = self._modify_inputs_for_ip_adapter_test(self.get_dummy_inputs(torch_device)) + inputs["ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 + inputs["negative_ip_adapter_image_embeds"] = [self._get_dummy_image_embeds(image_embed_dim)] * 2 + pipe.set_ip_adapter_scale([42.0, 42.0]) + output_with_multi_adapter_scale = pipe(**inputs)[0] + if expected_pipe_slice is not None: + output_with_multi_adapter_scale = output_with_multi_adapter_scale[0, -3:, -3:, -1].flatten() + + max_diff_without_multi_adapter_scale = np.abs( + output_without_multi_adapter_scale - output_without_adapter + ).max() + max_diff_with_multi_adapter_scale = np.abs(output_with_multi_adapter_scale - output_without_adapter).max() + self.assertLess( + max_diff_without_multi_adapter_scale, + expected_max_diff, + "Output without 
multi-ip-adapter must be same as normal inference", + ) + self.assertGreater( + max_diff_with_multi_adapter_scale, + 1e-2, + "Output with multi-ip-adapter scale must be different from normal inference", + ) + + +class PipelineLatentTesterMixin: + """ + This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes. + It provides a set of common tests for PyTorch pipelines that have a VAE, e.g. + equivalence of different input and output types, etc. + """ + + @property + def image_params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `image_params` in the child test class. " + "`image_params` are tested to check that all accepted input image types (i.e. `pt`, `pil`, `np`) produce the same results" + ) + + @property + def image_latents_params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `image_latents_params` in the child test class. " + "`image_latents_params` are tested to check that passing latents directly produces the same results" + ) + + def get_dummy_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): + inputs = self.get_dummy_inputs(device, seed) + + def convert_to_pt(image): + if isinstance(image, torch.Tensor): + input_image = image + elif isinstance(image, np.ndarray): + input_image = VaeImageProcessor.numpy_to_pt(image) + elif isinstance(image, PIL.Image.Image): + input_image = VaeImageProcessor.pil_to_numpy(image) + input_image = VaeImageProcessor.numpy_to_pt(input_image) + else: + raise ValueError(f"unsupported input_image_type {type(image)}") + return input_image + + def convert_pt_to_type(image, input_image_type): + if input_image_type == "pt": + input_image = image + elif input_image_type == "np": + input_image = VaeImageProcessor.pt_to_numpy(image) + elif input_image_type == "pil": + input_image = VaeImageProcessor.pt_to_numpy(image) + input_image = VaeImageProcessor.numpy_to_pil(input_image) + else: + raise ValueError(f"unsupported input_image_type {input_image_type}.") + return input_image + + for image_param in self.image_params: + if image_param in inputs.keys(): + inputs[image_param] = convert_pt_to_type( + convert_to_pt(inputs[image_param]).to(device), input_image_type + ) + + inputs["output_type"] = output_type + + return inputs + + def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4): + self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff) + + def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type="pt"): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + output_pt = pipe( + **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pt") + )[0] + output_np = pipe( + **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="np") + )[0] + output_pil = pipe( + **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type="pil") + )[0] + + max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() + self.assertLess( + max_diff, expected_max_diff, "`output_type=='pt'` generates different results from `output_type=='np'`" + ) + + max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max() + self.assertLess(max_diff, 2.0, "`output_type=='pil'` generates different results from `output_type=='np'`") + + def test_pt_np_pil_inputs_equivalent(self): + 
if len(self.image_params) == 0: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] + out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] + out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pil"))[0] + + max_diff = np.abs(out_input_pt - out_input_np).max() + self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generates a different result from `input_type=='np'`") + max_diff = np.abs(out_input_pil - out_input_np).max() + self.assertLess(max_diff, 1e-2, "`input_type=='pil'` generates a different result from `input_type=='np'`") + + def test_latents_input(self): + if len(self.image_latents_params) == 0: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] + + vae = components["vae"] + inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") + generator = inputs["generator"] + for image_param in self.image_latents_params: + if image_param in inputs.keys(): + inputs[image_param] = ( + vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor + ) + out_latents_inputs = pipe(**inputs)[0] + + max_diff = np.abs(out - out_latents_inputs).max() + self.assertLess(max_diff, 1e-4, "passing latents as image input generates a different result from passing an image") + + def test_multi_vae(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + block_out_channels = pipe.vae.config.block_out_channels + norm_num_groups = pipe.vae.config.norm_num_groups + + vae_classes = [AutoencoderKL, AsymmetricAutoencoderKL, ConsistencyDecoderVAE, AutoencoderTiny] + configs = [ + get_autoencoder_kl_config(block_out_channels, norm_num_groups), + get_asym_autoencoder_kl_config(block_out_channels, norm_num_groups), + get_consistency_vae_config(block_out_channels, norm_num_groups), + get_autoencoder_tiny_config(block_out_channels), + ] + + out_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] + + for vae_cls, config in zip(vae_classes, configs): + vae = vae_cls(**config) + vae = vae.to(torch_device) + components["vae"] = vae + vae_pipe = self.pipeline_class(**components) + out_vae_np = vae_pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="np"))[0] + + assert out_vae_np.shape == out_np.shape + + +@require_torch +class PipelineFromPipeTesterMixin: + @property + def original_pipeline_class(self): + if "xl" in self.pipeline_class.__name__.lower(): + original_pipeline_class = StableDiffusionXLPipeline + elif "kolors" in self.pipeline_class.__name__.lower(): + original_pipeline_class = KolorsPipeline + else: + original_pipeline_class = StableDiffusionPipeline + + return original_pipeline_class + + def get_dummy_inputs_pipe(self, device, seed=0): + inputs = self.get_dummy_inputs(device, seed=seed) + inputs["output_type"] = "np" + inputs["return_dict"] = False + return inputs + + def get_dummy_inputs_for_pipe_original(self, device, seed=0): + inputs = {} + for 
k, v in self.get_dummy_inputs_pipe(device, seed=seed).items(): + if k in set(inspect.signature(self.original_pipeline_class.__call__).parameters.keys()): + inputs[k] = v + return inputs + + def test_from_pipe_consistent_config(self): + if self.original_pipeline_class == StableDiffusionPipeline: + original_repo = "hf-internal-testing/tiny-stable-diffusion-pipe" + original_kwargs = {"requires_safety_checker": False} + elif self.original_pipeline_class == StableDiffusionXLPipeline: + original_repo = "hf-internal-testing/tiny-stable-diffusion-xl-pipe" + original_kwargs = {"requires_aesthetics_score": True, "force_zeros_for_empty_prompt": False} + elif self.original_pipeline_class == KolorsPipeline: + original_repo = "hf-internal-testing/tiny-kolors-pipe" + original_kwargs = {"force_zeros_for_empty_prompt": False} + else: + raise ValueError( + "original_pipeline_class must be either StableDiffusionPipeline or StableDiffusionXLPipeline" + ) + + # create original_pipeline_class(sd/sdxl) + pipe_original = self.original_pipeline_class.from_pretrained(original_repo, **original_kwargs) + + # original_pipeline_class(sd/sdxl) -> pipeline_class + pipe_components = self.get_dummy_components() + pipe_additional_components = {} + for name, component in pipe_components.items(): + if name not in pipe_original.components: + pipe_additional_components[name] = component + + pipe = self.pipeline_class.from_pipe(pipe_original, **pipe_additional_components) + + # pipeline_class -> original_pipeline_class(sd/sdxl) + original_pipe_additional_components = {} + for name, component in pipe_original.components.items(): + if name not in pipe.components or not isinstance(component, pipe.components[name].__class__): + original_pipe_additional_components[name] = component + + pipe_original_2 = self.original_pipeline_class.from_pipe(pipe, **original_pipe_additional_components) + + # compare the config + original_config = {k: v for k, v in pipe_original.config.items() if not k.startswith("_")} + original_config_2 = {k: v for k, v in pipe_original_2.config.items() if not k.startswith("_")} + assert original_config_2 == original_config + + def test_from_pipe_consistent_forward_pass(self, expected_max_diff=1e-3): + components = self.get_dummy_components() + original_expected_modules, _ = self.original_pipeline_class._get_signature_keys(self.original_pipeline_class) + + # pipeline components that are also expected to be in the original pipeline + original_pipe_components = {} + # additional components that are not in the pipeline, but expected in the original pipeline + original_pipe_additional_components = {} + # additional components that are in the pipeline, but not expected in the original pipeline + current_pipe_additional_components = {} + + for name, component in components.items(): + if name in original_expected_modules: + original_pipe_components[name] = component + else: + current_pipe_additional_components[name] = component + for name in original_expected_modules: + if name not in original_pipe_components: + if name in self.original_pipeline_class._optional_components: + original_pipe_additional_components[name] = None + else: + raise ValueError(f"missing required module for {self.original_pipeline_class.__class__}: {name}") + + pipe_original = self.original_pipeline_class(**original_pipe_components, **original_pipe_additional_components) + for component in pipe_original.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_original.to(torch_device) 
+ pipe_original.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs_for_pipe_original(torch_device) + output_original = pipe_original(**inputs)[0] + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs_pipe(torch_device) + output = pipe(**inputs)[0] + + pipe_from_original = self.pipeline_class.from_pipe(pipe_original, **current_pipe_additional_components) + pipe_from_original.to(torch_device) + pipe_from_original.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs_pipe(torch_device) + output_from_original = pipe_from_original(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_from_original)).max() + self.assertLess( + max_diff, + expected_max_diff, + "The outputs of the pipelines created with `from_pipe` and `__init__` are different.", + ) + + inputs = self.get_dummy_inputs_for_pipe_original(torch_device) + output_original_2 = pipe_original(**inputs)[0] + + max_diff = np.abs(to_np(output_original) - to_np(output_original_2)).max() + self.assertLess(max_diff, expected_max_diff, "`from_pipe` should not change the output of original pipeline.") + + for component in pipe_original.components.values(): + if hasattr(component, "attn_processors"): + assert all(type(proc) == AttnProcessor for proc in component.attn_processors.values()), ( + "`from_pipe` changed the attention processor in original pipeline." + ) + + @require_accelerator + @require_accelerate_version_greater("0.14.0") + def test_from_pipe_consistent_forward_pass_cpu_offload(self, expected_max_diff=1e-3): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs_pipe(torch_device) + output = pipe(**inputs)[0] + + original_expected_modules, _ = self.original_pipeline_class._get_signature_keys(self.original_pipeline_class) + # pipeline components that are also expected to be in the original pipeline + original_pipe_components = {} + # additional components that are not in the pipeline, but expected in the original pipeline + original_pipe_additional_components = {} + # additional components that are in the pipeline, but not expected in the original pipeline + current_pipe_additional_components = {} + for name, component in components.items(): + if name in original_expected_modules: + original_pipe_components[name] = component + else: + current_pipe_additional_components[name] = component + for name in original_expected_modules: + if name not in original_pipe_components: + if name in self.original_pipeline_class._optional_components: + original_pipe_additional_components[name] = None + else: + raise ValueError(f"missing required module for {self.original_pipeline_class.__class__}: {name}") + + pipe_original = self.original_pipeline_class(**original_pipe_components, **original_pipe_additional_components) + for component in pipe_original.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_original.set_progress_bar_config(disable=None) + + pipe_from_original = 
self.pipeline_class.from_pipe(pipe_original, **current_pipe_additional_components) + for component in pipe_from_original.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe_from_original.enable_model_cpu_offload(device=torch_device) + pipe_from_original.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs_pipe(torch_device) + output_from_original = pipe_from_original(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_from_original)).max() + self.assertLess( + max_diff, + expected_max_diff, + "The outputs of the pipelines created with `from_pipe` and `__init__` are different.", + ) + + +@require_torch +class PipelineKarrasSchedulerTesterMixin: + """ + This mixin is designed to be used with unittest.TestCase classes. + It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers + equivalence of dict and tuple outputs, etc. + """ + + def test_karras_schedulers_shape( + self, num_inference_steps_for_strength=4, num_inference_steps_for_strength_for_iterations=5 + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + # make sure that PNDM does not need warm-up + pipe.scheduler.register_to_config(skip_prk_steps=True) + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + inputs["num_inference_steps"] = 2 + + if "strength" in inputs: + inputs["num_inference_steps"] = num_inference_steps_for_strength + inputs["strength"] = 0.5 + + outputs = [] + for scheduler_enum in KarrasDiffusionSchedulers: + if "KDPM2" in scheduler_enum.name: + inputs["num_inference_steps"] = num_inference_steps_for_strength_for_iterations + + scheduler_cls = getattr(diffusers, scheduler_enum.name) + pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) + output = pipe(**inputs)[0] + outputs.append(output) + + if "KDPM2" in scheduler_enum.name: + inputs["num_inference_steps"] = 2 + + assert check_same_shape(outputs) + + +@require_torch +class PipelineTesterMixin: + """ + This mixin is designed to be used with unittest.TestCase classes. + It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline, + equivalence of dict and tuple outputs, etc. + """ + + # Canonical parameters that are passed to `__call__` regardless + # of the type of pipeline. They are always optional and have common + # sense default values. + required_optional_params = frozenset( + [ + "num_inference_steps", + "num_images_per_prompt", + "generator", + "latents", + "output_type", + "return_dict", + ] + ) + + # set these parameters to False in the child class if the pipeline does not support the corresponding functionality + test_attention_slicing = True + + test_xformers_attention = True + test_layerwise_casting = False + test_group_offloading = False + supports_dduf = True + + def get_generator(self, seed): + device = torch_device if torch_device != "mps" else "cpu" + generator = torch.Generator(device).manual_seed(seed) + return generator + + @property + def pipeline_class(self) -> Union[Callable, DiffusionPipeline]: + raise NotImplementedError( + "You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. " + "See existing pipeline tests for reference." + ) + + def get_dummy_components(self): + raise NotImplementedError( + "You need to implement `get_dummy_components(self)` in the child test class. 
" + "See existing pipeline tests for reference." + ) + + def get_dummy_inputs(self, device, seed=0): + raise NotImplementedError( + "You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. " + "See existing pipeline tests for reference." + ) + + @property + def params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `params` in the child test class. " + "`params` are checked for if all values are present in `__call__`'s signature." + " You can set `params` using one of the common set of parameters defined in `pipeline_params.py`" + " e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to " + "image pipelines, including prompts and prompt embedding overrides." + "If your pipeline's set of arguments has minor changes from one of the common sets of arguments, " + "do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline " + "with non-configurable height and width arguments should set the attribute as " + "`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. " + "See existing pipeline tests for reference." + ) + + @property + def batch_params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `batch_params` in the child test class. " + "`batch_params` are the parameters required to be batched when passed to the pipeline's " + "`__call__` method. `pipeline_params.py` provides some common sets of parameters such as " + "`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's " + "set of batch arguments has minor changes from one of the common sets of batch arguments, " + "do not make modifications to the existing common sets of batch arguments. I.e. a text to " + "image pipeline `negative_prompt` is not batched should set the attribute as " + "`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. " + "See existing pipeline tests for reference." + ) + + @property + def callback_cfg_params(self) -> frozenset: + raise NotImplementedError( + "You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. " + "`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback " + "function when dynamically adjusting `guidance_scale`. They are variables that require special" + "treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common" + " sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's " + "set of cfg arguments has minor changes from one of the common sets of cfg arguments, " + "do not make modifications to the existing common sets of cfg arguments. I.e. 
for inpaint pipeline, you " + " need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as" + "`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`" + ) + + def setUp(self): + # clean up the VRAM before each test + super().setUp() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + # Skip tests for pipelines that inherit from DeprecatedPipelineMixin + from diffusers.pipelines.pipeline_utils import DeprecatedPipelineMixin + + if hasattr(self, "pipeline_class") and issubclass(self.pipeline_class, DeprecatedPipelineMixin): + import pytest + + pytest.skip(reason=f"Deprecated Pipeline: {self.pipeline_class.__name__}") + + def tearDown(self): + # clean up the VRAM after each test in case of CUDA runtime errors + super().tearDown() + torch.compiler.reset() + gc.collect() + backend_empty_cache(torch_device) + + def test_save_load_local(self, expected_max_difference=5e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(diffusers.logging.INFO) + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + + with CaptureLogger(logger) as cap_logger: + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + for name in pipe_loaded.components.keys(): + if name not in pipe_loaded._optional_components: + assert name in str(cap_logger) + + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + def test_pipeline_call_signature(self): + self.assertTrue( + hasattr(self.pipeline_class, "__call__"), f"{self.pipeline_class} should have a `__call__` method" + ) + + parameters = inspect.signature(self.pipeline_class.__call__).parameters + + optional_parameters = set() + + for k, v in parameters.items(): + if v.default != inspect._empty: + optional_parameters.add(k) + + parameters = set(parameters.keys()) + parameters.remove("self") + parameters.discard("kwargs") # kwargs can be added if arguments of pipeline call function are deprecated + + remaining_required_parameters = set() + + for param in self.params: + if param not in parameters: + remaining_required_parameters.add(param) + + self.assertTrue( + len(remaining_required_parameters) == 0, + f"Required parameters not present: {remaining_required_parameters}", + ) + + remaining_required_optional_parameters = set() + + for param in self.required_optional_params: + if param not in optional_parameters: + remaining_required_optional_parameters.add(param) + + self.assertTrue( + len(remaining_required_optional_parameters) == 0, + f"Required optional parameters not present: {remaining_required_optional_parameters}", + ) + + def test_inference_batch_consistent(self, batch_sizes=[2]): + 
self._test_inference_batch_consistent(batch_sizes=batch_sizes) + + def _test_inference_batch_consistent( + self, batch_sizes=[2], additional_params_copy_to_batched_inputs=["num_inference_steps"], batch_generator=True + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # prepare batched inputs + batched_inputs = [] + for batch_size in batch_sizes: + batched_input = {} + batched_input.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + # make unequal batch sizes + batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + + # make last batch super long + batched_input[name][-1] = 100 * "very long" + + else: + batched_input[name] = batch_size * [value] + + if batch_generator and "generator" in inputs: + batched_input["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_input["batch_size"] = batch_size + + batched_inputs.append(batched_input) + + logger.setLevel(level=diffusers.logging.WARNING) + for batch_size, batched_input in zip(batch_sizes, batched_inputs): + output = pipe(**batched_input) + assert len(output[0]) == batch_size + + def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4): + self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff) + + def _test_inference_batch_single_identical( + self, + batch_size=2, + expected_max_diff=1e-4, + additional_params_copy_to_batched_inputs=["num_inference_steps"], + ): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for components in pipe.components.values(): + if hasattr(components, "set_default_attn_processor"): + components.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is has been used in self.get_dummy_inputs + inputs["generator"] = self.get_generator(0) + + logger = logging.get_logger(pipe.__module__) + logger.setLevel(level=diffusers.logging.FATAL) + + # batchify inputs + batched_inputs = {} + batched_inputs.update(inputs) + + for name in self.batch_params: + if name not in inputs: + continue + + value = inputs[name] + if name == "prompt": + len_prompt = len(value) + batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] + batched_inputs[name][-1] = 100 * "very long" + + else: + batched_inputs[name] = batch_size * [value] + + if "generator" in inputs: + batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] + + if "batch_size" in inputs: + batched_inputs["batch_size"] = batch_size + + for arg in additional_params_copy_to_batched_inputs: + batched_inputs[arg] = inputs[arg] + + output = pipe(**inputs) + output_batch = pipe(**batched_inputs) + + assert output_batch[0].shape[0] == batch_size + + max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() + assert max_diff < expected_max_diff + + def test_dict_tuple_outputs_equivalent(self, expected_slice=None, expected_max_difference=1e-4): + components = self.get_dummy_components() + pipe = 
self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + if expected_slice is None: + output = pipe(**self.get_dummy_inputs(generator_device))[0] + else: + output = expected_slice + + output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0] + + if expected_slice is None: + max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() + else: + if output_tuple.ndim != 5: + max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1].flatten()).max() + else: + max_diff = np.abs(to_np(output) - to_np(output_tuple)[0, -3:, -3:, -1, -1].flatten()).max() + + self.assertLess(max_diff, expected_max_difference) + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))} + + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_float16_inference(self, expected_max_diff=5e-2): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + components = self.get_dummy_components() + pipe_fp16 = self.pipeline_class(**components) + for component in pipe_fp16.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_fp16.to(torch_device, torch.float16) + pipe_fp16.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is used inside dummy inputs + if "generator" in inputs: + inputs["generator"] = self.get_generator(0) + output = pipe(**inputs)[0] + + fp16_inputs = self.get_dummy_inputs(torch_device) + # Reset generator in case it is used inside dummy inputs + if "generator" in fp16_inputs: + fp16_inputs["generator"] = self.get_generator(0) + output_fp16 = pipe_fp16(**fp16_inputs)[0] + + if isinstance(output, torch.Tensor): + output = output.cpu() + output_fp16 = output_fp16.cpu() + + max_diff = numpy_cosine_similarity_distance(output.flatten(), output_fp16.flatten()) + assert max_diff < expected_max_diff + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self, expected_max_diff=1e-2): + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, 
torch_dtype=torch.float16) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess( + max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." + ) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + if not hasattr(self.pipeline_class, "_optional_components"): + return + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + @require_accelerator + def test_to_device(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + pipe.to("cpu") + model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] + self.assertTrue(all(device == "cpu" for device in model_devices)) + + output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] + self.assertTrue(np.isnan(output_cpu).sum() == 0) + + pipe.to(torch_device) + model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] + self.assertTrue(all(device == torch_device for device in model_devices)) + + output_device = pipe(**self.get_dummy_inputs(torch_device))[0] + self.assertTrue(np.isnan(to_np(output_device)).sum() == 0) + + def test_to_dtype(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + model_dtypes = [component.dtype for component in components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) + + pipe.to(dtype=torch.float16) + model_dtypes = 
[component.dtype for component in components.values() if hasattr(component, "dtype")] + self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) + + def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3): + self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff) + + def _test_attention_slicing_forward_pass( + self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3 + ): + if not self.test_attention_slicing: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + output_without_slicing = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=1) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing1 = pipe(**inputs)[0] + + pipe.enable_attention_slicing(slice_size=2) + inputs = self.get_dummy_inputs(generator_device) + output_with_slicing2 = pipe(**inputs)[0] + + if test_max_difference: + max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max() + max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max() + self.assertLess( + max(max_diff1, max_diff2), + expected_max_diff, + "Attention slicing should not affect the inference results", + ) + + if test_mean_pixel_difference: + assert_mean_pixel_difference(to_np(output_with_slicing1[0]), to_np(output_without_slicing[0])) + assert_mean_pixel_difference(to_np(output_with_slicing2[0]), to_np(output_without_slicing[0])) + + @require_accelerator + @require_accelerate_version_greater("0.14.0") + def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4): + import accelerate + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_without_offload = pipe(**inputs)[0] + + pipe.enable_sequential_cpu_offload(device=torch_device) + assert pipe._execution_device.type == torch_device + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_with_offload = pipe(**inputs)[0] + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") + + # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly + offloaded_modules = { + k: v + for k, v in pipe.components.items() + if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload + } + # 1. all offloaded modules should be saved to cpu and moved to meta device + self.assertTrue( + all(v.device.type == "meta" for v in offloaded_modules.values()), + f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'meta']}", + ) + # 2. 
all offloaded modules should have hook installed + self.assertTrue( + all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), + f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", + ) + # 3. all offloaded modules should have correct hooks installed, should be either one of these two + # - `AlignDevicesHook` + # - a SequentialHook` that contains `AlignDevicesHook` + offloaded_modules_with_incorrect_hooks = {} + for k, v in offloaded_modules.items(): + if hasattr(v, "_hf_hook"): + if isinstance(v._hf_hook, accelerate.hooks.SequentialHook): + # if it is a `SequentialHook`, we loop through its `hooks` attribute to check if it only contains `AlignDevicesHook` + for hook in v._hf_hook.hooks: + if not isinstance(hook, accelerate.hooks.AlignDevicesHook): + offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook.hooks[0]) + elif not isinstance(v._hf_hook, accelerate.hooks.AlignDevicesHook): + offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) + + self.assertTrue( + len(offloaded_modules_with_incorrect_hooks) == 0, + f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", + ) + + @require_accelerator + @require_accelerate_version_greater("0.17.0") + def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): + import accelerate + + generator_device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_without_offload = pipe(**inputs)[0] + + pipe.enable_model_cpu_offload(device=torch_device) + assert pipe._execution_device.type == torch_device + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_with_offload = pipe(**inputs)[0] + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() + self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") + + # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly + offloaded_modules = { + k: v + for k, v in pipe.components.items() + if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload + } + # 1. check if all offloaded modules are saved to cpu + self.assertTrue( + all(v.device.type == "cpu" for v in offloaded_modules.values()), + f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'cpu']}", + ) + # 2. check if all offloaded modules have hooks installed + self.assertTrue( + all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), + f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", + ) + # 3. 
check if all offloaded modules have correct type of hooks installed, should be `CpuOffload` + offloaded_modules_with_incorrect_hooks = {} + for k, v in offloaded_modules.items(): + if hasattr(v, "_hf_hook") and not isinstance(v._hf_hook, accelerate.hooks.CpuOffload): + offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook) + + self.assertTrue( + len(offloaded_modules_with_incorrect_hooks) == 0, + f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}", + ) + + @require_accelerator + @require_accelerate_version_greater("0.17.0") + def test_cpu_offload_forward_pass_twice(self, expected_max_diff=2e-4): + import accelerate + + generator_device = "cpu" + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.set_progress_bar_config(disable=None) + + pipe.enable_model_cpu_offload() + inputs = self.get_dummy_inputs(generator_device) + output_with_offload = pipe(**inputs)[0] + + pipe.enable_model_cpu_offload() + inputs = self.get_dummy_inputs(generator_device) + output_with_offload_twice = pipe(**inputs)[0] + + max_diff = np.abs(to_np(output_with_offload) - to_np(output_with_offload_twice)).max() + self.assertLess( + max_diff, expected_max_diff, "running CPU offloading 2nd time should not affect the inference results" + ) + + # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly + offloaded_modules = { + k: v + for k, v in pipe.components.items() + if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload + } + # 1. check if all offloaded modules are saved to cpu + self.assertTrue( + all(v.device.type == "cpu" for v in offloaded_modules.values()), + f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'cpu']}", + ) + # 2. check if all offloaded modules have hooks installed + self.assertTrue( + all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()), + f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}", + ) + # 3. 
check if all offloaded modules have correct type of hooks installed, should be `CpuOffload`
+        offloaded_modules_with_incorrect_hooks = {}
+        for k, v in offloaded_modules.items():
+            if hasattr(v, "_hf_hook") and not isinstance(v._hf_hook, accelerate.hooks.CpuOffload):
+                offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook)
+
+        self.assertTrue(
+            len(offloaded_modules_with_incorrect_hooks) == 0,
+            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
+        )
+
+    @require_accelerator
+    @require_accelerate_version_greater("0.14.0")
+    def test_sequential_offload_forward_pass_twice(self, expected_max_diff=2e-4):
+        import accelerate
+
+        generator_device = "cpu"
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+
+        for component in pipe.components.values():
+            if hasattr(component, "set_default_attn_processor"):
+                component.set_default_attn_processor()
+
+        pipe.set_progress_bar_config(disable=None)
+
+        pipe.enable_sequential_cpu_offload(device=torch_device)
+        inputs = self.get_dummy_inputs(generator_device)
+        output_with_offload = pipe(**inputs)[0]
+
+        pipe.enable_sequential_cpu_offload(device=torch_device)
+        inputs = self.get_dummy_inputs(generator_device)
+        output_with_offload_twice = pipe(**inputs)[0]
+
+        max_diff = np.abs(to_np(output_with_offload) - to_np(output_with_offload_twice)).max()
+        self.assertLess(
+            max_diff, expected_max_diff, "running sequential offloading a second time should not affect the inference results"
+        )
+
+        # make sure all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are offloaded correctly
+        offloaded_modules = {
+            k: v
+            for k, v in pipe.components.items()
+            if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload
+        }
+        # 1. check if all offloaded modules are moved to meta device
+        self.assertTrue(
+            all(v.device.type == "meta" for v in offloaded_modules.values()),
+            f"Not offloaded: {[k for k, v in offloaded_modules.items() if v.device.type != 'meta']}",
+        )
+        # 2. check if all offloaded modules have hooks installed
+        self.assertTrue(
+            all(hasattr(v, "_hf_hook") for k, v in offloaded_modules.items()),
+            f"No hook attached: {[k for k, v in offloaded_modules.items() if not hasattr(v, '_hf_hook')]}",
+        )
+        # 3. check if all offloaded modules have correct hooks installed, should be either one of these two
+        # - `AlignDevicesHook`
+        # - a `SequentialHook` that contains `AlignDevicesHook`
+        offloaded_modules_with_incorrect_hooks = {}
+        for k, v in offloaded_modules.items():
+            if hasattr(v, "_hf_hook"):
+                if isinstance(v._hf_hook, accelerate.hooks.SequentialHook):
+                    # if it is a `SequentialHook`, we loop through its `hooks` attribute to check if it only contains `AlignDevicesHook`
+                    for hook in v._hf_hook.hooks:
+                        if not isinstance(hook, accelerate.hooks.AlignDevicesHook):
+                            offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook.hooks[0])
+                elif not isinstance(v._hf_hook, accelerate.hooks.AlignDevicesHook):
+                    offloaded_modules_with_incorrect_hooks[k] = type(v._hf_hook)
+
+        self.assertTrue(
+            len(offloaded_modules_with_incorrect_hooks) == 0,
+            f"Not installed correct hook: {offloaded_modules_with_incorrect_hooks}",
+        )
+
+    @unittest.skipIf(
+        torch_device != "cuda" or not is_xformers_available(),
+        reason="XFormers attention is only available with CUDA and `xformers` installed",
+    )
+    def test_xformers_attention_forwardGenerator_pass(self):
+        self._test_xformers_attention_forwardGenerator_pass()
+
+    def _test_xformers_attention_forwardGenerator_pass(
+        self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4
+    ):
+        if not self.test_xformers_attention:
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        for component in pipe.components.values():
+            if hasattr(component, "set_default_attn_processor"):
+                component.set_default_attn_processor()
+        pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        inputs = self.get_dummy_inputs(torch_device)
+        output_without_offload = pipe(**inputs)[0]
+        output_without_offload = (
+            output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload
+        )
+
+        pipe.enable_xformers_memory_efficient_attention()
+        inputs = self.get_dummy_inputs(torch_device)
+        output_with_offload = pipe(**inputs)[0]
+        output_with_offload = (
+            output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_with_offload
+        )
+
+        if test_max_difference:
+            max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()
+            self.assertLess(max_diff, expected_max_diff, "XFormers attention should not affect the inference results")
+
+        if test_mean_pixel_difference:
+            assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])
+
+    def test_num_images_per_prompt(self):
+        sig = inspect.signature(self.pipeline_class.__call__)
+
+        if "num_images_per_prompt" not in sig.parameters:
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe = pipe.to(torch_device)
+        pipe.set_progress_bar_config(disable=None)
+
+        batch_sizes = [1, 2]
+        num_images_per_prompts = [1, 2]
+
+        for batch_size in batch_sizes:
+            for num_images_per_prompt in num_images_per_prompts:
+                inputs = self.get_dummy_inputs(torch_device)
+
+                for key in inputs.keys():
+                    if key in self.batch_params:
+                        inputs[key] = batch_size * [inputs[key]]
+
+                images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
+
+                assert images.shape[0] == batch_size * num_images_per_prompt
+
+    def test_cfg(self):
+        sig = inspect.signature(self.pipeline_class.__call__)
+
+        if "guidance_scale" not in sig.parameters:
+            return
+
+        components = self.get_dummy_components()
+        pipe = self.pipeline_class(**components)
+        pipe = 
pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + + inputs["guidance_scale"] = 1.0 + out_no_cfg = pipe(**inputs)[0] + + inputs["guidance_scale"] = 7.5 + out_cfg = pipe(**inputs)[0] + + assert out_cfg.shape == out_no_cfg.shape + + def test_callback_inputs(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_inputs_subset(pipe, i, t, callback_kwargs): + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + def callback_inputs_all(pipe, i, t, callback_kwargs): + for tensor_name in pipe._callback_tensor_inputs: + assert tensor_name in callback_kwargs + + # iterate over callback args + for tensor_name, tensor_value in callback_kwargs.items(): + # check that we're only passing in allowed tensor inputs + assert tensor_name in pipe._callback_tensor_inputs + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # Test passing in a subset + inputs["callback_on_step_end"] = callback_inputs_subset + inputs["callback_on_step_end_tensor_inputs"] = ["latents"] + inputs["output_type"] = "latent" + output = pipe(**inputs)[0] + + # Test passing in a everything + inputs["callback_on_step_end"] = callback_inputs_all + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + inputs["output_type"] = "latent" + output = pipe(**inputs)[0] + + def callback_inputs_change_tensor(pipe, i, t, callback_kwargs): + is_last = i == (pipe.num_timesteps - 1) + if is_last: + callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) + return callback_kwargs + + inputs["callback_on_step_end"] = callback_inputs_change_tensor + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + inputs["output_type"] = "latent" + output = pipe(**inputs)[0] + assert output.abs().sum() == 0 + + def test_callback_cfg(self): + sig = inspect.signature(self.pipeline_class.__call__) + has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters + has_callback_step_end = "callback_on_step_end" in sig.parameters + + if not (has_callback_tensor_inputs and has_callback_step_end): + return + + if "guidance_scale" not in sig.parameters: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + self.assertTrue( + hasattr(pipe, "_callback_tensor_inputs"), + f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", + ) + + def callback_increase_guidance(pipe, i, t, callback_kwargs): + pipe._guidance_scale += 1.0 + + return callback_kwargs + + inputs = self.get_dummy_inputs(torch_device) + + # use 
cfg guidance because some pipelines modify the shape of the latents + # outside of the denoising loop + inputs["guidance_scale"] = 2.0 + inputs["callback_on_step_end"] = callback_increase_guidance + inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs + _ = pipe(**inputs)[0] + + # we increase the guidance scale by 1.0 at every step + # check that the guidance scale is increased by the number of scheduler timesteps + # accounts for models that modify the number of inference steps based on strength + assert pipe.guidance_scale == (inputs["guidance_scale"] + pipe.num_timesteps) + + def test_serialization_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + model_components = [ + component_name for component_name, component in pipe.components.items() if isinstance(component, nn.Module) + ] + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + + with open(f"{tmpdir}/model_index.json", "r") as f: + config = json.load(f) + + for subfolder in os.listdir(tmpdir): + if not os.path.isfile(subfolder) and subfolder in model_components: + folder_path = os.path.join(tmpdir, subfolder) + is_folder = os.path.isdir(folder_path) and subfolder in config + assert is_folder and any(p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)) + + def test_loading_with_variants(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + variant = "fp16" + + def is_nan(tensor): + if tensor.ndimension() == 0: + has_nan = torch.isnan(tensor).item() + else: + has_nan = torch.isnan(tensor).any() + return has_nan + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, variant=variant, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, variant=variant) + + model_components_pipe = { + component_name: component + for component_name, component in pipe.components.items() + if isinstance(component, nn.Module) + } + model_components_pipe_loaded = { + component_name: component + for component_name, component in pipe_loaded.components.items() + if isinstance(component, nn.Module) + } + for component_name in model_components_pipe: + pipe_component = model_components_pipe[component_name] + pipe_loaded_component = model_components_pipe_loaded[component_name] + for p1, p2 in zip(pipe_component.parameters(), pipe_loaded_component.parameters()): + # nan check for luminanext (mps). + if not (is_nan(p1) and is_nan(p2)): + self.assertTrue(torch.equal(p1, p2)) + + def test_loading_with_incorrect_variants_raises_error(self): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + variant = "fp16" + + with tempfile.TemporaryDirectory() as tmpdir: + # Don't save with variants. + pipe.save_pretrained(tmpdir, safe_serialization=False) + + with self.assertRaises(ValueError) as error: + _ = self.pipeline_class.from_pretrained(tmpdir, variant=variant) + + assert f"You are trying to load the model files of the `variant={variant}`" in str(error.exception) + + def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=1e-4, rtol=1e-4): + if not hasattr(self.pipeline_class, "encode_prompt"): + return + + components = self.get_dummy_components() + + # We initialize the pipeline with only text encoders and tokenizers, + # mimicking a real-world scenario. 
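+        # Every component whose name does not contain "text" or "tokenizer" is set to None below, so
+        # only the prompt-encoding path of the pipeline is exercised in isolation.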
+ components_with_text_encoders = {} + for k in components: + if "text" in k or "tokenizer" in k: + components_with_text_encoders[k] = components[k] + else: + components_with_text_encoders[k] = None + pipe_with_just_text_encoder = self.pipeline_class(**components_with_text_encoders) + pipe_with_just_text_encoder = pipe_with_just_text_encoder.to(torch_device) + + # Get inputs and also the args of `encode_prompts`. + inputs = self.get_dummy_inputs(torch_device) + encode_prompt_signature = inspect.signature(pipe_with_just_text_encoder.encode_prompt) + encode_prompt_parameters = list(encode_prompt_signature.parameters.values()) + + # Required args in encode_prompt with those with no default. + required_params = [] + for param in encode_prompt_parameters: + if param.name == "self" or param.name == "kwargs": + continue + if param.default is inspect.Parameter.empty: + required_params.append(param.name) + + # Craft inputs for the `encode_prompt()` method to run in isolation. + encode_prompt_param_names = [p.name for p in encode_prompt_parameters if p.name != "self"] + input_keys = list(inputs.keys()) + encode_prompt_inputs = {k: inputs.pop(k) for k in input_keys if k in encode_prompt_param_names} + + pipe_call_signature = inspect.signature(pipe_with_just_text_encoder.__call__) + pipe_call_parameters = pipe_call_signature.parameters + + # For each required arg in encode_prompt, check if it's missing + # in encode_prompt_inputs. If so, see if __call__ has a default + # for that arg and use it if available. + for required_param_name in required_params: + if required_param_name not in encode_prompt_inputs: + pipe_call_param = pipe_call_parameters.get(required_param_name, None) + if pipe_call_param is not None and pipe_call_param.default is not inspect.Parameter.empty: + # Use the default from pipe.__call__ + encode_prompt_inputs[required_param_name] = pipe_call_param.default + elif extra_required_param_value_dict is not None and isinstance(extra_required_param_value_dict, dict): + encode_prompt_inputs[required_param_name] = extra_required_param_value_dict[required_param_name] + else: + raise ValueError( + f"Required parameter '{required_param_name}' in " + f"encode_prompt has no default in either encode_prompt or __call__." + ) + + # Compute `encode_prompt()`. + with torch.no_grad(): + encoded_prompt_outputs = pipe_with_just_text_encoder.encode_prompt(**encode_prompt_inputs) + + # Programmatically determine the return names of `encode_prompt.` + ast_visitor = ReturnNameVisitor() + encode_prompt_tree = ast_visitor.get_ast_tree(cls=self.pipeline_class) + ast_visitor.visit(encode_prompt_tree) + prompt_embed_kwargs = ast_visitor.return_names + prompt_embeds_kwargs = dict(zip(prompt_embed_kwargs, encoded_prompt_outputs)) + + # Pack the outputs of `encode_prompt`. + adapted_prompt_embeds_kwargs = { + k: prompt_embeds_kwargs.pop(k) for k in list(prompt_embeds_kwargs.keys()) if k in pipe_call_parameters + } + + # now initialize a pipeline without text encoders and compute outputs with the + # `encode_prompt()` outputs and other relevant inputs. + components_with_text_encoders = {} + for k in components: + if "text" in k or "tokenizer" in k: + components_with_text_encoders[k] = None + else: + components_with_text_encoders[k] = components[k] + pipe_without_text_encoders = self.pipeline_class(**components_with_text_encoders).to(torch_device) + + # Set `negative_prompt` to None as we have already calculated its embeds + # if it was present in `inputs`. 
This is because otherwise we will interfere wrongly + # for non-None `negative_prompt` values as defaults (PixArt for example). + pipe_without_tes_inputs = {**inputs, **adapted_prompt_embeds_kwargs} + if ( + pipe_call_parameters.get("negative_prompt", None) is not None + and pipe_call_parameters.get("negative_prompt").default is not None + ): + pipe_without_tes_inputs.update({"negative_prompt": None}) + + # Pipelines like attend and excite have `prompt` as a required argument. + if ( + pipe_call_parameters.get("prompt", None) is not None + and pipe_call_parameters.get("prompt").default is inspect.Parameter.empty + and pipe_call_parameters.get("prompt_embeds", None) is not None + and pipe_call_parameters.get("prompt_embeds").default is None + ): + pipe_without_tes_inputs.update({"prompt": None}) + + pipe_out = pipe_without_text_encoders(**pipe_without_tes_inputs)[0] + + # Compare against regular pipeline outputs. + full_pipe = self.pipeline_class(**components).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + pipe_out_2 = full_pipe(**inputs)[0] + + if isinstance(pipe_out, np.ndarray) and isinstance(pipe_out_2, np.ndarray): + self.assertTrue(np.allclose(pipe_out, pipe_out_2, atol=atol, rtol=rtol)) + elif isinstance(pipe_out, torch.Tensor) and isinstance(pipe_out_2, torch.Tensor): + self.assertTrue(torch.allclose(pipe_out, pipe_out_2, atol=atol, rtol=rtol)) + + def test_StableDiffusionMixin_component(self): + """Any pipeline that have LDMFuncMixin should have vae and unet components.""" + if not issubclass(self.pipeline_class, StableDiffusionMixin): + return + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + self.assertTrue(hasattr(pipe, "vae") and isinstance(pipe.vae, (AutoencoderKL, AutoencoderTiny))) + self.assertTrue( + hasattr(pipe, "unet") + and isinstance( + pipe.unet, + (UNet2DConditionModel, UNet3DConditionModel, I2VGenXLUNet, UNetMotionModel, UNetControlNetXSModel), + ) + ) + + @require_hf_hub_version_greater("0.26.5") + @require_transformers_version_greater("4.47.1") + def test_save_load_dduf(self, atol=1e-4, rtol=1e-4): + if not self.supports_dduf: + return + + from huggingface_hub import export_folder_as_dduf + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device="cpu") + inputs.pop("generator") + inputs["generator"] = torch.manual_seed(0) + + pipeline_out = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + dduf_filename = os.path.join(tmpdir, f"{pipe.__class__.__name__.lower()}.dduf") + pipe.save_pretrained(tmpdir, safe_serialization=True) + export_folder_as_dduf(dduf_filename, folder_path=tmpdir) + loaded_pipe = self.pipeline_class.from_pretrained(tmpdir, dduf_file=dduf_filename).to(torch_device) + + inputs["generator"] = torch.manual_seed(0) + loaded_pipeline_out = loaded_pipe(**inputs)[0] + + if isinstance(pipeline_out, np.ndarray) and isinstance(loaded_pipeline_out, np.ndarray): + assert np.allclose(pipeline_out, loaded_pipeline_out, atol=atol, rtol=rtol) + elif isinstance(pipeline_out, torch.Tensor) and isinstance(loaded_pipeline_out, torch.Tensor): + assert torch.allclose(pipeline_out, loaded_pipeline_out, atol=atol, rtol=rtol) + + def test_layerwise_casting_inference(self): + if not self.test_layerwise_casting: + return + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(torch_device, dtype=torch.bfloat16) 
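+        # Layerwise casting stores the denoiser weights in float8_e4m3fn and upcasts them to bfloat16
+        # for compute; this test only verifies that a forward pass still runs under that setting.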
+ pipe.set_progress_bar_config(disable=None) + + denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet + denoiser.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16) + + inputs = self.get_dummy_inputs(torch_device) + _ = pipe(**inputs)[0] + + @require_torch_accelerator + def test_group_offloading_inference(self): + if not self.test_group_offloading: + return + + def create_pipe(): + torch.manual_seed(0) + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + return pipe + + def enable_group_offload_on_component(pipe, group_offloading_kwargs): + # We intentionally don't test VAE's here. This is because some tests enable tiling on the VAE. If + # tiling is enabled and a forward pass is run, when accelerator streams are used, the execution order of + # the layers is not traced correctly. This causes errors. For apply group offloading to VAE, a + # warmup forward pass (even with dummy small inputs) is recommended. + for component_name in [ + "text_encoder", + "text_encoder_2", + "text_encoder_3", + "transformer", + "unet", + "controlnet", + ]: + if not hasattr(pipe, component_name): + continue + component = getattr(pipe, component_name) + if not getattr(component, "_supports_group_offloading", True): + continue + if hasattr(component, "enable_group_offload"): + # For diffusers ModelMixin implementations + component.enable_group_offload(torch.device(torch_device), **group_offloading_kwargs) + else: + # For other models not part of diffusers + apply_group_offloading( + component, onload_device=torch.device(torch_device), **group_offloading_kwargs + ) + self.assertTrue( + all( + module._diffusers_hook.get_hook("group_offloading") is not None + for module in component.modules() + if hasattr(module, "_diffusers_hook") + ) + ) + for component_name in ["vae", "vqvae", "image_encoder"]: + component = getattr(pipe, component_name, None) + if isinstance(component, torch.nn.Module): + component.to(torch_device) + + def run_forward(pipe): + torch.manual_seed(0) + inputs = self.get_dummy_inputs(torch_device) + return pipe(**inputs)[0] + + pipe = create_pipe().to(torch_device) + output_without_group_offloading = run_forward(pipe) + + pipe = create_pipe() + enable_group_offload_on_component(pipe, {"offload_type": "block_level", "num_blocks_per_group": 1}) + output_with_group_offloading1 = run_forward(pipe) + + pipe = create_pipe() + enable_group_offload_on_component(pipe, {"offload_type": "leaf_level"}) + output_with_group_offloading2 = run_forward(pipe) + + if torch.is_tensor(output_without_group_offloading): + output_without_group_offloading = output_without_group_offloading.detach().cpu().numpy() + output_with_group_offloading1 = output_with_group_offloading1.detach().cpu().numpy() + output_with_group_offloading2 = output_with_group_offloading2.detach().cpu().numpy() + + self.assertTrue(np.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-4)) + self.assertTrue(np.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-4)) + + def test_torch_dtype_dict(self): + components = self.get_dummy_components() + if not components: + self.skipTest("No dummy components defined.") + + pipe = self.pipeline_class(**components) + specified_key = next(iter(components.keys())) + + with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname: + pipe.save_pretrained(tmpdirname, safe_serialization=False) + torch_dtype_dict 
= {specified_key: torch.bfloat16, "default": torch.float16} + loaded_pipe = self.pipeline_class.from_pretrained(tmpdirname, torch_dtype=torch_dtype_dict) + + for name, component in loaded_pipe.components.items(): + if isinstance(component, torch.nn.Module) and hasattr(component, "dtype"): + expected_dtype = torch_dtype_dict.get(name, torch_dtype_dict.get("default", torch.float32)) + self.assertEqual( + component.dtype, + expected_dtype, + f"Component '{name}' has dtype {component.dtype} but expected {expected_dtype}", + ) + + @require_torch_accelerator + def test_pipeline_with_accelerator_device_map(self, expected_max_difference=1e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe = pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + torch.manual_seed(0) + inputs = self.get_dummy_inputs(torch_device) + inputs["generator"] = torch.manual_seed(0) + out = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + loaded_pipe = self.pipeline_class.from_pretrained(tmpdir, device_map=torch_device) + for component in loaded_pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + inputs["generator"] = torch.manual_seed(0) + loaded_out = loaded_pipe(**inputs)[0] + max_diff = np.abs(to_np(out) - to_np(loaded_out)).max() + self.assertLess(max_diff, expected_max_difference) + + +@is_staging_test +class PipelinePushToHubTester(unittest.TestCase): + identifier = uuid.uuid4() + repo_id = f"test-pipeline-{identifier}" + org_repo_id = f"valid_org/{repo_id}-org" + + def get_pipeline_components(self): + unet = UNet2DConditionModel( + block_out_channels=(32, 64), + layers_per_block=2, + sample_size=32, + in_channels=4, + out_channels=4, + down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), + up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), + cross_attention_dim=32, + ) + + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + + vae = AutoencoderKL( + block_out_channels=[32, 64], + in_channels=3, + out_channels=3, + down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], + up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], + latent_channels=4, + ) + + text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + ) + text_encoder = CLIPTextModel(text_encoder_config) + + with tempfile.TemporaryDirectory() as tmpdir: + dummy_vocab = {"<|startoftext|>": 0, "<|endoftext|>": 1, "!": 2} + vocab_path = os.path.join(tmpdir, "vocab.json") + with open(vocab_path, "w") as f: + json.dump(dummy_vocab, f) + + merges = "Ġ t\nĠt h" + merges_path = os.path.join(tmpdir, "merges.txt") + with open(merges_path, "w") as f: + f.writelines(merges) + tokenizer = CLIPTokenizer(vocab_file=vocab_path, merges_file=merges_path) + + components = { + "unet": unet, + "scheduler": scheduler, + "vae": vae, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "safety_checker": None, + "feature_extractor": None, + } + return components + + def test_push_to_hub(self): + components = self.get_pipeline_components() + pipeline = StableDiffusionPipeline(**components) + pipeline.push_to_hub(self.repo_id, token=TOKEN) + + new_model = 
UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") + unet = components["unet"] + for p1, p2 in zip(unet.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.repo_id) + + # Push to hub via save_pretrained + with tempfile.TemporaryDirectory() as tmp_dir: + pipeline.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) + + new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}", subfolder="unet") + for p1, p2 in zip(unet.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(self.repo_id, token=TOKEN) + + def test_push_to_hub_in_organization(self): + components = self.get_pipeline_components() + pipeline = StableDiffusionPipeline(**components) + pipeline.push_to_hub(self.org_repo_id, token=TOKEN) + + new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") + unet = components["unet"] + for p1, p2 in zip(unet.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.org_repo_id) + + # Push to hub via save_pretrained + with tempfile.TemporaryDirectory() as tmp_dir: + pipeline.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) + + new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id, subfolder="unet") + for p1, p2 in zip(unet.parameters(), new_model.parameters()): + self.assertTrue(torch.equal(p1, p2)) + + # Reset repo + delete_repo(self.org_repo_id, token=TOKEN) + + @unittest.skipIf( + not is_jinja_available(), + reason="Model card tests cannot be performed without Jinja installed.", + ) + def test_push_to_hub_library_name(self): + components = self.get_pipeline_components() + pipeline = StableDiffusionPipeline(**components) + pipeline.push_to_hub(self.repo_id, token=TOKEN) + + model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data + assert model_card.library_name == "diffusers" + + # Reset repo + delete_repo(self.repo_id, token=TOKEN) + + +class PyramidAttentionBroadcastTesterMixin: + pab_config = PyramidAttentionBroadcastConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(100, 800), + spatial_attention_block_identifiers=["transformer_blocks"], + ) + + def test_pyramid_attention_broadcast_layers(self): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + num_layers = 0 + num_single_layers = 0 + dummy_component_kwargs = {} + dummy_component_parameters = inspect.signature(self.get_dummy_components).parameters + if "num_layers" in dummy_component_parameters: + num_layers = 2 + dummy_component_kwargs["num_layers"] = num_layers + if "num_single_layers" in dummy_component_parameters: + num_single_layers = 2 + dummy_component_kwargs["num_single_layers"] = num_single_layers + + components = self.get_dummy_components(**dummy_component_kwargs) + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + self.pab_config.current_timestep_callback = lambda: pipe.current_timestep + denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet + denoiser.enable_cache(self.pab_config) + + expected_hooks = 0 + if self.pab_config.spatial_attention_block_skip_range is not None: + expected_hooks += num_layers + num_single_layers + if self.pab_config.temporal_attention_block_skip_range is not None: + expected_hooks += num_layers + 
num_single_layers + if self.pab_config.cross_attention_block_skip_range is not None: + expected_hooks += num_layers + num_single_layers + + denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet + count = 0 + for module in denoiser.modules(): + if hasattr(module, "_diffusers_hook"): + hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast") + if hook is None: + continue + count += 1 + self.assertTrue( + isinstance(hook, PyramidAttentionBroadcastHook), + "Hook should be of type PyramidAttentionBroadcastHook.", + ) + self.assertTrue(hook.state.cache is None, "Cache should be None at initialization.") + self.assertEqual(count, expected_hooks, "Number of hooks should match the expected number.") + + # Perform dummy inference step to ensure state is updated + def pab_state_check_callback(pipe, i, t, kwargs): + for module in denoiser.modules(): + if hasattr(module, "_diffusers_hook"): + hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast") + if hook is None: + continue + self.assertTrue( + hook.state.cache is not None, + "Cache should have updated during inference.", + ) + self.assertTrue( + hook.state.iteration == i + 1, + "Hook iteration state should have updated during inference.", + ) + return {} + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 2 + inputs["callback_on_step_end"] = pab_state_check_callback + pipe(**inputs)[0] + + # After inference, reset_stateful_hooks is called within the pipeline, which should have reset the states + for module in denoiser.modules(): + if hasattr(module, "_diffusers_hook"): + hook = module._diffusers_hook.get_hook("pyramid_attention_broadcast") + if hook is None: + continue + self.assertTrue( + hook.state.cache is None, + "Cache should be reset to None after inference.", + ) + self.assertTrue( + hook.state.iteration == 0, + "Iteration should be reset to 0 after inference.", + ) + + def test_pyramid_attention_broadcast_inference(self, expected_atol: float = 0.2): + # We need to use higher tolerance because we are using a random model. With a converged/trained + # model, the tolerance can be lower. 
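+ # Illustrative sketch only (not executed by this test) of how PAB is typically enabled on a
+ # pipeline, using only names that already appear in this mixin; the skip ranges and the
+ # timestep callback are taken from `self.pab_config` above rather than asserted here:
+ #     config = PyramidAttentionBroadcastConfig(
+ #         spatial_attention_block_skip_range=2,
+ #         spatial_attention_timestep_skip_range=(100, 800),
+ #         current_timestep_callback=lambda: pipe.current_timestep,
+ #     )
+ #     pipe.transformer.enable_cache(config)
+ # The assertions below only require cached and uncached outputs to agree within
+ # `expected_atol`, since the dummy model is random rather than converged.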
+ + device = "cpu" # ensure determinism for the device-dependent torch.Generator + num_layers = 2 + components = self.get_dummy_components(num_layers=num_layers) + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + # Run inference without PAB + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 4 + output = pipe(**inputs)[0] + original_image_slice = output.flatten() + original_image_slice = np.concatenate((original_image_slice[:8], original_image_slice[-8:])) + + # Run inference with PAB enabled + self.pab_config.current_timestep_callback = lambda: pipe.current_timestep + denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet + denoiser.enable_cache(self.pab_config) + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 4 + output = pipe(**inputs)[0] + image_slice_pab_enabled = output.flatten() + image_slice_pab_enabled = np.concatenate((image_slice_pab_enabled[:8], image_slice_pab_enabled[-8:])) + + # Run inference with PAB disabled + denoiser.disable_cache() + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 4 + output = pipe(**inputs)[0] + image_slice_pab_disabled = output.flatten() + image_slice_pab_disabled = np.concatenate((image_slice_pab_disabled[:8], image_slice_pab_disabled[-8:])) + + assert np.allclose(original_image_slice, image_slice_pab_enabled, atol=expected_atol), ( + "PAB outputs should not differ much in specified timestep range." + ) + assert np.allclose(original_image_slice, image_slice_pab_disabled, atol=1e-4), ( + "Outputs from normal inference and after disabling cache should not differ." + ) + + +class FasterCacheTesterMixin: + faster_cache_config = FasterCacheConfig( + spatial_attention_block_skip_range=2, + spatial_attention_timestep_skip_range=(-1, 901), + unconditional_batch_skip_range=2, + attention_weight_callback=lambda _: 0.5, + ) + + def test_faster_cache_basic_warning_or_errors_raised(self): + components = self.get_dummy_components() + + logger = logging.get_logger("diffusers.hooks.faster_cache") + logger.setLevel(logging.INFO) + + # Check if warning is raise when no attention_weight_callback is provided + pipe = self.pipeline_class(**components) + with CaptureLogger(logger) as cap_logger: + config = FasterCacheConfig(spatial_attention_block_skip_range=2, attention_weight_callback=None) + apply_faster_cache(pipe.transformer, config) + self.assertTrue("No `attention_weight_callback` provided when enabling FasterCache" in cap_logger.out) + + # Check if error raised when unsupported tensor format used + pipe = self.pipeline_class(**components) + with self.assertRaises(ValueError): + config = FasterCacheConfig(spatial_attention_block_skip_range=2, tensor_format="BFHWC") + apply_faster_cache(pipe.transformer, config) + + def test_faster_cache_inference(self, expected_atol: float = 0.1): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + def create_pipe(): + torch.manual_seed(0) + num_layers = 2 + components = self.get_dummy_components(num_layers=num_layers) + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + return pipe + + def run_forward(pipe): + torch.manual_seed(0) + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 4 + return pipe(**inputs)[0] + + # Run inference without FasterCache + pipe = create_pipe() + output = run_forward(pipe).flatten() + original_image_slice = 
np.concatenate((output[:8], output[-8:])) + + # Run inference with FasterCache enabled + self.faster_cache_config.current_timestep_callback = lambda: pipe.current_timestep + pipe = create_pipe() + pipe.transformer.enable_cache(self.faster_cache_config) + output = run_forward(pipe).flatten() + image_slice_faster_cache_enabled = np.concatenate((output[:8], output[-8:])) + + # Run inference with FasterCache disabled + pipe.transformer.disable_cache() + output = run_forward(pipe).flatten() + image_slice_faster_cache_disabled = np.concatenate((output[:8], output[-8:])) + + assert np.allclose(original_image_slice, image_slice_faster_cache_enabled, atol=expected_atol), ( + "FasterCache outputs should not differ much in specified timestep range." + ) + assert np.allclose(original_image_slice, image_slice_faster_cache_disabled, atol=1e-4), ( + "Outputs from normal inference and after disabling cache should not differ." + ) + + def test_faster_cache_state(self): + from diffusers.hooks.faster_cache import _FASTER_CACHE_BLOCK_HOOK, _FASTER_CACHE_DENOISER_HOOK + + device = "cpu" # ensure determinism for the device-dependent torch.Generator + num_layers = 0 + num_single_layers = 0 + dummy_component_kwargs = {} + dummy_component_parameters = inspect.signature(self.get_dummy_components).parameters + if "num_layers" in dummy_component_parameters: + num_layers = 2 + dummy_component_kwargs["num_layers"] = num_layers + if "num_single_layers" in dummy_component_parameters: + num_single_layers = 2 + dummy_component_kwargs["num_single_layers"] = num_single_layers + + components = self.get_dummy_components(**dummy_component_kwargs) + pipe = self.pipeline_class(**components) + pipe.set_progress_bar_config(disable=None) + + self.faster_cache_config.current_timestep_callback = lambda: pipe.current_timestep + pipe.transformer.enable_cache(self.faster_cache_config) + + expected_hooks = 0 + if self.faster_cache_config.spatial_attention_block_skip_range is not None: + expected_hooks += num_layers + num_single_layers + if self.faster_cache_config.temporal_attention_block_skip_range is not None: + expected_hooks += num_layers + num_single_layers + + # Check if faster_cache denoiser hook is attached + denoiser = pipe.transformer if hasattr(pipe, "transformer") else pipe.unet + self.assertTrue( + hasattr(denoiser, "_diffusers_hook") + and isinstance(denoiser._diffusers_hook.get_hook(_FASTER_CACHE_DENOISER_HOOK), FasterCacheDenoiserHook), + "Hook should be of type FasterCacheDenoiserHook.", + ) + + # Check if all blocks have faster_cache block hook attached + count = 0 + for name, module in denoiser.named_modules(): + if hasattr(module, "_diffusers_hook"): + if name == "": + # Skip the root denoiser module + continue + count += 1 + self.assertTrue( + isinstance(module._diffusers_hook.get_hook(_FASTER_CACHE_BLOCK_HOOK), FasterCacheBlockHook), + "Hook should be of type FasterCacheBlockHook.", + ) + self.assertEqual(count, expected_hooks, "Number of hooks should match expected number.") + + # Perform inference to ensure that states are updated correctly + def faster_cache_state_check_callback(pipe, i, t, kwargs): + for name, module in denoiser.named_modules(): + if not hasattr(module, "_diffusers_hook"): + continue + if name == "": + # Root denoiser module + state = module._diffusers_hook.get_hook(_FASTER_CACHE_DENOISER_HOOK).state + if not self.faster_cache_config.is_guidance_distilled: + self.assertTrue(state.low_frequency_delta is not None, "Low frequency delta should be set.") + self.assertTrue(state.high_frequency_delta is 
not None, "High frequency delta should be set.") + else: + # Internal blocks + state = module._diffusers_hook.get_hook(_FASTER_CACHE_BLOCK_HOOK).state + self.assertTrue(state.cache is not None and len(state.cache) == 2, "Cache should be set.") + self.assertTrue(state.iteration == i + 1, "Hook iteration state should have updated during inference.") + return {} + + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 4 + inputs["callback_on_step_end"] = faster_cache_state_check_callback + _ = pipe(**inputs)[0] + + # After inference, reset_stateful_hooks is called within the pipeline, which should have reset the states + for name, module in denoiser.named_modules(): + if not hasattr(module, "_diffusers_hook"): + continue + + if name == "": + # Root denoiser module + state = module._diffusers_hook.get_hook(_FASTER_CACHE_DENOISER_HOOK).state + self.assertTrue(state.iteration == 0, "Iteration should be reset to 0.") + self.assertTrue(state.low_frequency_delta is None, "Low frequency delta should be reset to None.") + self.assertTrue(state.high_frequency_delta is None, "High frequency delta should be reset to None.") + else: + # Internal blocks + state = module._diffusers_hook.get_hook(_FASTER_CACHE_BLOCK_HOOK).state + self.assertTrue(state.iteration == 0, "Iteration should be reset to 0.") + self.assertTrue(state.batch_size is None, "Batch size should be reset to None.") + self.assertTrue(state.cache is None, "Cache should be reset to None.") + + +# TODO(aryan, dhruv): the cache tester mixins should probably be rewritten so that more models can be tested out +# of the box once there is better cache support/implementation +class FirstBlockCacheTesterMixin: + # threshold is intentionally set higher than usual values since we're testing with random unconverged models + # that will not satisfy the expected properties of the denoiser for caching to be effective + first_block_cache_config = FirstBlockCacheConfig(threshold=0.8) + + def test_first_block_cache_inference(self, expected_atol: float = 0.1): + device = "cpu" # ensure determinism for the device-dependent torch.Generator + + def create_pipe(): + torch.manual_seed(0) + num_layers = 2 + components = self.get_dummy_components(num_layers=num_layers) + pipe = self.pipeline_class(**components) + pipe = pipe.to(device) + pipe.set_progress_bar_config(disable=None) + return pipe + + def run_forward(pipe): + torch.manual_seed(0) + inputs = self.get_dummy_inputs(device) + inputs["num_inference_steps"] = 4 + return pipe(**inputs)[0] + + # Run inference without FirstBlockCache + pipe = create_pipe() + output = run_forward(pipe).flatten() + original_image_slice = np.concatenate((output[:8], output[-8:])) + + # Run inference with FirstBlockCache enabled + pipe = create_pipe() + pipe.transformer.enable_cache(self.first_block_cache_config) + output = run_forward(pipe).flatten() + image_slice_fbc_enabled = np.concatenate((output[:8], output[-8:])) + + # Run inference with FirstBlockCache disabled + pipe.transformer.disable_cache() + output = run_forward(pipe).flatten() + image_slice_fbc_disabled = np.concatenate((output[:8], output[-8:])) + + assert np.allclose(original_image_slice, image_slice_fbc_enabled, atol=expected_atol), ( + "FirstBlockCache outputs should not differ much." + ) + assert np.allclose(original_image_slice, image_slice_fbc_disabled, atol=1e-4), ( + "Outputs from normal inference and after disabling cache should not differ." + ) + + +# Some models (e.g. 
unCLIP) are extremely likely to significantly deviate depending on which hardware is used. +# This helper function is used to check that the image doesn't deviate on average more than 10 pixels from a +# reference image. +def assert_mean_pixel_difference(image, expected_image, expected_max_diff=10): + image = np.asarray(DiffusionPipeline.numpy_to_pil(image)[0], dtype=np.float32) + expected_image = np.asarray(DiffusionPipeline.numpy_to_pil(expected_image)[0], dtype=np.float32) + avg_diff = np.abs(image - expected_image).mean() + assert avg_diff < expected_max_diff, f"Error image deviates {avg_diff} pixels on average" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_onnx_common.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_onnx_common.py new file mode 100644 index 0000000000000000000000000000000000000000..fa077efb8ab0f122631f58b158580e4d501c1661 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/test_pipelines_onnx_common.py @@ -0,0 +1,12 @@ +from ..testing_utils import require_onnxruntime + + +@require_onnxruntime +class OnnxPipelineTesterMixin: + """ + This mixin is designed to be used with unittest.TestCase classes. + It provides a set of common tests for each ONNXRuntime pipeline, e.g. saving and loading the pipeline, + equivalence of dict and tuple outputs, etc. + """ + + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py new file mode 100644 index 0000000000000000000000000000000000000000..00ae0441fe9908b58a97637ee7fae509f7f93216 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/test_pipeline_visualcloze_combined.py @@ -0,0 +1,344 @@ +import random +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +import diffusers +from diffusers import AutoencoderKL, FlowMatchEulerDiscreteScheduler, FluxTransformer2DModel, VisualClozePipeline +from diffusers.utils import logging + +from ...testing_utils import ( + CaptureLogger, + enable_full_determinism, + floats_tensor, + require_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class VisualClozePipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = VisualClozePipeline + params = frozenset( + [ + "task_prompt", + "content_prompt", + "upsampling_height", + "upsampling_width", + "guidance_scale", + "prompt_embeds", + "pooled_prompt_embeds", + "upsampling_strength", + ] + ) + batch_params = frozenset(["task_prompt", "content_prompt", "image"]) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=12, + out_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=6, + num_attention_heads=2, + joint_attention_dim=32, + 
pooled_projection_dim=32, + axes_dims_rope=[2, 2, 2], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "resolution": 32, + } + + def get_dummy_inputs(self, device, seed=0): + # Create example images to simulate the input format required by VisualCloze + context_image = [ + Image.fromarray(floats_tensor((32, 32, 3), rng=random.Random(seed), scale=255).numpy().astype(np.uint8)) + for _ in range(2) + ] + query_image = [ + Image.fromarray( + floats_tensor((32, 32, 3), rng=random.Random(seed + 1), scale=255).numpy().astype(np.uint8) + ), + None, + ] + + # Create an image list that conforms to the VisualCloze input format + image = [ + context_image, # In-Context example + query_image, # Query image + ] + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "task_prompt": "Each row outlines a logical process, starting from [IMAGE1] gray-based depth map with detailed object contours, to achieve [IMAGE2] an image with flawless clarity.", + "content_prompt": "A beautiful landscape with mountains and a lake", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "upsampling_height": 32, + "upsampling_width": 32, + "max_sequence_length": 77, + "output_type": "np", + "upsampling_strength": 0.4, + } + return inputs + + def test_visualcloze_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["task_prompt"] = "A different task to perform." 
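+ # Changing only the task prompt should still alter the text conditioning and therefore the
+ # generated image; the assertion below requires only a non-zero max pixel difference rather
+ # than a fixed margin.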
+ output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different + assert max_diff > 1e-6 + + def test_visualcloze_image_output_shape(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + height_width_pairs = [(32, 32), (72, 57)] + for height, width in height_width_pairs: + expected_height = height - height % (pipe.generation_pipe.vae_scale_factor * 2) + expected_width = width - width % (pipe.generation_pipe.vae_scale_factor * 2) + + inputs.update({"upsampling_height": height, "upsampling_width": width}) + image = pipe(**inputs).images[0] + output_height, output_width, _ = image.shape + assert (output_height, output_width) == (expected_height, expected_width) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + def test_upsampling_strength(self, expected_min_diff=1e-1): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + # Test different upsampling strengths + inputs["upsampling_strength"] = 0.2 + output_no_upsampling = pipe(**inputs).images[0] + + inputs["upsampling_strength"] = 0.8 + output_full_upsampling = pipe(**inputs).images[0] + + # Different upsampling strengths should produce different outputs + max_diff = np.abs(output_no_upsampling - output_full_upsampling).max() + assert max_diff > expected_min_diff + + def test_different_task_prompts(self, expected_min_diff=1e-1): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + output_original = pipe(**inputs).images[0] + + inputs["task_prompt"] = "A different task description for image generation" + output_different_task = pipe(**inputs).images[0] + + # Different task prompts should produce different outputs + max_diff = np.abs(output_original - output_different_task).max() + assert max_diff > expected_min_diff + + @unittest.skip( + "Test not applicable because the pipeline being tested is a wrapper pipeline. CFG tests should be done on the inner pipelines." 
+ ) + def test_callback_cfg(self): + pass + + def test_save_load_local(self, expected_max_difference=5e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(diffusers.logging.INFO) + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + + with CaptureLogger(logger) as cap_logger: + # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware + # This attribute is not serialized in the config of the pipeline + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, resolution=32) + + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + for name in pipe_loaded.components.keys(): + if name not in pipe_loaded._optional_components: + assert name in str(cap_logger) + + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + if not hasattr(self.pipeline_class, "_optional_components"): + return + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware + # This attribute is not serialized in the config of the pipeline + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, resolution=32) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self, expected_max_diff=1e-2): + components = self.get_dummy_components() + for name, module in 
components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware + # This attribute is not serialized in the config of the pipeline + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16, resolution=32) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess( + max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..ab6b3ca5c587670bd21d8be65c0648632d385fb5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/visualcloze/test_pipeline_visualcloze_generation.py @@ -0,0 +1,312 @@ +import random +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +import diffusers +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxTransformer2DModel, + VisualClozeGenerationPipeline, +) +from diffusers.utils import logging + +from ...testing_utils import ( + CaptureLogger, + enable_full_determinism, + floats_tensor, + require_accelerator, + torch_device, +) +from ..test_pipelines_common import PipelineTesterMixin, to_np + + +enable_full_determinism() + + +class VisualClozeGenerationPipelineFastTests(unittest.TestCase, PipelineTesterMixin): + pipeline_class = VisualClozeGenerationPipeline + params = frozenset( + [ + "task_prompt", + "content_prompt", + "guidance_scale", + "prompt_embeds", + "pooled_prompt_embeds", + ] + ) + batch_params = frozenset(["task_prompt", "content_prompt", "image"]) + test_xformers_attention = False + test_layerwise_casting = True + test_group_offloading = True + + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + transformer = FluxTransformer2DModel( + patch_size=1, + in_channels=12, + out_channels=4, + num_layers=1, + num_single_layers=1, + attention_head_dim=6, + num_attention_heads=2, + joint_attention_dim=32, + pooled_projection_dim=32, + axes_dims_rope=[2, 2, 2], + ) + clip_text_encoder_config = CLIPTextConfig( + bos_token_id=0, + eos_token_id=2, + hidden_size=32, + 
intermediate_size=37, + layer_norm_eps=1e-05, + num_attention_heads=4, + num_hidden_layers=5, + pad_token_id=1, + vocab_size=1000, + hidden_act="gelu", + projection_dim=32, + ) + + torch.manual_seed(0) + text_encoder = CLIPTextModel(clip_text_encoder_config) + + torch.manual_seed(0) + text_encoder_2 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + + tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") + tokenizer_2 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + vae = AutoencoderKL( + sample_size=32, + in_channels=3, + out_channels=3, + block_out_channels=(4,), + layers_per_block=1, + latent_channels=1, + norm_num_groups=1, + use_quant_conv=False, + use_post_quant_conv=False, + shift_factor=0.0609, + scaling_factor=1.5035, + ) + + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + "resolution": 32, + } + + def get_dummy_inputs(self, device, seed=0): + # Create example images to simulate the input format required by VisualCloze + context_image = [ + Image.fromarray(floats_tensor((32, 32, 3), rng=random.Random(seed), scale=255).numpy().astype(np.uint8)) + for _ in range(2) + ] + query_image = [ + Image.fromarray( + floats_tensor((32, 32, 3), rng=random.Random(seed + 1), scale=255).numpy().astype(np.uint8) + ), + None, + ] + + # Create an image list that conforms to the VisualCloze input format + image = [ + context_image, # In-Context example + query_image, # Query image + ] + + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device="cpu").manual_seed(seed) + + inputs = { + "task_prompt": "Each row outlines a logical process, starting from [IMAGE1] gray-based depth map with detailed object contours, to achieve [IMAGE2] an image with flawless clarity.", + "content_prompt": "A beautiful landscape with mountains and a lake", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 5.0, + "max_sequence_length": 77, + "output_type": "np", + } + return inputs + + def test_visualcloze_different_prompts(self): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output_same_prompt = pipe(**inputs).images[0] + + inputs = self.get_dummy_inputs(torch_device) + inputs["task_prompt"] = "A different task to perform." 
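+ # Same prompt-sensitivity check as in the combined-pipeline test, but here only the
+ # generation stage is exercised (this pipeline exposes no upsampling_* arguments), so a
+ # different task prompt must already change the raw generation output.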
+ output_different_prompts = pipe(**inputs).images[0] + + max_diff = np.abs(output_same_prompt - output_different_prompts).max() + + # Outputs should be different + assert max_diff > 1e-6 + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=1e-3) + + def test_different_task_prompts(self, expected_min_diff=1e-1): + pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device) + inputs = self.get_dummy_inputs(torch_device) + + output_original = pipe(**inputs).images[0] + + inputs["task_prompt"] = "A different task description for image generation" + output_different_task = pipe(**inputs).images[0] + + # Different task prompts should produce different outputs + max_diff = np.abs(output_original - output_different_task).max() + assert max_diff > expected_min_diff + + def test_save_load_local(self, expected_max_difference=5e-4): + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(diffusers.logging.INFO) + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + + with CaptureLogger(logger) as cap_logger: + # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware + # This attribute is not serialized in the config of the pipeline + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, resolution=32) + + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + + for name in pipe_loaded.components.keys(): + if name not in pipe_loaded._optional_components: + assert name in str(cap_logger) + + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + if not hasattr(self.pipeline_class, "_optional_components"): + return + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + # set all optional components to None + for optional_component in pipe._optional_components: + setattr(pipe, optional_component, None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware + # This attribute is not serialized in the config of the pipeline + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, resolution=32) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + 
component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for optional_component in pipe._optional_components: + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess(max_diff, expected_max_difference) + + @unittest.skipIf(torch_device not in ["cuda", "xpu"], reason="float16 requires CUDA or XPU") + @require_accelerator + def test_save_load_float16(self, expected_max_diff=1e-2): + components = self.get_dummy_components() + for name, module in components.items(): + if hasattr(module, "half"): + components[name] = module.to(torch_device).half() + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + # NOTE: Resolution must be set to 32 for loading otherwise will lead to OOM on CI hardware + # This attribute is not serialized in the config of the pipeline + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16, resolution=32) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for name, component in pipe_loaded.components.items(): + if hasattr(component, "dtype"): + self.assertTrue( + component.dtype == torch.float16, + f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", + ) + + inputs = self.get_dummy_inputs(torch_device) + output_loaded = pipe_loaded(**inputs)[0] + max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() + self.assertLess( + max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." + ) + + @unittest.skip("Skipped due to missing layout_prompt. Needs further investigation.") + def test_encode_prompt_works_in_isolation(self, extra_required_param_value_dict=None, atol=0.0001, rtol=0.0001): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan.py new file mode 100644 index 0000000000000000000000000000000000000000..106a7b294646767a193c5563507385c1487b5ede --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan.py @@ -0,0 +1,201 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanPipeline, WanTransformer3DModel + +from ...testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class WanPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + # TODO: impl FlowDPMSolverMultistepScheduler + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer_2": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.4525, 0.452, 0.4485, 0.4534, 0.4524, 0.4529, 0.454, 0.453, 0.5127, 0.5326, 0.5204, 0.5253, 0.5439, 
0.5424, 0.5133, 0.5078]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + # _optional_components include transformer, transformer_2, but only transformer_2 is optional for this wan2.1 t2v pipeline + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = "transformer_2" + + components = self.get_dummy_components() + components[optional_component] = None + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) + + +@slow +@require_torch_accelerator +class WanPipelineIntegrationTests(unittest.TestCase): + prompt = "A painting of a squirrel eating a burger." + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + @unittest.skip("TODO: test needs to be implemented") + def test_Wanx(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_22.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_22.py new file mode 100644 index 0000000000000000000000000000000000000000..56ef5ceb97edfc09dd2825c3f78c1a7fba02bffe --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_22.py @@ -0,0 +1,367 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
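+
+# Fast tests for Wan 2.2 configurations of `WanPipeline`. The first class covers the
+# dual-transformer setup, where `boundary_ratio` splits the denoising schedule between
+# `transformer` and `transformer_2` (per the optional-components test below, `transformer`
+# goes unused when `boundary_ratio` is 1.0); the second covers a 5B-style single-transformer
+# setup that relies on `expand_timesteps` instead. This summary is inferred from the tests
+# themselves, not from the pipeline implementation.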
+ +import tempfile +import unittest + +import numpy as np +import torch +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanPipeline, WanTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Wan22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(prediction_type="flow_prediction", use_flow_sigmas=True, flow_shift=3.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + torch.manual_seed(0) + transformer_2 = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer_2": transformer_2, + "boundary_ratio": 0.875, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "negative", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class( + **components, + ) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.4525, 0.452, 0.4485, 0.4534, 0.4524, 0.4529, 0.454, 0.453, 0.5127, 0.5326, 0.5204, 0.5253, 0.5439, 0.5424, 0.5133, 0.5078]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = 
torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = "transformer" + + components = self.get_dummy_components() + components[optional_component] = None + components["boundary_ratio"] = 1.0 # for wan 2.2 14B, transformer is not used when boundary_ratio is 1.0 + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + self.assertTrue( + getattr(pipe_loaded, "transformer") is None, + "`transformer` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) + + +class Wan225BPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=48, + in_channels=12, + out_channels=12, + is_residual=True, + patch_size=2, + latents_mean=[0.0] * 48, + latents_std=[1.0] * 48, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + scale_factor_spatial=16, + scale_factor_temporal=4, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(prediction_type="flow_prediction", use_flow_sigmas=True, flow_shift=3.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=48, + out_channels=48, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer_2": None, + "boundary_ratio": None, + "expand_timesteps": True, + } + return components + + def 
get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + inputs = { + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 32, + "width": 32, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class( + **components, + ) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([[[0.4814, 0.4298, 0.5094, 0.4289, 0.5061, 0.4301, 0.5043, 0.4284, 0.5375, + 0.5965, 0.5527, 0.6014, 0.5228, 0.6076, 0.6644, 0.5651]]]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), + f"generated_slice: {generated_slice}, expected_slice: {expected_slice}", + ) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components.pop("boundary_ratio") + init_components.pop("expand_timesteps") + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = "transformer_2" + + components = self.get_dummy_components() + components[optional_component] = None + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_22_image_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_22_image_to_video.py new file mode 100644 index 
0000000000000000000000000000000000000000..6294d62044f3caa067647240b0fd9a5f6266c8b3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_22_image_to_video.py @@ -0,0 +1,392 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanImageToVideoPipeline, WanTransformer3DModel + +from ...testing_utils import ( + enable_full_determinism, + torch_device, +) +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class Wan22ImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(prediction_type="flow_prediction", use_flow_sigmas=True, flow_shift=3.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=36, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + torch.manual_seed(0) + transformer_2 = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=36, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer_2": transformer_2, + "image_encoder": None, + "image_processor": None, + "boundary_ratio": 0.875, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + 
image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class( + **components, + ) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.4527, 0.4526, 0.4498, 0.4539, 0.4521, 0.4524, 0.4533, 0.4535, 0.5154, + 0.5353, 0.5200, 0.5174, 0.5434, 0.5301, 0.5199, 0.5216]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), + f"generated_slice: {generated_slice}, expected_slice: {expected_slice}", + ) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = ["transformer", "image_encoder", "image_processor"] + + components = self.get_dummy_components() + for component in optional_component: + components[component] = None + components["boundary_ratio"] = 1.0 # for wan 2.2 14B, transformer is not used when boundary_ratio is 1.0 + + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for component in optional_component: + self.assertTrue( + getattr(pipe_loaded, component) is None, + f"`{component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) + + +class Wan225BImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def 
get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=48, + in_channels=12, + out_channels=12, + is_residual=True, + patch_size=2, + latents_mean=[0.0] * 48, + latents_std=[1.0] * 48, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + scale_factor_spatial=16, + scale_factor_temporal=4, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(prediction_type="flow_prediction", use_flow_sigmas=True, flow_shift=3.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=48, + out_channels=48, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "transformer_2": None, + "image_encoder": None, + "image_processor": None, + "boundary_ratio": None, + "expand_timesteps": True, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image_height = 32 + image_width = 32 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class( + **components, + ) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 32, 32)) + + # fmt: off + expected_slice = torch.tensor([[0.4833, 0.4305, 0.5100, 0.4299, 0.5056, 0.4298, 0.5052, 0.4332, 0.5550, + 0.6092, 0.5536, 0.5928, 0.5199, 0.5864, 0.6705, 0.5493]]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue( + torch.allclose(generated_slice, expected_slice, atol=1e-3), + f"generated_slice: {generated_slice}, expected_slice: {expected_slice}", + ) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + def test_components_function(self): + init_components = self.get_dummy_components() + init_components.pop("boundary_ratio") + init_components.pop("expand_timesteps") + pipe = self.pipeline_class(**init_components) + + self.assertTrue(hasattr(pipe, "components")) + self.assertTrue(set(pipe.components.keys()) == set(init_components.keys())) + + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = ["transformer_2", "image_encoder", "image_processor"] + + components = self.get_dummy_components() + for component in optional_component: + components[component] = None + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): 
+ if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + for component in optional_component: + self.assertTrue( + getattr(pipe_loaded, component) is None, + f"`{component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) + + def test_inference_batch_single_identical(self): + self._test_inference_batch_single_identical(expected_max_diff=2e-3) + + @unittest.skip("Test not supported") + def test_callback_inputs(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_image_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_image_to_video.py new file mode 100644 index 0000000000000000000000000000000000000000..07a9142f2553795d65996e62b28151cc2cdab7b2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_image_to_video.py @@ -0,0 +1,381 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
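The Wan 2.2 fast tests above lean on two properties of the pipeline configuration: `boundary_ratio` decides which of the two experts (`transformer` for high-noise timesteps, `transformer_2` for low-noise timesteps) handles a given denoising step, and the 5B dummy VAE compresses 16x spatially and 4x temporally. A minimal sketch of that routing rule and the implied latent shapes follows; `select_expert` is a hypothetical helper that only mirrors the behaviour the tests depend on (with `boundary_ratio=1.0` the high-noise expert is never selected, which is why `transformer` can be set to `None`), not the pipeline's actual code path.

```python
# Hypothetical helper mirroring the boundary_ratio routing the tests above rely on;
# not the pipeline's actual implementation.
def select_expert(t: float, boundary_ratio: float, num_train_timesteps: int = 1000) -> str:
    boundary = boundary_ratio * num_train_timesteps
    # High-noise steps (large t) go to `transformer`, low-noise steps to `transformer_2`.
    return "transformer" if t >= boundary else "transformer_2"

assert select_expert(999, boundary_ratio=0.875) == "transformer"    # early, noisy step
assert select_expert(100, boundary_ratio=0.875) == "transformer_2"  # late, clean step
assert select_expert(999, boundary_ratio=1.0) == "transformer_2"    # boundary never reached

# Latent shape implied by the 5B dummy VAE (scale_factor_spatial=16, scale_factor_temporal=4)
# for the dummy inputs height=width=32, num_frames=9:
latent_frames = (9 - 1) // 4 + 1   # 3
latent_height = 32 // 16           # 2
latent_width = 32 // 16            # 2
```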
+ +import tempfile +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import ( + AutoTokenizer, + CLIPImageProcessor, + CLIPVisionConfig, + CLIPVisionModelWithProjection, + T5EncoderModel, +) + +from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanImageToVideoPipeline, WanTransformer3DModel + +from ...testing_utils import enable_full_determinism, torch_device +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class WanImageToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + # TODO: impl FlowDPMSolverMultistepScheduler + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=36, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + image_dim=4, + ) + + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=4, + projection_dim=4, + num_hidden_layers=2, + num_attention_heads=2, + image_size=32, + intermediate_size=16, + patch_size=1, + ) + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + torch.manual_seed(0) + image_processor = CLIPImageProcessor(crop_size=32, size=32) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "image_encoder": image_encoder, + "image_processor": image_processor, + "transformer_2": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = 
pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.4525, 0.4525, 0.4497, 0.4536, 0.452, 0.4529, 0.454, 0.4535, 0.5072, 0.5527, 0.5165, 0.5244, 0.5481, 0.5282, 0.5208, 0.5214]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass") + def test_inference_batch_single_identical(self): + pass + + # _optional_components include transformer, transformer_2 and image_encoder, image_processor, but only transformer_2 is optional for wan2.1 i2v pipeline + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = "transformer_2" + + components = self.get_dummy_components() + components[optional_component] = None + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) + + +class WanFLFToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanImageToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs", "height", "width"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + # TODO: impl FlowDPMSolverMultistepScheduler + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=36, + 
out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + image_dim=4, + pos_embed_seq_len=2 * (4 * 4 + 1), + ) + + torch.manual_seed(0) + image_encoder_config = CLIPVisionConfig( + hidden_size=4, + projection_dim=4, + num_hidden_layers=2, + num_attention_heads=2, + image_size=4, + intermediate_size=16, + patch_size=1, + ) + image_encoder = CLIPVisionModelWithProjection(image_encoder_config) + + torch.manual_seed(0) + image_processor = CLIPImageProcessor(crop_size=4, size=4) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "image_encoder": image_encoder, + "image_processor": image_processor, + "transformer_2": None, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + image_height = 16 + image_width = 16 + image = Image.new("RGB", (image_width, image_height)) + last_image = Image.new("RGB", (image_width, image_height)) + inputs = { + "image": image, + "last_image": last_image, + "prompt": "dance monkey", + "negative_prompt": "negative", + "height": image_height, + "width": image_width, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "num_frames": 9, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (9, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.4531, 0.4527, 0.4498, 0.4542, 0.4526, 0.4527, 0.4534, 0.4534, 0.5061, 0.5185, 0.5283, 0.5181, 0.5309, 0.5365, 0.5113, 0.5244]) + # fmt: on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("TODO: revisit failing as it requires a very high threshold to pass") + def test_inference_batch_single_identical(self): + pass + + # _optional_components include transformer, transformer_2 and image_encoder, image_processor, but only transformer_2 is optional for wan2.1 FLFT2V pipeline + def test_save_load_optional_components(self, expected_max_difference=1e-4): + optional_component = "transformer_2" + + components = self.get_dummy_components() + components[optional_component] = None + pipe = self.pipeline_class(**components) + for component in pipe.components.values(): + if hasattr(component, "set_default_attn_processor"): + component.set_default_attn_processor() + pipe.to(torch_device) + pipe.set_progress_bar_config(disable=None) + + generator_device = "cpu" + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output = pipe(**inputs)[0] + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir, safe_serialization=False) + pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) + for component in pipe_loaded.components.values(): + if hasattr(component, 
"set_default_attn_processor"): + component.set_default_attn_processor() + pipe_loaded.to(torch_device) + pipe_loaded.set_progress_bar_config(disable=None) + + self.assertTrue( + getattr(pipe_loaded, optional_component) is None, + f"`{optional_component}` did not stay set to None after loading.", + ) + + inputs = self.get_dummy_inputs(generator_device) + torch.manual_seed(0) + output_loaded = pipe_loaded(**inputs)[0] + + max_diff = np.abs(output.detach().cpu().numpy() - output_loaded.detach().cpu().numpy()).max() + self.assertLess(max_diff, expected_max_difference) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_speech_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_speech_to_video.py new file mode 100644 index 0000000000000000000000000000000000000000..3bbe673d2de42ff6100b832ed4f3b50c45478660 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_speech_to_video.py @@ -0,0 +1,216 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKLWan, + FlowMatchEulerDiscreteScheduler, + WanS2VTransformer3DModel, + WanSpeechToVideoPipeline, +) + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class WanSpeechToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanSpeechToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanS2VTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=3, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": 
scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + num_frames = 17 + height = 16 + width = 16 + + video = [Image.new("RGB", (height, width))] * num_frames + mask = [Image.new("L", (height, width), 0)] * num_frames + + inputs = { + "video": video, + "mask": mask, + "prompt": "dance monkey", + "negative_prompt": "negative", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": num_frames, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames[0] + self.assertEqual(video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = [0.4523, 0.45198, 0.44872, 0.45326, 0.45211, 0.45258, 0.45344, 0.453, 0.52431, 0.52572, 0.50701, 0.5118, 0.53717, 0.53093, 0.50557, 0.51402] + # fmt: on + + video_slice = video.flatten() + video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) + video_slice = [round(x, 5) for x in video_slice.tolist()] + self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) + + def test_inference_with_single_reference_image(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["reference_images"] = Image.new("RGB", (16, 16)) + video = pipe(**inputs).frames[0] + self.assertEqual(video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = [0.45247, 0.45214, 0.44874, 0.45314, 0.45171, 0.45299, 0.45428, 0.45317, 0.51378, 0.52658, 0.53361, 0.52303, 0.46204, 0.50435, 0.52555, 0.51342] + # fmt: on + + video_slice = video.flatten() + video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) + video_slice = [round(x, 5) for x in video_slice.tolist()] + self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) + + def test_inference_with_multiple_reference_image(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["reference_images"] = [[Image.new("RGB", (16, 16))] * 2] + video = pipe(**inputs).frames[0] + self.assertEqual(video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = [0.45321, 0.45221, 0.44818, 0.45375, 0.45268, 0.4519, 0.45271, 0.45253, 0.51244, 0.52223, 0.51253, 0.51321, 0.50743, 0.51177, 0.51626, 0.50983] + # fmt: on + + video_slice = video.flatten() + video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) + video_slice = [round(x, 5) for x in video_slice.tolist()] + self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("Errors out because passing multiple prompts at once is not yet supported by this pipeline.") + def test_encode_prompt_works_in_isolation(self): + pass + + @unittest.skip("Batching is not yet supported with this pipeline") + def 
test_inference_batch_consistent(self): + pass + + @unittest.skip("Batching is not yet supported with this pipeline") + def test_inference_batch_single_identical(self): + return super().test_inference_batch_single_identical() + + @unittest.skip( + "AutoencoderKLWan encoded latents are always in FP32. This test is not designed to handle mixed dtype inputs" + ) + def test_float16_inference(self): + pass + + @unittest.skip( + "AutoencoderKLWan encoded latents are always in FP32. This test is not designed to handle mixed dtype inputs" + ) + def test_save_load_float16(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_vace.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_vace.py new file mode 100644 index 0000000000000000000000000000000000000000..ed13d5649dc34bbf09fdc953ef8ee2f67922482d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_vace.py @@ -0,0 +1,213 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import numpy as np +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel + +from ...testing_utils import enable_full_determinism +from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import PipelineTesterMixin + + +enable_full_determinism() + + +class WanVACEPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanVACEPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = TEXT_TO_IMAGE_BATCH_PARAMS + image_params = TEXT_TO_IMAGE_IMAGE_PARAMS + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanVACETransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=3, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + vace_layers=[0, 2], + vace_in_channels=96, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + 
"text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + num_frames = 17 + height = 16 + width = 16 + + video = [Image.new("RGB", (height, width))] * num_frames + mask = [Image.new("L", (height, width), 0)] * num_frames + + inputs = { + "video": video, + "mask": mask, + "prompt": "dance monkey", + "negative_prompt": "negative", + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "num_frames": num_frames, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames[0] + self.assertEqual(video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = [0.4523, 0.45198, 0.44872, 0.45326, 0.45211, 0.45258, 0.45344, 0.453, 0.52431, 0.52572, 0.50701, 0.5118, 0.53717, 0.53093, 0.50557, 0.51402] + # fmt: on + + video_slice = video.flatten() + video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) + video_slice = [round(x, 5) for x in video_slice.tolist()] + self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) + + def test_inference_with_single_reference_image(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["reference_images"] = Image.new("RGB", (16, 16)) + video = pipe(**inputs).frames[0] + self.assertEqual(video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = [0.45247, 0.45214, 0.44874, 0.45314, 0.45171, 0.45299, 0.45428, 0.45317, 0.51378, 0.52658, 0.53361, 0.52303, 0.46204, 0.50435, 0.52555, 0.51342] + # fmt: on + + video_slice = video.flatten() + video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) + video_slice = [round(x, 5) for x in video_slice.tolist()] + self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) + + def test_inference_with_multiple_reference_image(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + inputs["reference_images"] = [[Image.new("RGB", (16, 16))] * 2] + video = pipe(**inputs).frames[0] + self.assertEqual(video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = [0.45321, 0.45221, 0.44818, 0.45375, 0.45268, 0.4519, 0.45271, 0.45253, 0.51244, 0.52223, 0.51253, 0.51321, 0.50743, 0.51177, 0.51626, 0.50983] + # fmt: on + + video_slice = video.flatten() + video_slice = torch.cat([video_slice[:8], video_slice[-8:]]) + video_slice = [round(x, 5) for x in video_slice.tolist()] + self.assertTrue(np.allclose(video_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip("Errors out because passing multiple prompts at once is not yet supported by this pipeline.") + def test_encode_prompt_works_in_isolation(self): + pass + + @unittest.skip("Batching is not yet supported with this pipeline") + def test_inference_batch_consistent(self): + 
pass + + @unittest.skip("Batching is not yet supported with this pipeline") + def test_inference_batch_single_identical(self): + return super().test_inference_batch_single_identical() + + @unittest.skip( + "AutoencoderKLWan encoded latents are always in FP32. This test is not designed to handle mixed dtype inputs" + ) + def test_float16_inference(self): + pass + + @unittest.skip( + "AutoencoderKLWan encoded latents are always in FP32. This test is not designed to handle mixed dtype inputs" + ) + def test_save_load_float16(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_video_to_video.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_video_to_video.py new file mode 100644 index 0000000000000000000000000000000000000000..27ada121ca485db206f08d761f000dd087147adc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/pipelines/wan/test_wan_video_to_video.py @@ -0,0 +1,149 @@ +# Copyright 2025 The HuggingFace Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import torch +from PIL import Image +from transformers import AutoTokenizer, T5EncoderModel + +from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, WanTransformer3DModel, WanVideoToVideoPipeline + +from ...testing_utils import ( + enable_full_determinism, +) +from ..pipeline_params import TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS +from ..test_pipelines_common import ( + PipelineTesterMixin, +) + + +enable_full_determinism() + + +class WanVideoToVideoPipelineFastTests(PipelineTesterMixin, unittest.TestCase): + pipeline_class = WanVideoToVideoPipeline + params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} + batch_params = frozenset(["video", "prompt", "negative_prompt"]) + image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS + required_optional_params = frozenset( + [ + "num_inference_steps", + "generator", + "latents", + "return_dict", + "callback_on_step_end", + "callback_on_step_end_tensor_inputs", + ] + ) + test_xformers_attention = False + supports_dduf = False + + def get_dummy_components(self): + torch.manual_seed(0) + vae = AutoencoderKLWan( + base_dim=3, + z_dim=16, + dim_mult=[1, 1, 1, 1], + num_res_blocks=1, + temperal_downsample=[False, True, True], + ) + + torch.manual_seed(0) + scheduler = UniPCMultistepScheduler(flow_shift=3.0) + text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + + torch.manual_seed(0) + transformer = WanTransformer3DModel( + patch_size=(1, 2, 2), + num_attention_heads=2, + attention_head_dim=12, + in_channels=16, + out_channels=16, + text_dim=32, + freq_dim=256, + ffn_dim=32, + num_layers=2, + cross_attn_norm=True, + qk_norm="rms_norm_across_heads", + rope_max_seq_len=32, + ) + + components = { + "transformer": transformer, + "vae": vae, + "scheduler": scheduler, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + } + return components + + def get_dummy_inputs(self, 
device, seed=0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator(device=device).manual_seed(seed) + + video = [Image.new("RGB", (16, 16))] * 17 + inputs = { + "video": video, + "prompt": "dance monkey", + "negative_prompt": "negative", # TODO + "generator": generator, + "num_inference_steps": 4, + "guidance_scale": 6.0, + "height": 16, + "width": 16, + "max_sequence_length": 16, + "output_type": "pt", + } + return inputs + + def test_inference(self): + device = "cpu" + + components = self.get_dummy_components() + pipe = self.pipeline_class(**components) + pipe.to(device) + pipe.set_progress_bar_config(disable=None) + + inputs = self.get_dummy_inputs(device) + video = pipe(**inputs).frames + generated_video = video[0] + self.assertEqual(generated_video.shape, (17, 3, 16, 16)) + + # fmt: off + expected_slice = torch.tensor([0.4522, 0.4534, 0.4532, 0.4553, 0.4526, 0.4538, 0.4533, 0.4547, 0.513, 0.5176, 0.5286, 0.4958, 0.4955, 0.5381, 0.5154, 0.5195]) + # fmt:on + + generated_slice = generated_video.flatten() + generated_slice = torch.cat([generated_slice[:8], generated_slice[-8:]]) + self.assertTrue(torch.allclose(generated_slice, expected_slice, atol=1e-3)) + + @unittest.skip("Test not supported") + def test_attention_slicing_forward_pass(self): + pass + + @unittest.skip( + "WanVideoToVideoPipeline has to run in mixed precision. Casting the entire pipeline will result in errors" + ) + def test_float16_inference(self): + pass + + @unittest.skip( + "WanVideoToVideoPipeline has to run in mixed precision. Save/Load the entire pipeline in FP16 will result in errors" + ) + def test_save_load_float16(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/README.md b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f1585581597d77c0f6d465d84d9d836741dd1671 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/README.md @@ -0,0 +1,44 @@ +The tests here are adapted from [`transformers` tests](https://github.com/huggingface/transformers/tree/409fcfdfccde77a14b7cc36972b774cabc371ae1/tests/quantization/bnb). + +They were conducted on the `audace` machine, using a single RTX 4090. Below is `nvidia-smi`: + +```bash ++-----------------------------------------------------------------------------------------+ +| NVIDIA-SMI 550.90.07 Driver Version: 550.90.07 CUDA Version: 12.4 | +|-----------------------------------------+------------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. 
| +|=========================================+========================+======================| +| 0 NVIDIA GeForce RTX 4090 Off | 00000000:01:00.0 Off | Off | +| 30% 55C P0 61W / 450W | 1MiB / 24564MiB | 2% Default | +| | | N/A | ++-----------------------------------------+------------------------+----------------------+ +| 1 NVIDIA GeForce RTX 4090 Off | 00000000:13:00.0 Off | Off | +| 30% 51C P0 60W / 450W | 1MiB / 24564MiB | 0% Default | +| | | N/A | ++-----------------------------------------+------------------------+----------------------+ +``` + +`diffusers-cli`: + +```bash +- 🤗 Diffusers version: 0.31.0.dev0 +- Platform: Linux-5.15.0-117-generic-x86_64-with-glibc2.35 +- Running on Google Colab?: No +- Python version: 3.10.12 +- PyTorch version (GPU?): 2.5.0.dev20240818+cu124 (True) +- Flax version (CPU?/GPU?/TPU?): not installed (NA) +- Jax version: not installed +- JaxLib version: not installed +- Huggingface_hub version: 0.24.5 +- Transformers version: 4.44.2 +- Accelerate version: 0.34.0.dev0 +- PEFT version: 0.12.0 +- Bitsandbytes version: 0.43.3 +- Safetensors version: 0.4.4 +- xFormers version: not installed +- Accelerator: NVIDIA GeForce RTX 4090, 24564 MiB +NVIDIA GeForce RTX 4090, 24564 MiB +- Using GPU in script?: Yes +``` \ No newline at end of file diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/test_4bit.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/test_4bit.py new file mode 100644 index 0000000000000000000000000000000000000000..c1da8f1ece78021c4c68de461e57cd492ad19db7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/test_4bit.py @@ -0,0 +1,895 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a clone of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
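The 4-bit tests that follow all build on the same NF4 checkpoint-loading path. As a point of reference, a minimal usage sketch of that configuration is shown below; it assumes a CUDA-capable device with `bitsandbytes` installed and uses the SD3 checkpoint named in `Base4bitTests`.

```python
# Minimal NF4 loading sketch matching the configuration exercised throughout the tests below.
# Assumes a CUDA-capable device and an installed `bitsandbytes`.
import torch

from diffusers import BitsAndBytesConfig, SD3Transformer2DModel

nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # normal-float 4-bit weights
    bnb_4bit_compute_dtype=torch.float16,  # compute runs in fp16
)
model_4bit = SD3Transformer2DModel.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    subfolder="transformer",
    quantization_config=nf4_config,
)
# Quantized linear weights are stored packed as uint8 (bnb.nn.Params4bit), so the
# footprint is roughly 3-4x smaller than the fp16 checkpoint (see expected_rel_difference).
print(f"4-bit footprint: {model_4bit.get_memory_footprint() / 1e9:.2f} GB")
```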
+import gc +import os +import tempfile +import unittest + +import numpy as np +import pytest +import safetensors.torch +from huggingface_hub import hf_hub_download +from PIL import Image + +from diffusers import ( + BitsAndBytesConfig, + DiffusionPipeline, + FluxControlPipeline, + FluxTransformer2DModel, + SD3Transformer2DModel, +) +from diffusers.quantizers import PipelineQuantizationConfig +from diffusers.utils import is_accelerate_version, logging + +from ...testing_utils import ( + CaptureLogger, + backend_empty_cache, + is_bitsandbytes_available, + is_torch_available, + is_transformers_available, + load_pt, + numpy_cosine_similarity_distance, + require_accelerate, + require_bitsandbytes_version_greater, + require_peft_backend, + require_torch, + require_torch_accelerator, + require_torch_version_greater, + require_transformers_version_greater, + slow, + torch_device, +) +from ..test_torch_compile_utils import QuantCompileTests + + +def get_some_linear_layer(model): + if model.__class__.__name__ in ["SD3Transformer2DModel", "FluxTransformer2DModel"]: + return model.transformer_blocks[0].attn.to_q + else: + return NotImplementedError("Don't know what layer to retrieve here.") + + +if is_transformers_available(): + from transformers import BitsAndBytesConfig as BnbConfig + from transformers import T5EncoderModel + +if is_torch_available(): + import torch + + from ..utils import LoRALayer, get_memory_consumption_stat + + +if is_bitsandbytes_available(): + import bitsandbytes as bnb + + from diffusers.quantizers.bitsandbytes.utils import replace_with_bnb_linear + + +@require_bitsandbytes_version_greater("0.43.2") +@require_accelerate +@require_torch +@require_torch_accelerator +@slow +class Base4bitTests(unittest.TestCase): + # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) + # Therefore here we use only SD3 to test our module + model_name = "stabilityai/stable-diffusion-3-medium-diffusers" + + # This was obtained on audace so the number might slightly change + expected_rel_difference = 3.69 + + expected_memory_saving_ratio = 0.8 + + prompt = "a beautiful sunset amidst the mountains." 
+ num_inference_steps = 10 + seed = 0 + + @classmethod + def setUpClass(cls): + cls.is_deterministic_enabled = torch.are_deterministic_algorithms_enabled() + if not cls.is_deterministic_enabled: + torch.use_deterministic_algorithms(True) + + @classmethod + def tearDownClass(cls): + if not cls.is_deterministic_enabled: + torch.use_deterministic_algorithms(False) + + def get_dummy_inputs(self): + prompt_embeds = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt", + torch_device, + ) + pooled_prompt_embeds = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/pooled_prompt_embeds.pt", + torch_device, + ) + latent_model_input = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/latent_model_input.pt", + torch_device, + ) + + input_dict_for_transformer = { + "hidden_states": latent_model_input, + "encoder_hidden_states": prompt_embeds, + "pooled_projections": pooled_prompt_embeds, + "timestep": torch.Tensor([1.0]), + "return_dict": False, + } + return input_dict_for_transformer + + +class BnB4BitBasicTests(Base4bitTests): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + # Models + self.model_fp16 = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", torch_dtype=torch.float16 + ) + nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + self.model_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device + ) + + def tearDown(self): + if hasattr(self, "model_fp16"): + del self.model_fp16 + if hasattr(self, "model_4bit"): + del self.model_4bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_quantization_num_parameters(self): + r""" + Test if the number of returned parameters is correct + """ + num_params_4bit = self.model_4bit.num_parameters() + num_params_fp16 = self.model_fp16.num_parameters() + + self.assertEqual(num_params_4bit, num_params_fp16) + + def test_quantization_config_json_serialization(self): + r""" + A simple test to check if the quantization config is correctly serialized and deserialized + """ + config = self.model_4bit.config + + self.assertTrue("quantization_config" in config) + + _ = config["quantization_config"].to_dict() + _ = config["quantization_config"].to_diff_dict() + + _ = config["quantization_config"].to_json_string() + + def test_memory_footprint(self): + r""" + A simple test to check if the model conversion has been done correctly by checking on the + memory footprint of the converted model and the class type of the linear layers of the converted models + """ + mem_fp16 = self.model_fp16.get_memory_footprint() + mem_4bit = self.model_4bit.get_memory_footprint() + + self.assertAlmostEqual(mem_fp16 / mem_4bit, self.expected_rel_difference, delta=1e-2) + linear = get_some_linear_layer(self.model_4bit) + self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit) + + def test_model_memory_usage(self): + # Delete to not let anything interfere. + del self.model_4bit, self.model_fp16 + + # Re-instantiate. 
+ inputs = self.get_dummy_inputs() + inputs = { + k: v.to(device=torch_device, dtype=torch.float16) for k, v in inputs.items() if not isinstance(v, bool) + } + model_fp16 = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", torch_dtype=torch.float16 + ).to(torch_device) + unquantized_model_memory = get_memory_consumption_stat(model_fp16, inputs) + del model_fp16 + + nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + model_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, torch_dtype=torch.float16 + ) + quantized_model_memory = get_memory_consumption_stat(model_4bit, inputs) + assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_saving_ratio + + def test_original_dtype(self): + r""" + A simple test to check if the model successfully stores the original dtype + """ + self.assertTrue("_pre_quantization_dtype" in self.model_4bit.config) + self.assertFalse("_pre_quantization_dtype" in self.model_fp16.config) + self.assertTrue(self.model_4bit.config["_pre_quantization_dtype"] == torch.float16) + + def test_keep_modules_in_fp32(self): + r""" + A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. + Also ensures if inference works. + """ + fp32_modules = SD3Transformer2DModel._keep_in_fp32_modules + SD3Transformer2DModel._keep_in_fp32_modules = ["proj_out"] + + nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + model = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device + ) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if name in model._keep_in_fp32_modules: + self.assertTrue(module.weight.dtype == torch.float32) + else: + # 4-bit parameters are packed in uint8 variables + self.assertTrue(module.weight.dtype == torch.uint8) + + # test if inference works. 
+ with torch.no_grad() and torch.amp.autocast(torch_device, dtype=torch.float16): + input_dict_for_transformer = self.get_dummy_inputs() + model_inputs = { + k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) + } + model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) + _ = model(**model_inputs) + + SD3Transformer2DModel._keep_in_fp32_modules = fp32_modules + + def test_linear_are_4bit(self): + r""" + A simple test to check if the model conversion has been done correctly by checking on the + memory footprint of the converted model and the class type of the linear layers of the converted models + """ + self.model_fp16.get_memory_footprint() + self.model_4bit.get_memory_footprint() + + for name, module in self.model_4bit.named_modules(): + if isinstance(module, torch.nn.Linear): + if name not in ["proj_out"]: + # 4-bit parameters are packed in uint8 variables + self.assertTrue(module.weight.dtype == torch.uint8) + + def test_config_from_pretrained(self): + transformer_4bit = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/flux.1-dev-nf4-pkg", subfolder="transformer" + ) + linear = get_some_linear_layer(transformer_4bit) + self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit) + self.assertTrue(hasattr(linear.weight, "quant_state")) + self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState) + + def test_device_assignment(self): + mem_before = self.model_4bit.get_memory_footprint() + + # Move to CPU + self.model_4bit.to("cpu") + self.assertEqual(self.model_4bit.device.type, "cpu") + self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before) + + # Move back to CUDA device + for device in [0, f"{torch_device}", f"{torch_device}:0", "call()"]: + if device == "call()": + self.model_4bit.to(f"{torch_device}:0") + else: + self.model_4bit.to(device) + self.assertEqual(self.model_4bit.device, torch.device(0)) + self.assertAlmostEqual(self.model_4bit.get_memory_footprint(), mem_before) + self.model_4bit.to("cpu") + + def test_device_and_dtype_assignment(self): + r""" + Test whether trying to cast (or assigning a device to) a model after converting it in 4-bit will throw an error. + Checks also if other models are casted correctly. Device placement, however, is supported. 
+ """ + with self.assertRaises(ValueError): + # Tries with a `dtype` + self.model_4bit.to(torch.float16) + + with self.assertRaises(ValueError): + # Tries with a `device` and `dtype` + self.model_4bit.to(device=f"{torch_device}:0", dtype=torch.float16) + + with self.assertRaises(ValueError): + # Tries with a cast + self.model_4bit.float() + + with self.assertRaises(ValueError): + # Tries with a cast + self.model_4bit.half() + + # This should work + self.model_4bit.to(torch_device) + + # Test if we did not break anything + self.model_fp16 = self.model_fp16.to(dtype=torch.float32, device=torch_device) + input_dict_for_transformer = self.get_dummy_inputs() + model_inputs = { + k: v.to(dtype=torch.float32, device=torch_device) + for k, v in input_dict_for_transformer.items() + if not isinstance(v, bool) + } + model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) + with torch.no_grad(): + _ = self.model_fp16(**model_inputs) + + # Check this does not throw an error + _ = self.model_fp16.to("cpu") + + # Check this does not throw an error + _ = self.model_fp16.half() + + # Check this does not throw an error + _ = self.model_fp16.float() + + # Check that this does not throw an error + _ = self.model_fp16.to(torch_device) + + def test_bnb_4bit_wrong_config(self): + r""" + Test whether creating a bnb config with unsupported values leads to errors. + """ + with self.assertRaises(ValueError): + _ = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_storage="add") + + def test_bnb_4bit_errors_loading_incorrect_state_dict(self): + r""" + Test if loading with an incorrect state dict raises an error. + """ + with tempfile.TemporaryDirectory() as tmpdirname: + nf4_config = BitsAndBytesConfig(load_in_4bit=True) + model_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device + ) + model_4bit.save_pretrained(tmpdirname) + del model_4bit + + with self.assertRaises(ValueError) as err_context: + state_dict = safetensors.torch.load_file( + os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors") + ) + + # corrupt the state dict + key_to_target = "context_embedder.weight" # can be other keys too. + compatible_param = state_dict[key_to_target] + corrupted_param = torch.randn(compatible_param.shape[0] - 1, 1) + state_dict[key_to_target] = bnb.nn.Params4bit(corrupted_param, requires_grad=False) + safetensors.torch.save_file( + state_dict, os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors") + ) + + _ = SD3Transformer2DModel.from_pretrained(tmpdirname) + + assert key_to_target in str(err_context.exception) + + def test_bnb_4bit_logs_warning_for_no_quantization(self): + model_with_no_linear = torch.nn.Sequential(torch.nn.Conv2d(4, 4, 3), torch.nn.ReLU()) + quantization_config = BitsAndBytesConfig(load_in_4bit=True) + logger = logging.get_logger("diffusers.quantizers.bitsandbytes.utils") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + _ = replace_with_bnb_linear(model_with_no_linear, quantization_config=quantization_config) + assert ( + "You are loading your model in 8bit or 4bit but no linear modules were found in your model." 
+ in cap_logger.out + ) + + +class BnB4BitTrainingTests(Base4bitTests): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + self.model_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device + ) + + def test_training(self): + # Step 1: freeze all parameters + for param in self.model_4bit.parameters(): + param.requires_grad = False # freeze the model - train adapters later + if param.ndim == 1: + # cast the small parameters (e.g. layernorm) to fp32 for stability + param.data = param.data.to(torch.float32) + + # Step 2: add adapters + for _, module in self.model_4bit.named_modules(): + if "Attention" in repr(type(module)): + module.to_k = LoRALayer(module.to_k, rank=4) + module.to_q = LoRALayer(module.to_q, rank=4) + module.to_v = LoRALayer(module.to_v, rank=4) + + # Step 3: dummy batch + input_dict_for_transformer = self.get_dummy_inputs() + model_inputs = { + k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) + } + model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) + + # Step 4: Check if the gradient is not None + with torch.amp.autocast(torch_device, dtype=torch.float16): + out = self.model_4bit(**model_inputs)[0] + out.norm().backward() + + for module in self.model_4bit.modules(): + if isinstance(module, LoRALayer): + self.assertTrue(module.adapter[1].weight.grad is not None) + self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) + + +@require_transformers_version_greater("4.44.0") +class SlowBnb4BitTests(Base4bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + model_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device + ) + self.pipeline_4bit = DiffusionPipeline.from_pretrained( + self.model_name, transformer=model_4bit, torch_dtype=torch.float16 + ) + self.pipeline_4bit.enable_model_cpu_offload() + + def tearDown(self): + del self.pipeline_4bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_quality(self): + output = self.pipeline_4bit( + prompt=self.prompt, + num_inference_steps=self.num_inference_steps, + generator=torch.manual_seed(self.seed), + output_type="np", + ).images + + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.1123, 0.1296, 0.1609, 0.1042, 0.1230, 0.1274, 0.0928, 0.1165, 0.1216]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-2) + + def test_generate_quality_dequantize(self): + r""" + Test that loading the model and unquantize it produce correct results. 
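+
+        Rough sketch of the dequantize flow exercised here (illustrative only; the prompt and
+        step count are placeholders, not the exact values used by the test):
+
+            import torch
+            from diffusers import BitsAndBytesConfig, DiffusionPipeline, SD3Transformer2DModel
+
+            nf4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
+            transformer = SD3Transformer2DModel.from_pretrained(
+                "stabilityai/stable-diffusion-3-medium-diffusers",
+                subfolder="transformer",
+                quantization_config=nf4_config,
+            )
+            pipe = DiffusionPipeline.from_pretrained(
+                "stabilityai/stable-diffusion-3-medium-diffusers",
+                transformer=transformer,
+                torch_dtype=torch.float16,
+            )
+            pipe.transformer.dequantize()  # unpacks the 4-bit weights back to floating point
+            image = pipe("a beautiful sunset", num_inference_steps=10).images[0]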
+ """ + self.pipeline_4bit.transformer.dequantize() + output = self.pipeline_4bit( + prompt=self.prompt, + num_inference_steps=self.num_inference_steps, + generator=torch.manual_seed(self.seed), + output_type="np", + ).images + + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.1216, 0.1387, 0.1584, 0.1152, 0.1318, 0.1282, 0.1062, 0.1226, 0.1228]) + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3) + + # Since we offloaded the `pipeline_4bit.transformer` to CPU (result of `enable_model_cpu_offload()), check + # the following. + self.assertTrue(self.pipeline_4bit.transformer.device.type == "cpu") + # calling it again shouldn't be a problem + _ = self.pipeline_4bit( + prompt=self.prompt, + num_inference_steps=2, + generator=torch.manual_seed(self.seed), + output_type="np", + ).images + + def test_moving_to_cpu_throws_warning(self): + nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + model_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=nf4_config, device_map=torch_device + ) + + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + # Because `model.dtype` will return torch.float16 as SD3 transformer has + # a conv layer as the first layer. + _ = DiffusionPipeline.from_pretrained( + self.model_name, transformer=model_4bit, torch_dtype=torch.float16 + ).to("cpu") + + assert "Pipelines loaded with `dtype=torch.float16`" in cap_logger.out + + @pytest.mark.xfail( + condition=is_accelerate_version("<=", "1.1.1"), + reason="Test will pass after https://github.com/huggingface/accelerate/pull/3223 is in a release.", + strict=True, + ) + def test_pipeline_cuda_placement_works_with_nf4(self): + transformer_nf4_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + transformer_4bit = SD3Transformer2DModel.from_pretrained( + self.model_name, + subfolder="transformer", + quantization_config=transformer_nf4_config, + torch_dtype=torch.float16, + device_map=torch_device, + ) + text_encoder_3_nf4_config = BnbConfig( + load_in_4bit=True, + bnb_4bit_quant_type="nf4", + bnb_4bit_compute_dtype=torch.float16, + ) + text_encoder_3_4bit = T5EncoderModel.from_pretrained( + self.model_name, + subfolder="text_encoder_3", + quantization_config=text_encoder_3_nf4_config, + torch_dtype=torch.float16, + device_map=torch_device, + ) + # CUDA device placement works. + pipeline_4bit = DiffusionPipeline.from_pretrained( + self.model_name, + transformer=transformer_4bit, + text_encoder_3=text_encoder_3_4bit, + torch_dtype=torch.float16, + ).to(torch_device) + + # Check if inference works. + _ = pipeline_4bit(self.prompt, max_sequence_length=20, num_inference_steps=2) + + del pipeline_4bit + + def test_device_map(self): + """ + Test if the quantized model is working properly with "auto". + cpu/disk offloading as well doesn't work with bnb. 
+ """ + + def get_dummy_tensor_inputs(device=None, seed: int = 0): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + torch.manual_seed(seed) + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to( + device, dtype=torch.bfloat16 + ) + torch.manual_seed(seed) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( + device, dtype=torch.bfloat16 + ) + + torch.manual_seed(seed) + pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) + + timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "txt_ids": text_ids, + "img_ids": image_ids, + "timestep": timestep, + } + + inputs = get_dummy_tensor_inputs(torch_device) + expected_slice = np.array( + [0.47070312, 0.00390625, -0.03662109, -0.19628906, -0.53125, 0.5234375, -0.17089844, -0.59375, 0.578125] + ) + + # non sharded + quantization_config = BitsAndBytesConfig( + load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16 + ) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + device_map="auto", + torch_dtype=torch.bfloat16, + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, bnb.nn.modules.Params4bit)) + + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + + # sharded + + quantization_config = BitsAndBytesConfig( + load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16 + ) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-sharded", + subfolder="transformer", + quantization_config=quantization_config, + device_map="auto", + torch_dtype=torch.bfloat16, + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, bnb.nn.modules.Params4bit)) + + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + + +@require_transformers_version_greater("4.44.0") +class SlowBnb4BitFluxTests(Base4bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + model_id = "hf-internal-testing/flux.1-dev-nf4-pkg" + t5_4bit = T5EncoderModel.from_pretrained(model_id, subfolder="text_encoder_2") + transformer_4bit = FluxTransformer2DModel.from_pretrained(model_id, subfolder="transformer") + self.pipeline_4bit = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + text_encoder_2=t5_4bit, + transformer=transformer_4bit, + torch_dtype=torch.float16, + ) + self.pipeline_4bit.enable_model_cpu_offload() + + def tearDown(self): + del self.pipeline_4bit + + gc.collect() + backend_empty_cache(torch_device) + + def 
test_quality(self): + # keep the resolution and max tokens to a lower number for faster execution. + output = self.pipeline_4bit( + prompt=self.prompt, + num_inference_steps=self.num_inference_steps, + generator=torch.manual_seed(self.seed), + height=256, + width=256, + max_sequence_length=64, + output_type="np", + ).images + + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.0583, 0.0586, 0.0632, 0.0815, 0.0813, 0.0947, 0.1040, 0.1145, 0.1265]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3) + + @require_peft_backend + def test_lora_loading(self): + self.pipeline_4bit.load_lora_weights( + hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd" + ) + self.pipeline_4bit.set_adapters("hyper-sd", adapter_weights=0.125) + + output = self.pipeline_4bit( + prompt=self.prompt, + height=256, + width=256, + max_sequence_length=64, + output_type="np", + num_inference_steps=8, + generator=torch.Generator().manual_seed(42), + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.5347, 0.5342, 0.5283, 0.5093, 0.4988, 0.5093, 0.5044, 0.5015, 0.4946]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3) + + +@require_transformers_version_greater("4.44.0") +@require_peft_backend +class SlowBnb4BitFluxControlWithLoraTests(Base4bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + self.pipeline_4bit = FluxControlPipeline.from_pretrained("eramth/flux-4bit", torch_dtype=torch.float16) + self.pipeline_4bit.enable_model_cpu_offload() + + def tearDown(self): + del self.pipeline_4bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_lora_loading(self): + self.pipeline_4bit.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora") + + output = self.pipeline_4bit( + prompt=self.prompt, + control_image=Image.new(mode="RGB", size=(256, 256)), + height=256, + width=256, + max_sequence_length=64, + output_type="np", + num_inference_steps=8, + generator=torch.Generator().manual_seed(42), + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.1636, 0.1675, 0.1982, 0.1743, 0.1809, 0.1936, 0.1743, 0.2095, 0.2139]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3, msg=f"{out_slice=} != {expected_slice=}") + + +@slow +class BaseBnb4BitSerializationTests(Base4bitTests): + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def test_serialization(self, quant_type="nf4", double_quant=True, safe_serialization=True): + r""" + Test whether it is possible to serialize a model in 4-bit. Uses most typical params as default. + See ExtendedSerializationTest class for more params combinations. 
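+
+        The round trip being verified looks roughly like this (illustrative sketch):
+
+            import tempfile
+            import torch
+            from diffusers import BitsAndBytesConfig, SD3Transformer2DModel
+
+            nf4_config = BitsAndBytesConfig(
+                load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
+            )
+            model = SD3Transformer2DModel.from_pretrained(
+                "stabilityai/stable-diffusion-3-medium-diffusers",
+                subfolder="transformer",
+                quantization_config=nf4_config,
+            )
+            with tempfile.TemporaryDirectory() as tmpdir:
+                model.save_pretrained(tmpdir)  # quantization_config is written into the model config
+                reloaded = SD3Transformer2DModel.from_pretrained(tmpdir)  # weights come back as Params4bit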
+ """ + + self.quantization_config = BitsAndBytesConfig( + load_in_4bit=True, + bnb_4bit_quant_type=quant_type, + bnb_4bit_use_double_quant=double_quant, + bnb_4bit_compute_dtype=torch.bfloat16, + ) + model_0 = SD3Transformer2DModel.from_pretrained( + self.model_name, + subfolder="transformer", + quantization_config=self.quantization_config, + device_map=torch_device, + ) + self.assertTrue("_pre_quantization_dtype" in model_0.config) + with tempfile.TemporaryDirectory() as tmpdirname: + model_0.save_pretrained(tmpdirname, safe_serialization=safe_serialization) + + config = SD3Transformer2DModel.load_config(tmpdirname) + self.assertTrue("quantization_config" in config) + self.assertTrue("_pre_quantization_dtype" not in config) + + model_1 = SD3Transformer2DModel.from_pretrained(tmpdirname) + + # checking quantized linear module weight + linear = get_some_linear_layer(model_1) + self.assertTrue(linear.weight.__class__ == bnb.nn.Params4bit) + self.assertTrue(hasattr(linear.weight, "quant_state")) + self.assertTrue(linear.weight.quant_state.__class__ == bnb.functional.QuantState) + + # checking memory footpring + self.assertAlmostEqual(model_0.get_memory_footprint() / model_1.get_memory_footprint(), 1, places=2) + + # Matching all parameters and their quant_state items: + d0 = dict(model_0.named_parameters()) + d1 = dict(model_1.named_parameters()) + self.assertTrue(d0.keys() == d1.keys()) + + for k in d0.keys(): + self.assertTrue(d0[k].shape == d1[k].shape) + self.assertTrue(d0[k].device.type == d1[k].device.type) + self.assertTrue(d0[k].device == d1[k].device) + self.assertTrue(d0[k].dtype == d1[k].dtype) + self.assertTrue(torch.equal(d0[k], d1[k].to(d0[k].device))) + + if isinstance(d0[k], bnb.nn.modules.Params4bit): + for v0, v1 in zip( + d0[k].quant_state.as_dict().values(), + d1[k].quant_state.as_dict().values(), + ): + if isinstance(v0, torch.Tensor): + self.assertTrue(torch.equal(v0, v1.to(v0.device))) + else: + self.assertTrue(v0 == v1) + + # comparing forward() outputs + dummy_inputs = self.get_dummy_inputs() + inputs = {k: v.to(torch_device) for k, v in dummy_inputs.items() if isinstance(v, torch.Tensor)} + inputs.update({k: v for k, v in dummy_inputs.items() if k not in inputs}) + out_0 = model_0(**inputs)[0] + out_1 = model_1(**inputs)[0] + self.assertTrue(torch.equal(out_0, out_1)) + + +class ExtendedSerializationTest(BaseBnb4BitSerializationTests): + """ + tests more combinations of parameters + """ + + def test_nf4_single_unsafe(self): + self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=False) + + def test_nf4_single_safe(self): + self.test_serialization(quant_type="nf4", double_quant=False, safe_serialization=True) + + def test_nf4_double_unsafe(self): + self.test_serialization(quant_type="nf4", double_quant=True, safe_serialization=False) + + # nf4 double safetensors quantization is tested in test_serialization() method from the parent class + + def test_fp4_single_unsafe(self): + self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=False) + + def test_fp4_single_safe(self): + self.test_serialization(quant_type="fp4", double_quant=False, safe_serialization=True) + + def test_fp4_double_unsafe(self): + self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=False) + + def test_fp4_double_safe(self): + self.test_serialization(quant_type="fp4", double_quant=True, safe_serialization=True) + + +@require_torch_version_greater("2.7.1") +@require_bitsandbytes_version_greater("0.45.5") +class 
Bnb4BitCompileTests(QuantCompileTests, unittest.TestCase): + @property + def quantization_config(self): + return PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={ + "load_in_4bit": True, + "bnb_4bit_quant_type": "nf4", + "bnb_4bit_compute_dtype": torch.bfloat16, + }, + components_to_quantize=["transformer", "text_encoder_2"], + ) + + @require_bitsandbytes_version_greater("0.46.1") + def test_torch_compile(self): + torch._dynamo.config.capture_dynamic_output_shape_ops = True + super().test_torch_compile() + + def test_torch_compile_with_group_offload_leaf(self): + super()._test_torch_compile_with_group_offload_leaf(use_stream=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/test_mixed_int8.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/test_mixed_int8.py new file mode 100644 index 0000000000000000000000000000000000000000..fde3966dec973c61aefb2da0680fbce8f9c607c8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/bnb/test_mixed_int8.py @@ -0,0 +1,863 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a clone of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import tempfile +import unittest + +import numpy as np +import pytest +from huggingface_hub import hf_hub_download +from PIL import Image + +from diffusers import ( + BitsAndBytesConfig, + DiffusionPipeline, + FluxControlPipeline, + FluxTransformer2DModel, + SanaTransformer2DModel, + SD3Transformer2DModel, + logging, +) +from diffusers.quantizers import PipelineQuantizationConfig +from diffusers.utils import is_accelerate_version + +from ...testing_utils import ( + CaptureLogger, + backend_empty_cache, + is_bitsandbytes_available, + is_torch_available, + is_transformers_available, + load_pt, + numpy_cosine_similarity_distance, + require_accelerate, + require_bitsandbytes_version_greater, + require_peft_backend, + require_peft_version_greater, + require_torch, + require_torch_accelerator, + require_torch_version_greater_equal, + require_transformers_version_greater, + slow, + torch_device, +) +from ..test_torch_compile_utils import QuantCompileTests + + +def get_some_linear_layer(model): + if model.__class__.__name__ in ["SD3Transformer2DModel", "FluxTransformer2DModel"]: + return model.transformer_blocks[0].attn.to_q + else: + return NotImplementedError("Don't know what layer to retrieve here.") + + +if is_transformers_available(): + from transformers import BitsAndBytesConfig as BnbConfig + from transformers import T5EncoderModel + +if is_torch_available(): + import torch + + from ..utils import LoRALayer, get_memory_consumption_stat + + +if is_bitsandbytes_available(): + import bitsandbytes as bnb + + from diffusers.quantizers.bitsandbytes import replace_with_bnb_linear + + +@require_bitsandbytes_version_greater("0.43.2") +@require_accelerate +@require_torch +@require_torch_accelerator +@slow +class Base8bitTests(unittest.TestCase): + # We need to test on relatively large models (aka >1b parameters otherwise the 
quantiztion may not work as expected) + # Therefore here we use only SD3 to test our module + model_name = "stabilityai/stable-diffusion-3-medium-diffusers" + + # This was obtained on audace so the number might slightly change + expected_rel_difference = 1.94 + + expected_memory_saving_ratio = 0.7 + + prompt = "a beautiful sunset amidst the mountains." + num_inference_steps = 10 + seed = 0 + + @classmethod + def setUpClass(cls): + cls.is_deterministic_enabled = torch.are_deterministic_algorithms_enabled() + if not cls.is_deterministic_enabled: + torch.use_deterministic_algorithms(True) + + @classmethod + def tearDownClass(cls): + if not cls.is_deterministic_enabled: + torch.use_deterministic_algorithms(False) + + def get_dummy_inputs(self): + prompt_embeds = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/prompt_embeds.pt", + map_location="cpu", + ) + pooled_prompt_embeds = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/pooled_prompt_embeds.pt", + map_location="cpu", + ) + latent_model_input = load_pt( + "https://huggingface.co/datasets/hf-internal-testing/bnb-diffusers-testing-artifacts/resolve/main/latent_model_input.pt", + map_location="cpu", + ) + + input_dict_for_transformer = { + "hidden_states": latent_model_input, + "encoder_hidden_states": prompt_embeds, + "pooled_projections": pooled_prompt_embeds, + "timestep": torch.Tensor([1.0]), + "return_dict": False, + } + return input_dict_for_transformer + + +class BnB8bitBasicTests(Base8bitTests): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + # Models + self.model_fp16 = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", torch_dtype=torch.float16 + ) + mixed_int8_config = BitsAndBytesConfig(load_in_8bit=True) + self.model_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=mixed_int8_config, device_map=torch_device + ) + + def tearDown(self): + if hasattr(self, "model_fp16"): + del self.model_fp16 + if hasattr(self, "model_8bit"): + del self.model_8bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_quantization_num_parameters(self): + r""" + Test if the number of returned parameters is correct + """ + num_params_8bit = self.model_8bit.num_parameters() + num_params_fp16 = self.model_fp16.num_parameters() + + self.assertEqual(num_params_8bit, num_params_fp16) + + def test_quantization_config_json_serialization(self): + r""" + A simple test to check if the quantization config is correctly serialized and deserialized + """ + config = self.model_8bit.config + + self.assertTrue("quantization_config" in config) + + _ = config["quantization_config"].to_dict() + _ = config["quantization_config"].to_diff_dict() + + _ = config["quantization_config"].to_json_string() + + def test_memory_footprint(self): + r""" + A simple test to check if the model conversion has been done correctly by checking on the + memory footprint of the converted model and the class type of the linear layers of the converted models + """ + mem_fp16 = self.model_fp16.get_memory_footprint() + mem_8bit = self.model_8bit.get_memory_footprint() + + self.assertAlmostEqual(mem_fp16 / mem_8bit, self.expected_rel_difference, delta=1e-2) + linear = get_some_linear_layer(self.model_8bit) + self.assertTrue(linear.weight.__class__ == bnb.nn.Int8Params) + + def test_model_memory_usage(self): + # Delete to not let anything interfere. 
+ del self.model_8bit, self.model_fp16 + + # Re-instantiate. + inputs = self.get_dummy_inputs() + inputs = { + k: v.to(device=torch_device, dtype=torch.float16) for k, v in inputs.items() if not isinstance(v, bool) + } + model_fp16 = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", torch_dtype=torch.float16 + ).to(torch_device) + unquantized_model_memory = get_memory_consumption_stat(model_fp16, inputs) + del model_fp16 + + config = BitsAndBytesConfig(load_in_8bit=True) + model_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=config, torch_dtype=torch.float16 + ) + quantized_model_memory = get_memory_consumption_stat(model_8bit, inputs) + assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_saving_ratio + + def test_original_dtype(self): + r""" + A simple test to check if the model successfully stores the original dtype + """ + self.assertTrue("_pre_quantization_dtype" in self.model_8bit.config) + self.assertFalse("_pre_quantization_dtype" in self.model_fp16.config) + self.assertTrue(self.model_8bit.config["_pre_quantization_dtype"] == torch.float16) + + def test_keep_modules_in_fp32(self): + r""" + A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. + Also ensures if inference works. + """ + fp32_modules = SD3Transformer2DModel._keep_in_fp32_modules + SD3Transformer2DModel._keep_in_fp32_modules = ["proj_out"] + + mixed_int8_config = BitsAndBytesConfig(load_in_8bit=True) + model = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=mixed_int8_config, device_map=torch_device + ) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if name in model._keep_in_fp32_modules: + self.assertTrue(module.weight.dtype == torch.float32) + else: + # 8-bit parameters are packed in int8 variables + self.assertTrue(module.weight.dtype == torch.int8) + + # test if inference works. 
+        with torch.no_grad(), torch.autocast(model.device.type, dtype=torch.float16):
+            input_dict_for_transformer = self.get_dummy_inputs()
+            model_inputs = {
+                k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool)
+            }
+            model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs})
+            _ = model(**model_inputs)
+
+        SD3Transformer2DModel._keep_in_fp32_modules = fp32_modules
+
+    def test_linear_are_8bit(self):
+        r"""
+        A simple test to check that the model conversion has been done correctly by checking the
+        memory footprint of the converted model and the class type of its linear layers
+        """
+        self.model_fp16.get_memory_footprint()
+        self.model_8bit.get_memory_footprint()
+
+        for name, module in self.model_8bit.named_modules():
+            if isinstance(module, torch.nn.Linear):
+                if name not in ["proj_out"]:
+                    # 8-bit parameters are packed in int8 variables
+                    self.assertTrue(module.weight.dtype == torch.int8)
+
+    def test_llm_skip(self):
+        r"""
+        A simple test to check if `llm_int8_skip_modules` works as expected
+        """
+        config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["proj_out"])
+        model_8bit = SD3Transformer2DModel.from_pretrained(
+            self.model_name, subfolder="transformer", quantization_config=config, device_map=torch_device
+        )
+        linear = get_some_linear_layer(model_8bit)
+        self.assertTrue(linear.weight.dtype == torch.int8)
+        self.assertTrue(isinstance(linear, bnb.nn.Linear8bitLt))
+
+        self.assertTrue(isinstance(model_8bit.proj_out, torch.nn.Linear))
+        self.assertTrue(model_8bit.proj_out.weight.dtype != torch.int8)
+
+    def test_config_from_pretrained(self):
+        transformer_8bit = FluxTransformer2DModel.from_pretrained(
+            "hf-internal-testing/flux.1-dev-int8-pkg", subfolder="transformer"
+        )
+        linear = get_some_linear_layer(transformer_8bit)
+        self.assertTrue(linear.weight.__class__ == bnb.nn.Int8Params)
+        self.assertTrue(hasattr(linear.weight, "SCB"))
+
+    def test_device_and_dtype_assignment(self):
+        r"""
+        Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error.
+        It also checks that other models are cast correctly.
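+
+        In short, and unlike the 4-bit case, moving the quantized model is blocked too
+        (illustrative sketch, not executed by this test):
+
+            import torch
+            from diffusers import BitsAndBytesConfig, SD3Transformer2DModel
+
+            model_8bit = SD3Transformer2DModel.from_pretrained(
+                "stabilityai/stable-diffusion-3-medium-diffusers",
+                subfolder="transformer",
+                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
+            )
+            model_8bit.to("cpu")          # raises ValueError: 8-bit bnb models cannot be moved to CPU
+            model_8bit.to(torch.float16)  # raises ValueError: dtype casts are not allowed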
+ """ + with self.assertRaises(ValueError): + # Tries with `str` + self.model_8bit.to("cpu") + + with self.assertRaises(ValueError): + # Tries with a `dtype`` + self.model_8bit.to(torch.float16) + + with self.assertRaises(ValueError): + # Tries with a `device` + self.model_8bit.to(torch.device(f"{torch_device}:0")) + + with self.assertRaises(ValueError): + # Tries with a `device` + self.model_8bit.float() + + with self.assertRaises(ValueError): + # Tries with a `device` + self.model_8bit.half() + + # Test if we did not break anything + self.model_fp16 = self.model_fp16.to(dtype=torch.float32, device=torch_device) + input_dict_for_transformer = self.get_dummy_inputs() + model_inputs = { + k: v.to(dtype=torch.float32, device=torch_device) + for k, v in input_dict_for_transformer.items() + if not isinstance(v, bool) + } + model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) + with torch.no_grad(): + _ = self.model_fp16(**model_inputs) + + # Check this does not throw an error + _ = self.model_fp16.to("cpu") + + # Check this does not throw an error + _ = self.model_fp16.half() + + # Check this does not throw an error + _ = self.model_fp16.float() + + # Check that this does not throw an error + _ = self.model_fp16.to(torch_device) + + def test_bnb_8bit_logs_warning_for_no_quantization(self): + model_with_no_linear = torch.nn.Sequential(torch.nn.Conv2d(4, 4, 3), torch.nn.ReLU()) + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + logger = logging.get_logger("diffusers.quantizers.bitsandbytes.utils") + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + _ = replace_with_bnb_linear(model_with_no_linear, quantization_config=quantization_config) + assert ( + "You are loading your model in 8bit or 4bit but no linear modules were found in your model." + in cap_logger.out + ) + + +class Bnb8bitDeviceTests(Base8bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + mixed_int8_config = BitsAndBytesConfig(load_in_8bit=True) + self.model_8bit = SanaTransformer2DModel.from_pretrained( + "Efficient-Large-Model/Sana_1600M_4Kpx_BF16_diffusers", + subfolder="transformer", + quantization_config=mixed_int8_config, + device_map=torch_device, + ) + + def tearDown(self): + del self.model_8bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_buffers_device_assignment(self): + for buffer_name, buffer in self.model_8bit.named_buffers(): + self.assertEqual( + buffer.device.type, + torch.device(torch_device).type, + f"Expected device {torch_device} for {buffer_name} got {buffer.device}.", + ) + + +class BnB8bitTrainingTests(Base8bitTests): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + mixed_int8_config = BitsAndBytesConfig(load_in_8bit=True) + self.model_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=mixed_int8_config, device_map=torch_device + ) + + def test_training(self): + # Step 1: freeze all parameters + for param in self.model_8bit.parameters(): + param.requires_grad = False # freeze the model - train adapters later + if param.ndim == 1: + # cast the small parameters (e.g. 
layernorm) to fp32 for stability + param.data = param.data.to(torch.float32) + + # Step 2: add adapters + for _, module in self.model_8bit.named_modules(): + if "Attention" in repr(type(module)): + module.to_k = LoRALayer(module.to_k, rank=4) + module.to_q = LoRALayer(module.to_q, rank=4) + module.to_v = LoRALayer(module.to_v, rank=4) + + # Step 3: dummy batch + input_dict_for_transformer = self.get_dummy_inputs() + model_inputs = { + k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool) + } + model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs}) + + # Step 4: Check if the gradient is not None + with torch.amp.autocast(torch_device, dtype=torch.float16): + out = self.model_8bit(**model_inputs)[0] + out.norm().backward() + + for module in self.model_8bit.modules(): + if isinstance(module, LoRALayer): + self.assertTrue(module.adapter[1].weight.grad is not None) + self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) + + +@require_transformers_version_greater("4.44.0") +class SlowBnb8bitTests(Base8bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + mixed_int8_config = BitsAndBytesConfig(load_in_8bit=True) + model_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=mixed_int8_config, device_map=torch_device + ) + self.pipeline_8bit = DiffusionPipeline.from_pretrained( + self.model_name, transformer=model_8bit, torch_dtype=torch.float16 + ) + self.pipeline_8bit.enable_model_cpu_offload() + + def tearDown(self): + del self.pipeline_8bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_quality(self): + output = self.pipeline_8bit( + prompt=self.prompt, + num_inference_steps=self.num_inference_steps, + generator=torch.manual_seed(self.seed), + output_type="np", + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.0674, 0.0623, 0.0364, 0.0632, 0.0671, 0.0430, 0.0317, 0.0493, 0.0583]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-2) + + def test_model_cpu_offload_raises_warning(self): + model_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, + subfolder="transformer", + quantization_config=BitsAndBytesConfig(load_in_8bit=True), + device_map=torch_device, + ) + pipeline_8bit = DiffusionPipeline.from_pretrained( + self.model_name, transformer=model_8bit, torch_dtype=torch.float16 + ) + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + pipeline_8bit.enable_model_cpu_offload() + + assert "has been loaded in `bitsandbytes` 8bit" in cap_logger.out + + def test_moving_to_cpu_throws_warning(self): + model_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, + subfolder="transformer", + quantization_config=BitsAndBytesConfig(load_in_8bit=True), + device_map=torch_device, + ) + logger = logging.get_logger("diffusers.pipelines.pipeline_utils") + logger.setLevel(30) + + with CaptureLogger(logger) as cap_logger: + # Because `model.dtype` will return torch.float16 as SD3 transformer has + # a conv layer as the first layer. 
+ _ = DiffusionPipeline.from_pretrained( + self.model_name, transformer=model_8bit, torch_dtype=torch.float16 + ).to("cpu") + + assert "Pipelines loaded with `dtype=torch.float16`" in cap_logger.out + + def test_generate_quality_dequantize(self): + r""" + Test that loading the model and unquantize it produce correct results. + """ + self.pipeline_8bit.transformer.dequantize() + output = self.pipeline_8bit( + prompt=self.prompt, + num_inference_steps=self.num_inference_steps, + generator=torch.manual_seed(self.seed), + output_type="np", + ).images + + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.0266, 0.0264, 0.0271, 0.0110, 0.0310, 0.0098, 0.0078, 0.0256, 0.0208]) + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-2) + + # 8bit models cannot be offloaded to CPU. + self.assertTrue(self.pipeline_8bit.transformer.device.type == torch_device) + # calling it again shouldn't be a problem + _ = self.pipeline_8bit( + prompt=self.prompt, + num_inference_steps=2, + generator=torch.manual_seed(self.seed), + output_type="np", + ).images + + @pytest.mark.xfail( + condition=is_accelerate_version("<=", "1.1.1"), + reason="Test will pass after https://github.com/huggingface/accelerate/pull/3223 is in a release.", + strict=True, + ) + def test_pipeline_cuda_placement_works_with_mixed_int8(self): + transformer_8bit_config = BitsAndBytesConfig(load_in_8bit=True) + transformer_8bit = SD3Transformer2DModel.from_pretrained( + self.model_name, + subfolder="transformer", + quantization_config=transformer_8bit_config, + torch_dtype=torch.float16, + device_map=torch_device, + ) + text_encoder_3_8bit_config = BnbConfig(load_in_8bit=True) + text_encoder_3_8bit = T5EncoderModel.from_pretrained( + self.model_name, + subfolder="text_encoder_3", + quantization_config=text_encoder_3_8bit_config, + torch_dtype=torch.float16, + device_map=torch_device, + ) + + # CUDA device placement works. + device = torch_device if torch_device != "rocm" else "cuda" + pipeline_8bit = DiffusionPipeline.from_pretrained( + self.model_name, + transformer=transformer_8bit, + text_encoder_3=text_encoder_3_8bit, + torch_dtype=torch.float16, + ).to(device) + + # Check if inference works. + _ = pipeline_8bit(self.prompt, max_sequence_length=20, num_inference_steps=2) + + del pipeline_8bit + + def test_device_map(self): + """ + Test if the quantized model is working properly with "auto" + pu/disk offloading doesn't work with bnb. 
+ """ + + def get_dummy_tensor_inputs(device=None, seed: int = 0): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + torch.manual_seed(seed) + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to( + device, dtype=torch.bfloat16 + ) + + torch.manual_seed(seed) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( + device, dtype=torch.bfloat16 + ) + + torch.manual_seed(seed) + pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) + + timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "txt_ids": text_ids, + "img_ids": image_ids, + "timestep": timestep, + } + + inputs = get_dummy_tensor_inputs(torch_device) + expected_slice = np.array( + [ + 0.33789062, + -0.04736328, + -0.00256348, + -0.23144531, + -0.49804688, + 0.4375, + -0.15429688, + -0.65234375, + 0.44335938, + ] + ) + + # non sharded + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + device_map="auto", + torch_dtype=torch.bfloat16, + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, bnb.nn.modules.Int8Params)) + + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + + # sharded + quantization_config = BitsAndBytesConfig(load_in_8bit=True) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-sharded", + subfolder="transformer", + quantization_config=quantization_config, + device_map="auto", + torch_dtype=torch.bfloat16, + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, bnb.nn.modules.Int8Params)) + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + + +@require_transformers_version_greater("4.44.0") +class SlowBnb8bitFluxTests(Base8bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + model_id = "hf-internal-testing/flux.1-dev-int8-pkg" + t5_8bit = T5EncoderModel.from_pretrained(model_id, subfolder="text_encoder_2") + transformer_8bit = FluxTransformer2DModel.from_pretrained(model_id, subfolder="transformer") + self.pipeline_8bit = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + text_encoder_2=t5_8bit, + transformer=transformer_8bit, + torch_dtype=torch.float16, + ) + self.pipeline_8bit.enable_model_cpu_offload() + + def tearDown(self): + del self.pipeline_8bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_quality(self): + # keep the resolution and max tokens to a lower number for faster execution. 
+ output = self.pipeline_8bit( + prompt=self.prompt, + num_inference_steps=self.num_inference_steps, + generator=torch.manual_seed(self.seed), + height=256, + width=256, + max_sequence_length=64, + output_type="np", + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.0574, 0.0554, 0.0581, 0.0686, 0.0676, 0.0759, 0.0757, 0.0803, 0.0930]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3) + + @require_peft_version_greater("0.14.0") + def test_lora_loading(self): + self.pipeline_8bit.load_lora_weights( + hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"), adapter_name="hyper-sd" + ) + self.pipeline_8bit.set_adapters("hyper-sd", adapter_weights=0.125) + + output = self.pipeline_8bit( + prompt=self.prompt, + height=256, + width=256, + max_sequence_length=64, + output_type="np", + num_inference_steps=8, + generator=torch.manual_seed(42), + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + + expected_slice = np.array([0.3916, 0.3916, 0.3887, 0.4243, 0.4155, 0.4233, 0.4570, 0.4531, 0.4248]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3) + + +@require_transformers_version_greater("4.44.0") +@require_peft_backend +class SlowBnb4BitFluxControlWithLoraTests(Base8bitTests): + def setUp(self) -> None: + gc.collect() + backend_empty_cache(torch_device) + + self.pipeline_8bit = FluxControlPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + quantization_config=PipelineQuantizationConfig( + quant_backend="bitsandbytes_8bit", + quant_kwargs={"load_in_8bit": True}, + components_to_quantize=["transformer", "text_encoder_2"], + ), + torch_dtype=torch.float16, + ) + self.pipeline_8bit.enable_model_cpu_offload() + + def tearDown(self): + del self.pipeline_8bit + + gc.collect() + backend_empty_cache(torch_device) + + def test_lora_loading(self): + self.pipeline_8bit.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora") + + output = self.pipeline_8bit( + prompt=self.prompt, + control_image=Image.new(mode="RGB", size=(256, 256)), + height=256, + width=256, + max_sequence_length=64, + output_type="np", + num_inference_steps=8, + generator=torch.Generator().manual_seed(42), + ).images + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.2029, 0.2136, 0.2268, 0.1921, 0.1997, 0.2185, 0.2021, 0.2183, 0.2292]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3, msg=f"{out_slice=} != {expected_slice=}") + + +@slow +class BaseBnb8bitSerializationTests(Base8bitTests): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + quantization_config = BitsAndBytesConfig( + load_in_8bit=True, + ) + self.model_0 = SD3Transformer2DModel.from_pretrained( + self.model_name, subfolder="transformer", quantization_config=quantization_config, device_map=torch_device + ) + + def tearDown(self): + del self.model_0 + + gc.collect() + backend_empty_cache(torch_device) + + def test_serialization(self): + r""" + Test whether it is possible to serialize a model in 8-bit. Uses most typical params as default. 
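+
+        Roughly, the round trip under test (illustrative sketch; ``model_0`` stands for the
+        8-bit model created in ``setUp``):
+
+            import tempfile
+            from diffusers import SD3Transformer2DModel
+
+            with tempfile.TemporaryDirectory() as tmpdir:
+                model_0.save_pretrained(tmpdir)  # pass max_shard_size="200MB" to exercise sharded saving
+                model_1 = SD3Transformer2DModel.from_pretrained(tmpdir)
+                # reloaded linear weights are bnb.nn.Int8Params and carry their SCB scales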
+ """ + self.assertTrue("_pre_quantization_dtype" in self.model_0.config) + with tempfile.TemporaryDirectory() as tmpdirname: + self.model_0.save_pretrained(tmpdirname) + + config = SD3Transformer2DModel.load_config(tmpdirname) + self.assertTrue("quantization_config" in config) + self.assertTrue("_pre_quantization_dtype" not in config) + + model_1 = SD3Transformer2DModel.from_pretrained(tmpdirname) + + # checking quantized linear module weight + linear = get_some_linear_layer(model_1) + self.assertTrue(linear.weight.__class__ == bnb.nn.Int8Params) + self.assertTrue(hasattr(linear.weight, "SCB")) + + # checking memory footpring + self.assertAlmostEqual(self.model_0.get_memory_footprint() / model_1.get_memory_footprint(), 1, places=2) + + # Matching all parameters and their quant_state items: + d0 = dict(self.model_0.named_parameters()) + d1 = dict(model_1.named_parameters()) + self.assertTrue(d0.keys() == d1.keys()) + + # comparing forward() outputs + dummy_inputs = self.get_dummy_inputs() + inputs = {k: v.to(torch_device) for k, v in dummy_inputs.items() if isinstance(v, torch.Tensor)} + inputs.update({k: v for k, v in dummy_inputs.items() if k not in inputs}) + out_0 = self.model_0(**inputs)[0] + out_1 = model_1(**inputs)[0] + self.assertTrue(torch.equal(out_0, out_1)) + + def test_serialization_sharded(self): + with tempfile.TemporaryDirectory() as tmpdirname: + self.model_0.save_pretrained(tmpdirname, max_shard_size="200MB") + + config = SD3Transformer2DModel.load_config(tmpdirname) + self.assertTrue("quantization_config" in config) + self.assertTrue("_pre_quantization_dtype" not in config) + + model_1 = SD3Transformer2DModel.from_pretrained(tmpdirname) + + # checking quantized linear module weight + linear = get_some_linear_layer(model_1) + self.assertTrue(linear.weight.__class__ == bnb.nn.Int8Params) + self.assertTrue(hasattr(linear.weight, "SCB")) + + # comparing forward() outputs + dummy_inputs = self.get_dummy_inputs() + inputs = {k: v.to(torch_device) for k, v in dummy_inputs.items() if isinstance(v, torch.Tensor)} + inputs.update({k: v for k, v in dummy_inputs.items() if k not in inputs}) + out_0 = self.model_0(**inputs)[0] + out_1 = model_1(**inputs)[0] + self.assertTrue(torch.equal(out_0, out_1)) + + +@require_torch_version_greater_equal("2.6.0") +@require_bitsandbytes_version_greater("0.45.5") +class Bnb8BitCompileTests(QuantCompileTests, unittest.TestCase): + @property + def quantization_config(self): + return PipelineQuantizationConfig( + quant_backend="bitsandbytes_8bit", + quant_kwargs={"load_in_8bit": True}, + components_to_quantize=["transformer", "text_encoder_2"], + ) + + @pytest.mark.xfail( + reason="Test fails because of an offloading problem from Accelerate with confusion in hooks." + " Test passes without recompilation context manager. Refer to https://github.com/huggingface/diffusers/pull/12002/files#r2240462757 for details." 
+ ) + def test_torch_compile(self): + torch._dynamo.config.capture_dynamic_output_shape_ops = True + super()._test_torch_compile(torch_dtype=torch.float16) + + def test_torch_compile_with_cpu_offload(self): + super()._test_torch_compile_with_cpu_offload(torch_dtype=torch.float16) + + @pytest.mark.xfail(reason="Test fails because of an offloading problem from Accelerate with confusion in hooks.") + def test_torch_compile_with_group_offload_leaf(self): + super()._test_torch_compile_with_group_offload_leaf(torch_dtype=torch.float16, use_stream=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/gguf/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/gguf/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/gguf/test_gguf.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/gguf/test_gguf.py new file mode 100644 index 0000000000000000000000000000000000000000..442d236438fb72a265980d01eec50e7d2396d7ac --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/gguf/test_gguf.py @@ -0,0 +1,768 @@ +import gc +import unittest + +import numpy as np +import torch +import torch.nn as nn + +from diffusers import ( + AuraFlowPipeline, + AuraFlowTransformer2DModel, + DiffusionPipeline, + FluxControlPipeline, + FluxPipeline, + FluxTransformer2DModel, + GGUFQuantizationConfig, + HiDreamImageTransformer2DModel, + SD3Transformer2DModel, + StableDiffusion3Pipeline, + WanS2VTransformer3DModel, + WanTransformer3DModel, + WanVACETransformer3DModel, +) +from diffusers.utils import load_image + +from ...testing_utils import ( + Expectations, + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_peak_memory_stats, + enable_full_determinism, + is_gguf_available, + nightly, + numpy_cosine_similarity_distance, + require_accelerate, + require_accelerator, + require_big_accelerator, + require_gguf_version_greater_or_equal, + require_kernels_version_greater_or_equal, + require_peft_backend, + require_torch_version_greater, + torch_device, +) +from ..test_torch_compile_utils import QuantCompileTests + + +if is_gguf_available(): + import gguf + + from diffusers.quantizers.gguf.utils import GGUFLinear, GGUFParameter + +enable_full_determinism() + + +@nightly +@require_accelerate +@require_accelerator +@require_gguf_version_greater_or_equal("0.10.0") +@require_kernels_version_greater_or_equal("0.9.0") +class GGUFCudaKernelsTests(unittest.TestCase): + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def test_cuda_kernels_vs_native(self): + if torch_device != "cuda": + self.skipTest("CUDA kernels test requires CUDA device") + + from diffusers.quantizers.gguf.utils import GGUFLinear, can_use_cuda_kernels + + if not can_use_cuda_kernels: + self.skipTest("CUDA kernels not available (compute capability < 7 or kernels not installed)") + + test_quant_types = ["Q4_0", "Q4_K"] + test_shape = (1, 64, 512) # batch, seq_len, hidden_dim + compute_dtype = torch.bfloat16 + + for quant_type in test_quant_types: + qtype = getattr(gguf.GGMLQuantizationType, quant_type) + in_features, out_features = 512, 512 + + torch.manual_seed(42) + float_weight = torch.randn(out_features, in_features, dtype=torch.float32) + quantized_data = gguf.quants.quantize(float_weight.numpy(), qtype) + weight_data = 
torch.from_numpy(quantized_data).to(device=torch_device) + weight = GGUFParameter(weight_data, quant_type=qtype) + + x = torch.randn(test_shape, dtype=compute_dtype, device=torch_device) + + linear = GGUFLinear(in_features, out_features, bias=True, compute_dtype=compute_dtype) + linear.weight = weight + linear.bias = nn.Parameter(torch.randn(out_features, dtype=compute_dtype)) + linear = linear.to(torch_device) + + with torch.no_grad(): + output_native = linear.forward_native(x) + output_cuda = linear.forward_cuda(x) + + assert torch.allclose(output_native, output_cuda, 1e-2), ( + f"GGUF CUDA Kernel Output is different from Native Output for {quant_type}" + ) + + +@nightly +@require_big_accelerator +@require_accelerate +@require_gguf_version_greater_or_equal("0.10.0") +class GGUFSingleFileTesterMixin: + ckpt_path = None + model_cls = None + torch_dtype = torch.bfloat16 + expected_memory_use_in_gb = 5 + + def test_gguf_parameters(self): + quant_storage_type = torch.uint8 + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) + + for param_name, param in model.named_parameters(): + if isinstance(param, GGUFParameter): + assert hasattr(param, "quant_type") + assert param.dtype == quant_storage_type + + def test_gguf_linear_layers(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear) and hasattr(module.weight, "quant_type"): + assert module.weight.dtype == torch.uint8 + if module.bias is not None: + assert module.bias.dtype == self.torch_dtype + + def test_gguf_memory_usage(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + + model = self.model_cls.from_single_file( + self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype + ) + model.to(torch_device) + assert (model.get_memory_footprint() / 1024**3) < self.expected_memory_use_in_gb + inputs = self.get_dummy_inputs() + + backend_reset_peak_memory_stats(torch_device) + backend_empty_cache(torch_device) + with torch.no_grad(): + model(**inputs) + max_memory = backend_max_memory_allocated(torch_device) + assert (max_memory / 1024**3) < self.expected_memory_use_in_gb + + def test_keep_modules_in_fp32(self): + r""" + A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. + Also ensures if inference works. 
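+
+        The pattern being checked, roughly (illustrative sketch; the model class and GGUF
+        checkpoint URL mirror the Flux test case below):
+
+            import torch
+            from diffusers import FluxTransformer2DModel, GGUFQuantizationConfig
+
+            FluxTransformer2DModel._keep_in_fp32_modules = ["proj_out"]
+            model = FluxTransformer2DModel.from_single_file(
+                "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf",
+                quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16),
+            )
+            # linears listed in _keep_in_fp32_modules keep torch.float32 weights;
+            # the rest hold packed GGUF data as torch.uint8 GGUFParameter tensors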
+ """ + _keep_in_fp32_modules = self.model_cls._keep_in_fp32_modules + self.model_cls._keep_in_fp32_modules = ["proj_out"] + + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if name in model._keep_in_fp32_modules: + assert module.weight.dtype == torch.float32 + self.model_cls._keep_in_fp32_modules = _keep_in_fp32_modules + + def test_dtype_assignment(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) + + with self.assertRaises(ValueError): + # Tries with a `dtype` + model.to(torch.float16) + + with self.assertRaises(ValueError): + # Tries with a `device` and `dtype` + device_0 = f"{torch_device}:0" + model.to(device=device_0, dtype=torch.float16) + + with self.assertRaises(ValueError): + # Tries with a cast + model.float() + + with self.assertRaises(ValueError): + # Tries with a cast + model.half() + + # This should work + model.to(torch_device) + + def test_dequantize_model(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + model = self.model_cls.from_single_file(self.ckpt_path, quantization_config=quantization_config) + model.dequantize() + + def _check_for_gguf_linear(model): + has_children = list(model.children()) + if not has_children: + return + + for name, module in model.named_children(): + if isinstance(module, nn.Linear): + assert not isinstance(module, GGUFLinear), f"{name} is still GGUFLinear" + assert not isinstance(module.weight, GGUFParameter), f"{name} weight is still GGUFParameter" + + for name, module in model.named_children(): + _check_for_gguf_linear(module) + + +class FluxGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf" + diffusers_ckpt_path = "https://huggingface.co/sayakpaul/flux-diffusers-gguf/blob/main/model-Q4_0.gguf" + torch_dtype = torch.bfloat16 + model_cls = FluxTransformer2DModel + expected_memory_use_in_gb = 5 + + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 4096, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "pooled_projections": torch.randn( + (1, 768), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + "img_ids": torch.randn((4096, 3), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "txt_ids": torch.randn((512, 3), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "guidance": torch.tensor([3.5]).to(torch_device, self.torch_dtype), + } + + def test_pipeline_inference(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + transformer = self.model_cls.from_single_file( + self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype + ) + pipe = 
FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=self.torch_dtype + ) + pipe.enable_model_cpu_offload() + + prompt = "a cat holding a sign that says hello" + output = pipe( + prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np" + ).images[0] + output_slice = output[:3, :3, :].flatten() + expected_slice = np.array( + [ + 0.47265625, + 0.43359375, + 0.359375, + 0.47070312, + 0.421875, + 0.34375, + 0.46875, + 0.421875, + 0.34765625, + 0.46484375, + 0.421875, + 0.34179688, + 0.47070312, + 0.42578125, + 0.34570312, + 0.46875, + 0.42578125, + 0.3515625, + 0.45507812, + 0.4140625, + 0.33984375, + 0.4609375, + 0.41796875, + 0.34375, + 0.45898438, + 0.41796875, + 0.34375, + ] + ) + max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) + assert max_diff < 1e-4 + + def test_loading_gguf_diffusers_format(self): + model = self.model_cls.from_single_file( + self.diffusers_ckpt_path, + subfolder="transformer", + quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), + config="black-forest-labs/FLUX.1-dev", + ) + model.to(torch_device) + model(**self.get_dummy_inputs()) + + +class SD35LargeGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/stable-diffusion-3.5-large-gguf/blob/main/sd3.5_large-Q4_0.gguf" + torch_dtype = torch.bfloat16 + model_cls = SD3Transformer2DModel + expected_memory_use_in_gb = 5 + + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 16, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "pooled_projections": torch.randn( + (1, 2048), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + def test_pipeline_inference(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + transformer = self.model_cls.from_single_file( + self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype + ) + pipe = StableDiffusion3Pipeline.from_pretrained( + "stabilityai/stable-diffusion-3.5-large", transformer=transformer, torch_dtype=self.torch_dtype + ) + pipe.enable_model_cpu_offload() + + prompt = "a cat holding a sign that says hello" + output = pipe( + prompt=prompt, + num_inference_steps=2, + generator=torch.Generator("cpu").manual_seed(0), + output_type="np", + ).images[0] + output_slice = output[:3, :3, :].flatten() + expected_slices = Expectations( + { + ("xpu", 3): np.array( + [ + 0.1953125, + 0.3125, + 0.31445312, + 0.13085938, + 0.30664062, + 0.29296875, + 0.11523438, + 0.2890625, + 0.28320312, + 0.16601562, + 0.3046875, + 0.328125, + 0.140625, + 0.31640625, + 0.32421875, + 0.12304688, + 0.3046875, + 0.3046875, + 0.17578125, + 0.3359375, + 0.3203125, + 0.16601562, + 0.34375, + 0.31640625, + 0.15429688, + 0.328125, + 0.31054688, + ] + ), + ("cuda", 7): np.array( + [ + 0.17578125, + 0.27539062, + 0.27734375, + 0.11914062, + 0.26953125, + 0.25390625, + 0.109375, + 0.25390625, + 0.25, + 0.15039062, + 0.26171875, + 0.28515625, + 0.13671875, + 0.27734375, + 0.28515625, + 
0.12109375, + 0.26757812, + 0.265625, + 0.16210938, + 0.29882812, + 0.28515625, + 0.15625, + 0.30664062, + 0.27734375, + 0.14648438, + 0.29296875, + 0.26953125, + ] + ), + } + ) + expected_slice = expected_slices.get_expectation() + max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) + assert max_diff < 1e-4 + + +class SD35MediumGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/stable-diffusion-3.5-medium-gguf/blob/main/sd3.5_medium-Q3_K_M.gguf" + torch_dtype = torch.bfloat16 + model_cls = SD3Transformer2DModel + expected_memory_use_in_gb = 2 + + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 16, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "pooled_projections": torch.randn( + (1, 2048), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + def test_pipeline_inference(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + transformer = self.model_cls.from_single_file( + self.ckpt_path, quantization_config=quantization_config, torch_dtype=self.torch_dtype + ) + pipe = StableDiffusion3Pipeline.from_pretrained( + "stabilityai/stable-diffusion-3.5-medium", transformer=transformer, torch_dtype=self.torch_dtype + ) + pipe.enable_model_cpu_offload() + + prompt = "a cat holding a sign that says hello" + output = pipe( + prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np" + ).images[0] + output_slice = output[:3, :3, :].flatten() + expected_slice = np.array( + [ + 0.625, + 0.6171875, + 0.609375, + 0.65625, + 0.65234375, + 0.640625, + 0.6484375, + 0.640625, + 0.625, + 0.6484375, + 0.63671875, + 0.6484375, + 0.66796875, + 0.65625, + 0.65234375, + 0.6640625, + 0.6484375, + 0.6328125, + 0.6640625, + 0.6484375, + 0.640625, + 0.67578125, + 0.66015625, + 0.62109375, + 0.671875, + 0.65625, + 0.62109375, + ] + ) + max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) + assert max_diff < 1e-4 + + +class AuraFlowGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/AuraFlow-v0.3-gguf/blob/main/aura_flow_0.3-Q2_K.gguf" + torch_dtype = torch.bfloat16 + model_cls = AuraFlowTransformer2DModel + expected_memory_use_in_gb = 4 + + def setUp(self): + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 4, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 2048), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + def test_pipeline_inference(self): + quantization_config = GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + transformer = self.model_cls.from_single_file( + self.ckpt_path, quantization_config=quantization_config, 
torch_dtype=self.torch_dtype + ) + pipe = AuraFlowPipeline.from_pretrained( + "fal/AuraFlow-v0.3", transformer=transformer, torch_dtype=self.torch_dtype + ) + pipe.enable_model_cpu_offload() + + prompt = "a pony holding a sign that says hello" + output = pipe( + prompt=prompt, num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np" + ).images[0] + output_slice = output[:3, :3, :].flatten() + expected_slice = np.array( + [ + 0.46484375, + 0.546875, + 0.64453125, + 0.48242188, + 0.53515625, + 0.59765625, + 0.47070312, + 0.5078125, + 0.5703125, + 0.42773438, + 0.50390625, + 0.5703125, + 0.47070312, + 0.515625, + 0.57421875, + 0.45898438, + 0.48632812, + 0.53515625, + 0.4453125, + 0.5078125, + 0.56640625, + 0.47851562, + 0.5234375, + 0.57421875, + 0.48632812, + 0.5234375, + 0.56640625, + ] + ) + max_diff = numpy_cosine_similarity_distance(expected_slice, output_slice) + assert max_diff < 1e-4 + + +@require_peft_backend +@nightly +@require_big_accelerator +@require_accelerate +@require_gguf_version_greater_or_equal("0.10.0") +class FluxControlLoRAGGUFTests(unittest.TestCase): + def test_lora_loading(self): + ckpt_path = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf" + transformer = FluxTransformer2DModel.from_single_file( + ckpt_path, + quantization_config=GGUFQuantizationConfig(compute_dtype=torch.bfloat16), + torch_dtype=torch.bfloat16, + ) + pipe = FluxControlPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", + transformer=transformer, + torch_dtype=torch.bfloat16, + ).to(torch_device) + pipe.load_lora_weights("black-forest-labs/FLUX.1-Canny-dev-lora") + + prompt = "A robot made of exotic candies and chocolates of different kinds. The background is filled with confetti and celebratory gifts." 
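+        # The control image is a Canny edge map; FluxControlPipeline consumes it together with the Canny LoRA loaded above.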
+ control_image = load_image( + "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/control_image_robot_canny.png" + ) + + output = pipe( + prompt=prompt, + control_image=control_image, + height=256, + width=256, + num_inference_steps=10, + guidance_scale=30.0, + output_type="np", + generator=torch.manual_seed(0), + ).images + + out_slice = output[0, -3:, -3:, -1].flatten() + expected_slice = np.array([0.8047, 0.8359, 0.8711, 0.6875, 0.7070, 0.7383, 0.5469, 0.5820, 0.6641]) + + max_diff = numpy_cosine_similarity_distance(expected_slice, out_slice) + self.assertTrue(max_diff < 1e-3) + + +class HiDreamGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/HiDream-I1-Dev-gguf/blob/main/hidream-i1-dev-Q2_K.gguf" + torch_dtype = torch.bfloat16 + model_cls = HiDreamImageTransformer2DModel + expected_memory_use_in_gb = 8 + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 16, 128, 128), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states_t5": torch.randn( + (1, 128, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "encoder_hidden_states_llama3": torch.randn( + (32, 1, 128, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "pooled_embeds": torch.randn( + (1, 2048), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timesteps": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + +class WanGGUFTexttoVideoSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/Wan2.1-T2V-14B-gguf/blob/main/wan2.1-t2v-14b-Q3_K_S.gguf" + torch_dtype = torch.bfloat16 + model_cls = WanTransformer3DModel + expected_memory_use_in_gb = 9 + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 16, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + +class WanGGUFImagetoVideoSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/city96/Wan2.1-I2V-14B-480P-gguf/blob/main/wan2.1-i2v-14b-480p-Q3_K_S.gguf" + torch_dtype = torch.bfloat16 + model_cls = WanTransformer3DModel + expected_memory_use_in_gb = 9 + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 36, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "encoder_hidden_states_image": torch.randn( + (1, 257, 1280), generator=torch.Generator("cpu").manual_seed(0) + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + +class WanVACEGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/QuantStack/Wan2.1_14B_VACE-GGUF/blob/main/Wan2.1_14B_VACE-Q3_K_S.gguf" + torch_dtype = torch.bfloat16 + model_cls = WanVACETransformer3DModel + expected_memory_use_in_gb = 9 + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 16, 2, 
64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "control_hidden_states": torch.randn( + (1, 96, 2, 64, 64), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "control_hidden_states_scale": torch.randn( + (8,), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + +class WanS2VGGUFSingleFileTests(GGUFSingleFileTesterMixin, unittest.TestCase): + ckpt_path = "https://huggingface.co/QuantStack/Wan2.2-S2V-14B-GGUF/blob/main/Wan2.2-S2V-14B-Q3_K_S.gguf" + torch_dtype = torch.bfloat16 + model_cls = WanS2VTransformer3DModel + expected_memory_use_in_gb = 9 + + def get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 16, 2, 64, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "control_hidden_states": torch.randn( + (1, 96, 2, 64, 64), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "control_hidden_states_scale": torch.randn( + (8,), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + } + + +@require_torch_version_greater("2.7.1") +class GGUFCompileTests(QuantCompileTests, unittest.TestCase): + torch_dtype = torch.bfloat16 + gguf_ckpt = "https://huggingface.co/city96/FLUX.1-dev-gguf/blob/main/flux1-dev-Q2_K.gguf" + + @property + def quantization_config(self): + return GGUFQuantizationConfig(compute_dtype=self.torch_dtype) + + def _init_pipeline(self, *args, **kwargs): + transformer = FluxTransformer2DModel.from_single_file( + self.gguf_ckpt, quantization_config=self.quantization_config, torch_dtype=self.torch_dtype + ) + pipe = DiffusionPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=self.torch_dtype + ) + return pipe diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/quanto/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/quanto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/quanto/test_quanto.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/quanto/test_quanto.py new file mode 100644 index 0000000000000000000000000000000000000000..28555a6076b88d236f959e3eff7c91ae02e6607e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/quanto/test_quanto.py @@ -0,0 +1,335 @@ +import gc +import tempfile +import unittest + +from diffusers import FluxPipeline, FluxTransformer2DModel, QuantoConfig +from diffusers.models.attention_processor import Attention +from diffusers.utils import is_optimum_quanto_available, is_torch_available + +from ...testing_utils import ( + backend_empty_cache, + backend_reset_peak_memory_stats, + enable_full_determinism, + nightly, + numpy_cosine_similarity_distance, + require_accelerate, + require_big_accelerator, + require_torch_cuda_compatibility, + torch_device, +) + + +if is_optimum_quanto_available(): + 
from optimum.quanto import QLinear + +if is_torch_available(): + import torch + + from ..utils import LoRALayer, get_memory_consumption_stat + +enable_full_determinism() + + +@nightly +@require_big_accelerator +@require_accelerate +class QuantoBaseTesterMixin: + model_id = None + pipeline_model_id = None + model_cls = None + torch_dtype = torch.bfloat16 + # the expected reduction in peak memory used compared to an unquantized model expressed as a percentage + expected_memory_reduction = 0.0 + keep_in_fp32_module = "" + modules_to_not_convert = "" + _test_torch_compile = False + + def setUp(self): + backend_reset_peak_memory_stats(torch_device) + backend_empty_cache(torch_device) + gc.collect() + + def tearDown(self): + backend_reset_peak_memory_stats(torch_device) + backend_empty_cache(torch_device) + gc.collect() + + def get_dummy_init_kwargs(self): + return {"weights_dtype": "float8"} + + def get_dummy_model_init_kwargs(self): + return { + "pretrained_model_name_or_path": self.model_id, + "torch_dtype": self.torch_dtype, + "quantization_config": QuantoConfig(**self.get_dummy_init_kwargs()), + } + + def test_quanto_layers(self): + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + assert isinstance(module, QLinear) + + def test_quanto_memory_usage(self): + inputs = self.get_dummy_inputs() + inputs = { + k: v.to(device=torch_device, dtype=torch.bfloat16) for k, v in inputs.items() if not isinstance(v, bool) + } + + unquantized_model = self.model_cls.from_pretrained(self.model_id, torch_dtype=self.torch_dtype) + unquantized_model.to(torch_device) + unquantized_model_memory = get_memory_consumption_stat(unquantized_model, inputs) + + quantized_model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + quantized_model.to(torch_device) + quantized_model_memory = get_memory_consumption_stat(quantized_model, inputs) + + assert unquantized_model_memory / quantized_model_memory >= self.expected_memory_reduction + + def test_keep_modules_in_fp32(self): + r""" + A simple tests to check if the modules under `_keep_in_fp32_modules` are kept in fp32. + Also ensures if inference works. 
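+        The class-level `_keep_in_fp32_modules` is temporarily overridden with `keep_in_fp32_module` and restored afterwards so other tests are unaffected.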
+ """ + _keep_in_fp32_modules = self.model_cls._keep_in_fp32_modules + self.model_cls._keep_in_fp32_modules = self.keep_in_fp32_module + + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + model.to(torch_device) + + for name, module in model.named_modules(): + if isinstance(module, torch.nn.Linear): + if name in model._keep_in_fp32_modules: + assert module.weight.dtype == torch.float32 + self.model_cls._keep_in_fp32_modules = _keep_in_fp32_modules + + def test_modules_to_not_convert(self): + init_kwargs = self.get_dummy_model_init_kwargs() + + quantization_config_kwargs = self.get_dummy_init_kwargs() + quantization_config_kwargs.update({"modules_to_not_convert": self.modules_to_not_convert}) + quantization_config = QuantoConfig(**quantization_config_kwargs) + + init_kwargs.update({"quantization_config": quantization_config}) + + model = self.model_cls.from_pretrained(**init_kwargs) + model.to(torch_device) + + for name, module in model.named_modules(): + if name in self.modules_to_not_convert: + assert not isinstance(module, QLinear) + + def test_dtype_assignment(self): + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + + with self.assertRaises(ValueError): + # Tries with a `dtype` + model.to(torch.float16) + + with self.assertRaises(ValueError): + # Tries with a `device` and `dtype` + device_0 = f"{torch_device}:0" + model.to(device=device_0, dtype=torch.float16) + + with self.assertRaises(ValueError): + # Tries with a cast + model.float() + + with self.assertRaises(ValueError): + # Tries with a cast + model.half() + + # This should work + model.to(torch_device) + + def test_serialization(self): + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + inputs = self.get_dummy_inputs() + + model.to(torch_device) + with torch.no_grad(): + model_output = model(**inputs) + + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir) + saved_model = self.model_cls.from_pretrained( + tmp_dir, + torch_dtype=torch.bfloat16, + ) + + saved_model.to(torch_device) + with torch.no_grad(): + saved_model_output = saved_model(**inputs) + + assert torch.allclose(model_output.sample, saved_model_output.sample, rtol=1e-5, atol=1e-5) + + def test_torch_compile(self): + if not self._test_torch_compile: + return + + model = self.model_cls.from_pretrained(**self.get_dummy_model_init_kwargs()) + compiled_model = torch.compile(model, mode="max-autotune", fullgraph=True, dynamic=False) + + model.to(torch_device) + with torch.no_grad(): + model_output = model(**self.get_dummy_inputs()).sample + + compiled_model.to(torch_device) + with torch.no_grad(): + compiled_model_output = compiled_model(**self.get_dummy_inputs()).sample + + model_output = model_output.detach().float().cpu().numpy() + compiled_model_output = compiled_model_output.detach().float().cpu().numpy() + + max_diff = numpy_cosine_similarity_distance(model_output.flatten(), compiled_model_output.flatten()) + assert max_diff < 1e-3 + + def test_device_map_error(self): + with self.assertRaises(ValueError): + _ = self.model_cls.from_pretrained( + **self.get_dummy_model_init_kwargs(), device_map={0: "8GB", "cpu": "16GB"} + ) + + +class FluxTransformerQuantoMixin(QuantoBaseTesterMixin): + model_id = "hf-internal-testing/tiny-flux-transformer" + model_cls = FluxTransformer2DModel + pipeline_cls = FluxPipeline + torch_dtype = torch.bfloat16 + keep_in_fp32_module = "proj_out" + modules_to_not_convert = ["proj_out"] + _test_torch_compile = False + + def 
get_dummy_inputs(self): + return { + "hidden_states": torch.randn((1, 4096, 64), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "encoder_hidden_states": torch.randn( + (1, 512, 4096), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "pooled_projections": torch.randn( + (1, 768), + generator=torch.Generator("cpu").manual_seed(0), + ).to(torch_device, self.torch_dtype), + "timestep": torch.tensor([1]).to(torch_device, self.torch_dtype), + "img_ids": torch.randn((4096, 3), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "txt_ids": torch.randn((512, 3), generator=torch.Generator("cpu").manual_seed(0)).to( + torch_device, self.torch_dtype + ), + "guidance": torch.tensor([3.5]).to(torch_device, self.torch_dtype), + } + + def get_dummy_training_inputs(self, device=None, seed: int = 0): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + torch.manual_seed(seed) + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( + device, dtype=torch.bfloat16 + ) + + torch.manual_seed(seed) + pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) + + timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "txt_ids": text_ids, + "img_ids": image_ids, + "timestep": timestep, + } + + def test_model_cpu_offload(self): + init_kwargs = self.get_dummy_init_kwargs() + transformer = self.model_cls.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + quantization_config=QuantoConfig(**init_kwargs), + subfolder="transformer", + torch_dtype=torch.bfloat16, + ) + pipe = self.pipeline_cls.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", transformer=transformer, torch_dtype=torch.bfloat16 + ) + pipe.enable_model_cpu_offload(device=torch_device) + _ = pipe("a cat holding a sign that says hello", num_inference_steps=2) + + def test_training(self): + quantization_config = QuantoConfig(**self.get_dummy_init_kwargs()) + quantized_model = self.model_cls.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ).to(torch_device) + + for param in quantized_model.parameters(): + # freeze the model as only adapter layers will be trained + param.requires_grad = False + if param.ndim == 1: + param.data = param.data.to(torch.float32) + + for _, module in quantized_model.named_modules(): + if isinstance(module, Attention): + module.to_q = LoRALayer(module.to_q, rank=4) + module.to_k = LoRALayer(module.to_k, rank=4) + module.to_v = LoRALayer(module.to_v, rank=4) + + with torch.amp.autocast(str(torch_device), dtype=torch.bfloat16): + inputs = self.get_dummy_training_inputs(torch_device) + output = quantized_model(**inputs)[0] + output.norm().backward() + + for module in 
quantized_model.modules(): + if isinstance(module, LoRALayer): + self.assertTrue(module.adapter[1].weight.grad is not None) + + +class FluxTransformerFloat8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): + expected_memory_reduction = 0.6 + + def get_dummy_init_kwargs(self): + return {"weights_dtype": "float8"} + + +class FluxTransformerInt8WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): + expected_memory_reduction = 0.6 + _test_torch_compile = True + + def get_dummy_init_kwargs(self): + return {"weights_dtype": "int8"} + + +@require_torch_cuda_compatibility(8.0) +class FluxTransformerInt4WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): + expected_memory_reduction = 0.55 + + def get_dummy_init_kwargs(self): + return {"weights_dtype": "int4"} + + +@require_torch_cuda_compatibility(8.0) +class FluxTransformerInt2WeightsTest(FluxTransformerQuantoMixin, unittest.TestCase): + expected_memory_reduction = 0.65 + + def get_dummy_init_kwargs(self): + return {"weights_dtype": "int2"} diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/test_pipeline_level_quantization.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/test_pipeline_level_quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..51cf4057d64eda7a7c77ecbf8da24cb1981ce3f0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/test_pipeline_level_quantization.py @@ -0,0 +1,301 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a clone of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import tempfile +import unittest + +import torch +from parameterized import parameterized + +from diffusers import BitsAndBytesConfig, DiffusionPipeline, QuantoConfig +from diffusers.quantizers import PipelineQuantizationConfig +from diffusers.utils import logging + +from ..testing_utils import ( + CaptureLogger, + is_transformers_available, + require_accelerate, + require_bitsandbytes_version_greater, + require_quanto, + require_torch, + require_torch_accelerator, + slow, + torch_device, +) + + +if is_transformers_available(): + from transformers import BitsAndBytesConfig as TranBitsAndBytesConfig +else: + TranBitsAndBytesConfig = None + + +@require_bitsandbytes_version_greater("0.43.2") +@require_quanto +@require_accelerate +@require_torch +@require_torch_accelerator +@slow +class PipelineQuantizationTests(unittest.TestCase): + model_name = "hf-internal-testing/tiny-flux-pipe" + prompt = "a beautiful sunset amidst the mountains." 
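+    # Prompt, step count, and seed shared across the pipeline-level quantization tests below.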
+ num_inference_steps = 10 + seed = 0 + + def test_quant_config_set_correctly_through_kwargs(self): + components_to_quantize = ["transformer", "text_encoder_2"] + quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_4bit", + quant_kwargs={ + "load_in_4bit": True, + "bnb_4bit_quant_type": "nf4", + "bnb_4bit_compute_dtype": torch.bfloat16, + }, + components_to_quantize=components_to_quantize, + ) + pipe = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ).to(torch_device) + for name, component in pipe.components.items(): + if name in components_to_quantize: + self.assertTrue(getattr(component.config, "quantization_config", None) is not None) + quantization_config = component.config.quantization_config + self.assertTrue(quantization_config.load_in_4bit) + self.assertTrue(quantization_config.quant_method == "bitsandbytes") + + _ = pipe(self.prompt, num_inference_steps=self.num_inference_steps) + + def test_quant_config_set_correctly_through_granular(self): + quant_config = PipelineQuantizationConfig( + quant_mapping={ + "transformer": QuantoConfig(weights_dtype="int8"), + "text_encoder_2": TranBitsAndBytesConfig(load_in_4bit=True, compute_dtype=torch.bfloat16), + } + ) + components_to_quantize = list(quant_config.quant_mapping.keys()) + pipe = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ).to(torch_device) + for name, component in pipe.components.items(): + if name in components_to_quantize: + self.assertTrue(getattr(component.config, "quantization_config", None) is not None) + quantization_config = component.config.quantization_config + + if name == "text_encoder_2": + self.assertTrue(quantization_config.load_in_4bit) + self.assertTrue(quantization_config.quant_method == "bitsandbytes") + else: + self.assertTrue(quantization_config.quant_method == "quanto") + + _ = pipe(self.prompt, num_inference_steps=self.num_inference_steps) + + def test_raises_error_for_invalid_config(self): + with self.assertRaises(ValueError) as err_context: + _ = PipelineQuantizationConfig( + quant_mapping={ + "transformer": QuantoConfig(weights_dtype="int8"), + "text_encoder_2": TranBitsAndBytesConfig(load_in_4bit=True, compute_dtype=torch.bfloat16), + }, + quant_backend="bitsandbytes_4bit", + ) + + self.assertTrue( + str(err_context.exception) + == "Both `quant_backend` and `quant_mapping` cannot be specified at the same time." + ) + + def test_validation_for_kwargs(self): + components_to_quantize = ["transformer", "text_encoder_2"] + with self.assertRaises(ValueError) as err_context: + _ = PipelineQuantizationConfig( + quant_backend="quanto", + quant_kwargs={"weights_dtype": "int8"}, + components_to_quantize=components_to_quantize, + ) + + self.assertTrue( + "The signatures of the __init__ methods of the quantization config classes" in str(err_context.exception) + ) + + def test_raises_error_for_wrong_config_class(self): + quant_config = { + "transformer": QuantoConfig(weights_dtype="int8"), + "text_encoder_2": TranBitsAndBytesConfig(load_in_4bit=True, compute_dtype=torch.bfloat16), + } + with self.assertRaises(ValueError) as err_context: + _ = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ) + self.assertTrue( + str(err_context.exception) == "`quantization_config` must be an instance of `PipelineQuantizationConfig`." 
+ ) + + def test_validation_for_mapping(self): + with self.assertRaises(ValueError) as err_context: + _ = PipelineQuantizationConfig( + quant_mapping={ + "transformer": DiffusionPipeline(), + "text_encoder_2": TranBitsAndBytesConfig(load_in_4bit=True, compute_dtype=torch.bfloat16), + } + ) + + self.assertTrue("Provided config for module_name=transformer could not be found" in str(err_context.exception)) + + def test_saving_loading(self): + quant_config = PipelineQuantizationConfig( + quant_mapping={ + "transformer": QuantoConfig(weights_dtype="int8"), + "text_encoder_2": TranBitsAndBytesConfig(load_in_4bit=True, compute_dtype=torch.bfloat16), + } + ) + components_to_quantize = list(quant_config.quant_mapping.keys()) + pipe = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ).to(torch_device) + + pipe_inputs = {"prompt": self.prompt, "num_inference_steps": self.num_inference_steps, "output_type": "latent"} + output_1 = pipe(**pipe_inputs, generator=torch.manual_seed(self.seed)).images + + with tempfile.TemporaryDirectory() as tmpdir: + pipe.save_pretrained(tmpdir) + loaded_pipe = DiffusionPipeline.from_pretrained(tmpdir, torch_dtype=torch.bfloat16).to(torch_device) + for name, component in loaded_pipe.components.items(): + if name in components_to_quantize: + self.assertTrue(getattr(component.config, "quantization_config", None) is not None) + quantization_config = component.config.quantization_config + + if name == "text_encoder_2": + self.assertTrue(quantization_config.load_in_4bit) + self.assertTrue(quantization_config.quant_method == "bitsandbytes") + else: + self.assertTrue(quantization_config.quant_method == "quanto") + + output_2 = loaded_pipe(**pipe_inputs, generator=torch.manual_seed(self.seed)).images + + self.assertTrue(torch.allclose(output_1, output_2)) + + @parameterized.expand(["quant_kwargs", "quant_mapping"]) + def test_warn_invalid_component(self, method): + invalid_component = "foo" + if method == "quant_kwargs": + components_to_quantize = ["transformer", invalid_component] + quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_8bit", + quant_kwargs={"load_in_8bit": True}, + components_to_quantize=components_to_quantize, + ) + else: + quant_config = PipelineQuantizationConfig( + quant_mapping={ + "transformer": QuantoConfig("int8"), + invalid_component: TranBitsAndBytesConfig(load_in_8bit=True), + } + ) + + logger = logging.get_logger("diffusers.pipelines.pipeline_loading_utils") + logger.setLevel(logging.WARNING) + with CaptureLogger(logger) as cap_logger: + _ = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ) + self.assertTrue(invalid_component in cap_logger.out) + + @parameterized.expand(["quant_kwargs", "quant_mapping"]) + def test_no_quantization_for_all_invalid_components(self, method): + invalid_component = "foo" + if method == "quant_kwargs": + components_to_quantize = [invalid_component] + quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_8bit", + quant_kwargs={"load_in_8bit": True}, + components_to_quantize=components_to_quantize, + ) + else: + quant_config = PipelineQuantizationConfig( + quant_mapping={invalid_component: TranBitsAndBytesConfig(load_in_8bit=True)} + ) + + pipe = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ) + for name, component in pipe.components.items(): + if 
isinstance(component, torch.nn.Module): + self.assertTrue(not hasattr(component.config, "quantization_config")) + + @parameterized.expand(["quant_kwargs", "quant_mapping"]) + def test_quant_config_repr(self, method): + component_name = "transformer" + if method == "quant_kwargs": + components_to_quantize = [component_name] + quant_config = PipelineQuantizationConfig( + quant_backend="bitsandbytes_8bit", + quant_kwargs={"load_in_8bit": True}, + components_to_quantize=components_to_quantize, + ) + else: + quant_config = PipelineQuantizationConfig( + quant_mapping={component_name: BitsAndBytesConfig(load_in_8bit=True)} + ) + + pipe = DiffusionPipeline.from_pretrained( + self.model_name, + quantization_config=quant_config, + torch_dtype=torch.bfloat16, + ) + self.assertTrue(getattr(pipe, "quantization_config", None) is not None) + retrieved_config = pipe.quantization_config + expected_config = """ +transformer BitsAndBytesConfig { + "_load_in_4bit": false, + "_load_in_8bit": true, + "bnb_4bit_compute_dtype": "float32", + "bnb_4bit_quant_storage": "uint8", + "bnb_4bit_quant_type": "fp4", + "bnb_4bit_use_double_quant": false, + "llm_int8_enable_fp32_cpu_offload": false, + "llm_int8_has_fp16_weight": false, + "llm_int8_skip_modules": null, + "llm_int8_threshold": 6.0, + "load_in_4bit": false, + "load_in_8bit": true, + "quant_method": "bitsandbytes" +} + +""" + expected_data = self._parse_config_string(expected_config) + actual_data = self._parse_config_string(str(retrieved_config)) + self.assertTrue(actual_data == expected_data) + + def _parse_config_string(self, config_string: str) -> tuple[str, dict]: + first_brace = config_string.find("{") + if first_brace == -1: + raise ValueError("Could not find opening brace '{' in the string.") + + json_part = config_string[first_brace:] + data = json.loads(json_part) + + return data diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/test_torch_compile_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/test_torch_compile_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29758cbdd7352fec06290df2703eff78ee9bf92a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/test_torch_compile_utils.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a clone of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import inspect + +import torch + +from diffusers import DiffusionPipeline + +from ..testing_utils import backend_empty_cache, require_torch_accelerator, slow, torch_device + + +@require_torch_accelerator +@slow +class QuantCompileTests: + @property + def quantization_config(self): + raise NotImplementedError( + "This property should be implemented in the subclass to return the appropriate quantization config." 
+ ) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + torch.compiler.reset() + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + torch.compiler.reset() + + def _init_pipeline(self, quantization_config, torch_dtype): + pipe = DiffusionPipeline.from_pretrained( + "stabilityai/stable-diffusion-3-medium-diffusers", + quantization_config=quantization_config, + torch_dtype=torch_dtype, + ) + return pipe + + def _test_torch_compile(self, torch_dtype=torch.bfloat16): + pipe = self._init_pipeline(self.quantization_config, torch_dtype).to(torch_device) + # `fullgraph=True` ensures no graph breaks + pipe.transformer.compile(fullgraph=True) + + # small resolutions to ensure speedy execution. + with torch._dynamo.config.patch(error_on_recompile=True): + pipe("a dog", num_inference_steps=2, max_sequence_length=16, height=256, width=256) + + def _test_torch_compile_with_cpu_offload(self, torch_dtype=torch.bfloat16): + pipe = self._init_pipeline(self.quantization_config, torch_dtype) + pipe.enable_model_cpu_offload() + # regional compilation is better for offloading. + # see: https://pytorch.org/blog/torch-compile-and-diffusers-a-hands-on-guide-to-peak-performance/ + if getattr(pipe.transformer, "_repeated_blocks"): + pipe.transformer.compile_repeated_blocks(fullgraph=True) + else: + pipe.transformer.compile() + + # small resolutions to ensure speedy execution. + pipe("a dog", num_inference_steps=2, max_sequence_length=16, height=256, width=256) + + def _test_torch_compile_with_group_offload_leaf(self, torch_dtype=torch.bfloat16, *, use_stream: bool = False): + torch._dynamo.config.cache_size_limit = 1000 + + pipe = self._init_pipeline(self.quantization_config, torch_dtype) + group_offload_kwargs = { + "onload_device": torch.device(torch_device), + "offload_device": torch.device("cpu"), + "offload_type": "leaf_level", + "use_stream": use_stream, + } + pipe.transformer.enable_group_offload(**group_offload_kwargs) + pipe.transformer.compile() + for name, component in pipe.components.items(): + if name != "transformer" and isinstance(component, torch.nn.Module): + if torch.device(component.device).type == "cpu": + component.to(torch_device) + + # small resolutions to ensure speedy execution. + pipe("a dog", num_inference_steps=2, max_sequence_length=16, height=256, width=256) + + def test_torch_compile(self): + self._test_torch_compile() + + def test_torch_compile_with_cpu_offload(self): + self._test_torch_compile_with_cpu_offload() + + def test_torch_compile_with_group_offload_leaf(self, use_stream=False): + for cls in inspect.getmro(self.__class__): + if "test_torch_compile_with_group_offload_leaf" in cls.__dict__ and cls is not QuantCompileTests: + return + self._test_torch_compile_with_group_offload_leaf(use_stream=use_stream) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/README.md b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/README.md new file mode 100644 index 0000000000000000000000000000000000000000..fadc529e12fc0876ca079f58aa90518823b88474 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/README.md @@ -0,0 +1,53 @@ +The tests here are adapted from [`transformers` tests](https://github.com/huggingface/transformers/blob/3a8eb74668e9c2cc563b2f5c62fac174797063e0/tests/quantization/torchao_integration/). + +The benchmarks were run on a single H100. 
Below is `nvidia-smi`: + +```bash ++---------------------------------------------------------------------------------------+ +| NVIDIA-SMI 535.104.12 Driver Version: 535.104.12 CUDA Version: 12.2 | +|-----------------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|=========================================+======================+======================| +| 0 NVIDIA H100 80GB HBM3 On | 00000000:53:00.0 Off | 0 | +| N/A 34C P0 69W / 700W | 2MiB / 81559MiB | 0% Default | +| | | Disabled | ++-----------------------------------------+----------------------+----------------------+ + ++---------------------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=======================================================================================| +| No running processes found | ++---------------------------------------------------------------------------------------+ +``` + +The benchmark results for Flux and CogVideoX can be found in [this](https://github.com/huggingface/diffusers/pull/10009) PR. + +The tests, and the expected slices, were obtained from the `aws-g6e-xlarge-plus` GPU test runners. To run the slow tests, use the following command or an equivalent: + +```bash +HF_HUB_ENABLE_HF_TRANSFER=1 RUN_SLOW=1 pytest -s tests/quantization/torchao/test_torchao.py::SlowTorchAoTests +``` + +`diffusers-cli`: + +```bash +- 🤗 Diffusers version: 0.32.0.dev0 +- Platform: Linux-5.15.0-1049-aws-x86_64-with-glibc2.31 +- Running on Google Colab?: No +- Python version: 3.10.14 +- PyTorch version (GPU?): 2.6.0.dev20241112+cu121 (False) +- Flax version (CPU?/GPU?/TPU?): not installed (NA) +- Jax version: not installed +- JaxLib version: not installed +- Huggingface_hub version: 0.26.2 +- Transformers version: 4.46.3 +- Accelerate version: 1.1.1 +- PEFT version: not installed +- Bitsandbytes version: not installed +- Safetensors version: 0.4.5 +- xFormers version: not installed +``` diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/test_torchao.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/test_torchao.py new file mode 100644 index 0000000000000000000000000000000000000000..920c3a55f56cd64b31301462a192b2580b96dcad --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/torchao/test_torchao.py @@ -0,0 +1,890 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import gc +import tempfile +import unittest +from typing import List + +import numpy as np +from parameterized import parameterized +from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel + +from diffusers import ( + AutoencoderKL, + FlowMatchEulerDiscreteScheduler, + FluxPipeline, + FluxTransformer2DModel, + TorchAoConfig, +) +from diffusers.models.attention_processor import Attention +from diffusers.quantizers import PipelineQuantizationConfig + +from ...testing_utils import ( + backend_empty_cache, + backend_synchronize, + enable_full_determinism, + is_torch_available, + is_torchao_available, + nightly, + numpy_cosine_similarity_distance, + require_torch, + require_torch_accelerator, + require_torchao_version_greater_or_equal, + slow, + torch_device, +) +from ..test_torch_compile_utils import QuantCompileTests + + +enable_full_determinism() + + +if is_torch_available(): + import torch + import torch.nn as nn + + from ..utils import LoRALayer, get_memory_consumption_stat + + +if is_torchao_available(): + from torchao.dtypes import AffineQuantizedTensor + from torchao.quantization.linear_activation_quantized_tensor import LinearActivationQuantizedTensor + from torchao.quantization.quant_primitives import MappingType + from torchao.utils import get_model_size_in_bytes + + +@require_torch +@require_torch_accelerator +@require_torchao_version_greater_or_equal("0.7.0") +class TorchAoConfigTest(unittest.TestCase): + def test_to_dict(self): + """ + Makes sure the config format is properly set + """ + quantization_config = TorchAoConfig("int4_weight_only") + torchao_orig_config = quantization_config.to_dict() + + for key in torchao_orig_config: + self.assertEqual(getattr(quantization_config, key), torchao_orig_config[key]) + + def test_post_init_check(self): + """ + Test kwargs validations in TorchAoConfig + """ + _ = TorchAoConfig("int4_weight_only") + with self.assertRaisesRegex(ValueError, "is not supported"): + _ = TorchAoConfig("uint8") + + with self.assertRaisesRegex(ValueError, "does not support the following keyword arguments"): + _ = TorchAoConfig("int4_weight_only", group_size1=32) + + def test_repr(self): + """ + Check that there is no error in the repr + """ + quantization_config = TorchAoConfig("int4_weight_only", modules_to_not_convert=["conv"], group_size=8) + expected_repr = """TorchAoConfig { + "modules_to_not_convert": [ + "conv" + ], + "quant_method": "torchao", + "quant_type": "int4_weight_only", + "quant_type_kwargs": { + "group_size": 8 + } + }""".replace(" ", "").replace("\n", "") + quantization_repr = repr(quantization_config).replace(" ", "").replace("\n", "") + self.assertEqual(quantization_repr, expected_repr) + + quantization_config = TorchAoConfig("int4dq", group_size=64, act_mapping_type=MappingType.SYMMETRIC) + expected_repr = """TorchAoConfig { + "modules_to_not_convert": null, + "quant_method": "torchao", + "quant_type": "int4dq", + "quant_type_kwargs": { + "act_mapping_type": "SYMMETRIC", + "group_size": 64 + } + }""".replace(" ", "").replace("\n", "") + quantization_repr = repr(quantization_config).replace(" ", "").replace("\n", "") + self.assertEqual(quantization_repr, expected_repr) + + +# Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners +@require_torch +@require_torch_accelerator +@require_torchao_version_greater_or_equal("0.7.0") +class TorchAoTest(unittest.TestCase): + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_components( + self, quantization_config: 
TorchAoConfig, model_id: str = "hf-internal-testing/tiny-flux-pipe" + ): + transformer = FluxTransformer2DModel.from_pretrained( + model_id, + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ) + text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder", torch_dtype=torch.bfloat16) + text_encoder_2 = T5EncoderModel.from_pretrained( + model_id, subfolder="text_encoder_2", torch_dtype=torch.bfloat16 + ) + tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer") + tokenizer_2 = AutoTokenizer.from_pretrained(model_id, subfolder="tokenizer_2") + vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.bfloat16) + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device: torch.device, seed: int = 0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator().manual_seed(seed) + + inputs = { + "prompt": "an astronaut riding a horse in space", + "height": 32, + "width": 32, + "num_inference_steps": 2, + "output_type": "np", + "generator": generator, + } + + return inputs + + def get_dummy_tensor_inputs(self, device=None, seed: int = 0): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + torch.manual_seed(seed) + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( + device, dtype=torch.bfloat16 + ) + + torch.manual_seed(seed) + pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) + + torch.manual_seed(seed) + image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) + + timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "txt_ids": text_ids, + "img_ids": image_ids, + "timestep": timestep, + } + + def _test_quant_type(self, quantization_config: TorchAoConfig, expected_slice: List[float], model_id: str): + components = self.get_dummy_components(quantization_config, model_id) + pipe = FluxPipeline(**components) + pipe.to(device=torch_device) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0] + output_slice = output[-1, -1, -3:, -3:].flatten() + + self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) + + def test_quantization(self): + for model_id in ["hf-internal-testing/tiny-flux-pipe", "hf-internal-testing/tiny-flux-sharded"]: + # fmt: off + QUANTIZATION_TYPES_TO_TEST = [ + ("int4wo", np.array([0.4648, 0.5234, 0.5547, 0.4219, 0.4414, 0.6445, 0.4336, 0.4531, 0.5625])), + ("int4dq", np.array([0.4688, 0.5195, 0.5547, 0.418, 0.4414, 0.6406, 0.4336, 0.4531, 0.5625])), + ("int8wo", np.array([0.4648, 0.5195, 0.5547, 0.4199, 0.4414, 0.6445, 0.4316, 0.4531, 0.5625])), + ("int8dq", np.array([0.4648, 0.5195, 0.5547, 0.4199, 
0.4414, 0.6445, 0.4316, 0.4531, 0.5625])), + ("uint4wo", np.array([0.4609, 0.5234, 0.5508, 0.4199, 0.4336, 0.6406, 0.4316, 0.4531, 0.5625])), + ("uint7wo", np.array([0.4648, 0.5195, 0.5547, 0.4219, 0.4414, 0.6445, 0.4316, 0.4531, 0.5625])), + ] + + if TorchAoConfig._is_xpu_or_cuda_capability_atleast_8_9(): + QUANTIZATION_TYPES_TO_TEST.extend([ + ("float8wo_e5m2", np.array([0.4590, 0.5273, 0.5547, 0.4219, 0.4375, 0.6406, 0.4316, 0.4512, 0.5625])), + ("float8wo_e4m3", np.array([0.4648, 0.5234, 0.5547, 0.4219, 0.4414, 0.6406, 0.4316, 0.4531, 0.5625])), + # ===== + # The following lead to an internal torch error: + # RuntimeError: mat2 shape (32x4 must be divisible by 16 + # Skip these for now; TODO(aryan): investigate later + # ("float8dq_e4m3", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])), + # ("float8dq_e4m3_tensor", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])), + # ===== + # Cutlass fails to initialize for below + # ("float8dq_e4m3_row", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])), + # ===== + ("fp4", np.array([0.4668, 0.5195, 0.5547, 0.4199, 0.4434, 0.6445, 0.4316, 0.4531, 0.5625])), + ("fp6", np.array([0.4668, 0.5195, 0.5547, 0.4199, 0.4434, 0.6445, 0.4316, 0.4531, 0.5625])), + ]) + # fmt: on + + for quantization_name, expected_slice in QUANTIZATION_TYPES_TO_TEST: + quant_kwargs = {} + if quantization_name in ["uint4wo", "uint7wo"]: + # The dummy flux model that we use has smaller dimensions. This imposes some restrictions on group_size here + quant_kwargs.update({"group_size": 16}) + quantization_config = TorchAoConfig( + quant_type=quantization_name, modules_to_not_convert=["x_embedder"], **quant_kwargs + ) + self._test_quant_type(quantization_config, expected_slice, model_id) + + def test_int4wo_quant_bfloat16_conversion(self): + """ + Tests whether the dtype of model will be modified to bfloat16 for int4 weight-only quantization. + """ + quantization_config = TorchAoConfig("int4_weight_only", group_size=64) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + device_map=f"{torch_device}:0", + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, AffineQuantizedTensor)) + self.assertEqual(weight.quant_min, 0) + self.assertEqual(weight.quant_max, 15) + + def test_device_map(self): + """ + Test if the quantized model int4 weight-only is working properly with "auto" and custom device maps. + The custom device map performs cpu/disk offloading as well. Also verifies that the device map is + correctly set (in the `hf_device_map` attribute of the model). 
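+        Modules offloaded to cpu/disk are expected to stay unquantized; only weights placed on the accelerator are converted.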
+ """ + custom_device_map_dict = { + "time_text_embed": torch_device, + "context_embedder": torch_device, + "x_embedder": torch_device, + "transformer_blocks.0": "cpu", + "single_transformer_blocks.0": "disk", + "norm_out": torch_device, + "proj_out": "cpu", + } + device_maps = ["auto", custom_device_map_dict] + + inputs = self.get_dummy_tensor_inputs(torch_device) + # requires with different expected slices since models are different due to offload (we don't quantize modules offloaded to cpu/disk) + expected_slice_auto = np.array( + [ + 0.34179688, + -0.03613281, + 0.01428223, + -0.22949219, + -0.49609375, + 0.4375, + -0.1640625, + -0.66015625, + 0.43164062, + ] + ) + expected_slice_offload = np.array( + [0.34375, -0.03515625, 0.0123291, -0.22753906, -0.49414062, 0.4375, -0.16308594, -0.66015625, 0.43554688] + ) + for device_map in device_maps: + if device_map == "auto": + expected_slice = expected_slice_auto + else: + expected_slice = expected_slice_offload + with tempfile.TemporaryDirectory() as offload_folder: + quantization_config = TorchAoConfig("int4_weight_only", group_size=64) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + device_map=device_map, + torch_dtype=torch.bfloat16, + offload_folder=offload_folder, + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + + # Note that when performing cpu/disk offload, the offloaded weights are not quantized, only the weights on the gpu. + # This is not the case when the model are already quantized + if "transformer_blocks.0" in device_map: + self.assertTrue(isinstance(weight, nn.Parameter)) + else: + self.assertTrue(isinstance(weight, AffineQuantizedTensor)) + + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 2e-3) + + with tempfile.TemporaryDirectory() as offload_folder: + quantization_config = TorchAoConfig("int4_weight_only", group_size=64) + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-sharded", + subfolder="transformer", + quantization_config=quantization_config, + device_map=device_map, + torch_dtype=torch.bfloat16, + offload_folder=offload_folder, + ) + + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + if "transformer_blocks.0" in device_map: + self.assertTrue(isinstance(weight, nn.Parameter)) + else: + self.assertTrue(isinstance(weight, AffineQuantizedTensor)) + + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 2e-3) + + def test_modules_to_not_convert(self): + quantization_config = TorchAoConfig("int8_weight_only", modules_to_not_convert=["transformer_blocks.0"]) + quantized_model_with_not_convert = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ) + + unquantized_layer = quantized_model_with_not_convert.transformer_blocks[0].ff.net[2] + self.assertTrue(isinstance(unquantized_layer, torch.nn.Linear)) + self.assertFalse(isinstance(unquantized_layer.weight, AffineQuantizedTensor)) + self.assertEqual(unquantized_layer.weight.dtype, torch.bfloat16) + + quantized_layer = quantized_model_with_not_convert.proj_out + 
self.assertTrue(isinstance(quantized_layer.weight, AffineQuantizedTensor)) + + quantization_config = TorchAoConfig("int8_weight_only") + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ) + + size_quantized_with_not_convert = get_model_size_in_bytes(quantized_model_with_not_convert) + size_quantized = get_model_size_in_bytes(quantized_model) + + self.assertTrue(size_quantized < size_quantized_with_not_convert) + + def test_training(self): + quantization_config = TorchAoConfig("int8_weight_only") + quantized_model = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/tiny-flux-pipe", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ).to(torch_device) + + for param in quantized_model.parameters(): + # freeze the model as only adapter layers will be trained + param.requires_grad = False + if param.ndim == 1: + param.data = param.data.to(torch.float32) + + for _, module in quantized_model.named_modules(): + if isinstance(module, Attention): + module.to_q = LoRALayer(module.to_q, rank=4) + module.to_k = LoRALayer(module.to_k, rank=4) + module.to_v = LoRALayer(module.to_v, rank=4) + + with torch.amp.autocast(str(torch_device), dtype=torch.bfloat16): + inputs = self.get_dummy_tensor_inputs(torch_device) + output = quantized_model(**inputs)[0] + output.norm().backward() + + for module in quantized_model.modules(): + if isinstance(module, LoRALayer): + self.assertTrue(module.adapter[1].weight.grad is not None) + self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) + + @nightly + def test_torch_compile(self): + r"""Test that verifies if torch.compile works with torchao quantization.""" + for model_id in ["hf-internal-testing/tiny-flux-pipe", "hf-internal-testing/tiny-flux-sharded"]: + quantization_config = TorchAoConfig("int8_weight_only") + components = self.get_dummy_components(quantization_config, model_id=model_id) + pipe = FluxPipeline(**components) + pipe.to(device=torch_device) + + inputs = self.get_dummy_inputs(torch_device) + normal_output = pipe(**inputs)[0].flatten()[-32:] + + pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True, dynamic=False) + inputs = self.get_dummy_inputs(torch_device) + compile_output = pipe(**inputs)[0].flatten()[-32:] + + # Note: Seems to require higher tolerance + self.assertTrue(np.allclose(normal_output, compile_output, atol=1e-2, rtol=1e-3)) + + def test_memory_footprint(self): + r""" + A simple test to check if the model conversion has been done correctly by checking on the + memory footprint of the converted model and the class type of the linear layers of the converted models + """ + for model_id in ["hf-internal-testing/tiny-flux-pipe", "hf-internal-testing/tiny-flux-sharded"]: + transformer_int4wo = self.get_dummy_components(TorchAoConfig("int4wo"), model_id=model_id)["transformer"] + transformer_int4wo_gs32 = self.get_dummy_components( + TorchAoConfig("int4wo", group_size=32), model_id=model_id + )["transformer"] + transformer_int8wo = self.get_dummy_components(TorchAoConfig("int8wo"), model_id=model_id)["transformer"] + transformer_bf16 = self.get_dummy_components(None, model_id=model_id)["transformer"] + + # Will not quantized all the layers by default due to the model weights shapes not being divisible by group_size=64 + for block in transformer_int4wo.transformer_blocks: + 
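+ # The feed-forward projections are wide enough to satisfy the default group size, so they are among the layers that should end up quantized.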
self.assertTrue(isinstance(block.ff.net[2].weight, AffineQuantizedTensor)) + self.assertTrue(isinstance(block.ff_context.net[2].weight, AffineQuantizedTensor)) + + # Will quantize all the linear layers except x_embedder + for name, module in transformer_int4wo_gs32.named_modules(): + if isinstance(module, nn.Linear) and name not in ["x_embedder"]: + self.assertTrue(isinstance(module.weight, AffineQuantizedTensor)) + + # Will quantize all the linear layers + for module in transformer_int8wo.modules(): + if isinstance(module, nn.Linear): + self.assertTrue(isinstance(module.weight, AffineQuantizedTensor)) + + total_int4wo = get_model_size_in_bytes(transformer_int4wo) + total_int4wo_gs32 = get_model_size_in_bytes(transformer_int4wo_gs32) + total_int8wo = get_model_size_in_bytes(transformer_int8wo) + total_bf16 = get_model_size_in_bytes(transformer_bf16) + + # TODO: refactor to align with other quantization tests + # Latter has smaller group size, so more groups -> more scales and zero points + self.assertTrue(total_int4wo < total_int4wo_gs32) + # int8 quantizes more layers compare to int4 with default group size + self.assertTrue(total_int8wo < total_int4wo) + # int4wo does not quantize too many layers because of default group size, but for the layers it does + # there is additional overhead of scales and zero points + self.assertTrue(total_bf16 < total_int4wo) + + def test_model_memory_usage(self): + model_id = "hf-internal-testing/tiny-flux-pipe" + expected_memory_saving_ratio = 2.0 + + inputs = self.get_dummy_tensor_inputs(device=torch_device) + + transformer_bf16 = self.get_dummy_components(None, model_id=model_id)["transformer"] + transformer_bf16.to(torch_device) + unquantized_model_memory = get_memory_consumption_stat(transformer_bf16, inputs) + del transformer_bf16 + + transformer_int8wo = self.get_dummy_components(TorchAoConfig("int8wo"), model_id=model_id)["transformer"] + transformer_int8wo.to(torch_device) + quantized_model_memory = get_memory_consumption_stat(transformer_int8wo, inputs) + assert unquantized_model_memory / quantized_model_memory >= expected_memory_saving_ratio + + def test_wrong_config(self): + with self.assertRaises(ValueError): + self.get_dummy_components(TorchAoConfig("int42")) + + def test_sequential_cpu_offload(self): + r""" + A test that checks if inference runs as expected when sequential cpu offloading is enabled. 
+ """ + quantization_config = TorchAoConfig("int8wo") + components = self.get_dummy_components(quantization_config) + pipe = FluxPipeline(**components) + pipe.enable_sequential_cpu_offload() + + inputs = self.get_dummy_inputs(torch_device) + _ = pipe(**inputs) + + +# Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners +@require_torch +@require_torch_accelerator +@require_torchao_version_greater_or_equal("0.7.0") +class TorchAoSerializationTest(unittest.TestCase): + model_name = "hf-internal-testing/tiny-flux-pipe" + + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_model(self, quant_method, quant_method_kwargs, device=None): + quantization_config = TorchAoConfig(quant_method, **quant_method_kwargs) + quantized_model = FluxTransformer2DModel.from_pretrained( + self.model_name, + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + ) + return quantized_model.to(device) + + def get_dummy_tensor_inputs(self, device=None, seed: int = 0): + batch_size = 1 + num_latent_channels = 4 + num_image_channels = 3 + height = width = 4 + sequence_length = 48 + embedding_dim = 32 + + torch.manual_seed(seed) + hidden_states = torch.randn((batch_size, height * width, num_latent_channels)).to(device, dtype=torch.bfloat16) + encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to( + device, dtype=torch.bfloat16 + ) + pooled_prompt_embeds = torch.randn((batch_size, embedding_dim)).to(device, dtype=torch.bfloat16) + text_ids = torch.randn((sequence_length, num_image_channels)).to(device, dtype=torch.bfloat16) + image_ids = torch.randn((height * width, num_image_channels)).to(device, dtype=torch.bfloat16) + timestep = torch.tensor([1.0]).to(device, dtype=torch.bfloat16).expand(batch_size) + + return { + "hidden_states": hidden_states, + "encoder_hidden_states": encoder_hidden_states, + "pooled_projections": pooled_prompt_embeds, + "txt_ids": text_ids, + "img_ids": image_ids, + "timestep": timestep, + } + + def _test_original_model_expected_slice(self, quant_method, quant_method_kwargs, expected_slice): + quantized_model = self.get_dummy_model(quant_method, quant_method_kwargs, torch_device) + inputs = self.get_dummy_tensor_inputs(torch_device) + output = quantized_model(**inputs)[0] + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + weight = quantized_model.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor))) + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + + def _check_serialization_expected_slice(self, quant_method, quant_method_kwargs, expected_slice, device): + quantized_model = self.get_dummy_model(quant_method, quant_method_kwargs, device) + + with tempfile.TemporaryDirectory() as tmp_dir: + quantized_model.save_pretrained(tmp_dir, safe_serialization=False) + loaded_quantized_model = FluxTransformer2DModel.from_pretrained( + tmp_dir, torch_dtype=torch.bfloat16, use_safetensors=False + ).to(device=torch_device) + + inputs = self.get_dummy_tensor_inputs(torch_device) + output = loaded_quantized_model(**inputs)[0] + + output_slice = output.flatten()[-9:].detach().float().cpu().numpy() + self.assertTrue( + isinstance( + loaded_quantized_model.proj_out.weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor) + ) + ) + self.assertTrue(numpy_cosine_similarity_distance(output_slice, expected_slice) < 1e-3) + + def 
test_int_a8w8_accelerator(self): + quant_method, quant_method_kwargs = "int8_dynamic_activation_int8_weight", {} + expected_slice = np.array([0.3633, -0.1357, -0.0188, -0.249, -0.4688, 0.5078, -0.1289, -0.6914, 0.4551]) + device = torch_device + self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) + self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) + + def test_int_a16w8_accelerator(self): + quant_method, quant_method_kwargs = "int8_weight_only", {} + expected_slice = np.array([0.3613, -0.127, -0.0223, -0.2539, -0.459, 0.4961, -0.1357, -0.6992, 0.4551]) + device = torch_device + self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) + self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) + + def test_int_a8w8_cpu(self): + quant_method, quant_method_kwargs = "int8_dynamic_activation_int8_weight", {} + expected_slice = np.array([0.3633, -0.1357, -0.0188, -0.249, -0.4688, 0.5078, -0.1289, -0.6914, 0.4551]) + device = "cpu" + self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) + self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) + + def test_int_a16w8_cpu(self): + quant_method, quant_method_kwargs = "int8_weight_only", {} + expected_slice = np.array([0.3613, -0.127, -0.0223, -0.2539, -0.459, 0.4961, -0.1357, -0.6992, 0.4551]) + device = "cpu" + self._test_original_model_expected_slice(quant_method, quant_method_kwargs, expected_slice) + self._check_serialization_expected_slice(quant_method, quant_method_kwargs, expected_slice, device) + + +@require_torchao_version_greater_or_equal("0.7.0") +class TorchAoCompileTest(QuantCompileTests, unittest.TestCase): + @property + def quantization_config(self): + return PipelineQuantizationConfig( + quant_mapping={ + "transformer": TorchAoConfig(quant_type="int8_weight_only"), + }, + ) + + @unittest.skip( + "Changing the device of AQT tensor with module._apply (called from doing module.to() in accelerate) does not work " + "when compiling." + ) + def test_torch_compile_with_cpu_offload(self): + # RuntimeError: _apply(): Couldn't swap Linear.weight + super().test_torch_compile_with_cpu_offload() + + @parameterized.expand([False, True]) + @unittest.skip( + """ + For `use_stream=False`: + - Changing the device of AQT tensor, with `param.data = param.data.to(device)` as done in group offloading implementation + is unsupported in TorchAO. When compiling, FakeTensor device mismatch causes failure. + For `use_stream=True`: + Using non-default stream requires ability to pin tensors. AQT does not seem to support this yet in TorchAO. + """ + ) + def test_torch_compile_with_group_offload_leaf(self, use_stream): + # For use_stream=False: + # If we run group offloading without compilation, we will see: + # RuntimeError: Attempted to set the storage of a tensor on device "cpu" to a storage on different device "cuda:0". This is no longer allowed; the devices must match. + # When running with compilation, the error ends up being different: + # Dynamo failed to run FX node with fake tensors: call_function (*(FakeTensor(..., device='cuda:0', size=(s0, 256), dtype=torch.bfloat16), AffineQuantizedTensor(tensor_impl=PlainAQTTensorImpl(data=FakeTensor(..., size=(1536, 256), dtype=torch.int8)... , scale=FakeTensor(..., size=(1536,), dtype=torch.bfloat16)... , zero_point=FakeTensor(..., size=(1536,), dtype=torch.int64)... 
, _layout=PlainLayout()), block_size=(1, 256), shape=torch.Size([1536, 256]), device=cpu, dtype=torch.bfloat16, requires_grad=False), Parameter(FakeTensor(..., device='cuda:0', size=(1536,), dtype=torch.bfloat16, + # requires_grad=True))), **{}): got RuntimeError('Unhandled FakeTensor Device Propagation for aten.mm.default, found two different devices cuda:0, cpu') + # Looks like something that will have to be looked into upstream. + # for linear layers, weight.tensor_impl shows cuda... but: + # weight.tensor_impl.{data,scale,zero_point}.device will be cpu + + # For use_stream=True: + # NotImplementedError: AffineQuantizedTensor dispatch: attempting to run unimplemented operator/function: func=, types=(,), arg_types=(,), kwarg_types={} + super()._test_torch_compile_with_group_offload_leaf(use_stream=use_stream) + + +# Slices for these tests have been obtained on our aws-g6e-xlarge-plus runners +@require_torch +@require_torch_accelerator +@require_torchao_version_greater_or_equal("0.7.0") +@slow +@nightly +class SlowTorchAoTests(unittest.TestCase): + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_components(self, quantization_config: TorchAoConfig): + # This is just for convenience, so that we can modify it at one place for custom environments and locally testing + cache_dir = None + model_id = "black-forest-labs/FLUX.1-dev" + transformer = FluxTransformer2DModel.from_pretrained( + model_id, + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + cache_dir=cache_dir, + ) + text_encoder = CLIPTextModel.from_pretrained( + model_id, subfolder="text_encoder", torch_dtype=torch.bfloat16, cache_dir=cache_dir + ) + text_encoder_2 = T5EncoderModel.from_pretrained( + model_id, subfolder="text_encoder_2", torch_dtype=torch.bfloat16, cache_dir=cache_dir + ) + tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder="tokenizer", cache_dir=cache_dir) + tokenizer_2 = AutoTokenizer.from_pretrained(model_id, subfolder="tokenizer_2", cache_dir=cache_dir) + vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.bfloat16, cache_dir=cache_dir) + scheduler = FlowMatchEulerDiscreteScheduler() + + return { + "scheduler": scheduler, + "text_encoder": text_encoder, + "text_encoder_2": text_encoder_2, + "tokenizer": tokenizer, + "tokenizer_2": tokenizer_2, + "transformer": transformer, + "vae": vae, + } + + def get_dummy_inputs(self, device: torch.device, seed: int = 0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator().manual_seed(seed) + + inputs = { + "prompt": "an astronaut riding a horse in space", + "height": 512, + "width": 512, + "num_inference_steps": 20, + "output_type": "np", + "generator": generator, + } + + return inputs + + def _test_quant_type(self, quantization_config, expected_slice): + components = self.get_dummy_components(quantization_config) + pipe = FluxPipeline(**components) + pipe.enable_model_cpu_offload() + + weight = pipe.transformer.transformer_blocks[0].ff.net[2].weight + self.assertTrue(isinstance(weight, (AffineQuantizedTensor, LinearActivationQuantizedTensor))) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0].flatten() + output_slice = np.concatenate((output[:16], output[-16:])) + self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) + + def test_quantization(self): + # fmt: off + QUANTIZATION_TYPES_TO_TEST = [ + ("int8wo", np.array([0.0505, 0.0742, 
0.1367, 0.0429, 0.0585, 0.1386, 0.0585, 0.0703, 0.1367, 0.0566, 0.0703, 0.1464, 0.0546, 0.0703, 0.1425, 0.0546, 0.3535, 0.7578, 0.5000, 0.4062, 0.7656, 0.5117, 0.4121, 0.7656, 0.5117, 0.3984, 0.7578, 0.5234, 0.4023, 0.7382, 0.5390, 0.4570])), + ("int8dq", np.array([0.0546, 0.0761, 0.1386, 0.0488, 0.0644, 0.1425, 0.0605, 0.0742, 0.1406, 0.0625, 0.0722, 0.1523, 0.0625, 0.0742, 0.1503, 0.0605, 0.3886, 0.7968, 0.5507, 0.4492, 0.7890, 0.5351, 0.4316, 0.8007, 0.5390, 0.4179, 0.8281, 0.5820, 0.4531, 0.7812, 0.5703, 0.4921])), + ] + + if TorchAoConfig._is_xpu_or_cuda_capability_atleast_8_9(): + QUANTIZATION_TYPES_TO_TEST.extend([ + ("float8wo_e4m3", np.array([0.0546, 0.0722, 0.1328, 0.0468, 0.0585, 0.1367, 0.0605, 0.0703, 0.1328, 0.0625, 0.0703, 0.1445, 0.0585, 0.0703, 0.1406, 0.0605, 0.3496, 0.7109, 0.4843, 0.4042, 0.7226, 0.5000, 0.4160, 0.7031, 0.4824, 0.3886, 0.6757, 0.4667, 0.3710, 0.6679, 0.4902, 0.4238])), + ("fp5_e3m1", np.array([0.0527, 0.0762, 0.1309, 0.0449, 0.0645, 0.1328, 0.0566, 0.0723, 0.125, 0.0566, 0.0703, 0.1328, 0.0566, 0.0742, 0.1348, 0.0566, 0.3633, 0.7617, 0.5273, 0.4277, 0.7891, 0.5469, 0.4375, 0.8008, 0.5586, 0.4336, 0.7383, 0.5156, 0.3906, 0.6992, 0.5156, 0.4375])), + ]) + # fmt: on + + for quantization_name, expected_slice in QUANTIZATION_TYPES_TO_TEST: + quantization_config = TorchAoConfig(quant_type=quantization_name, modules_to_not_convert=["x_embedder"]) + self._test_quant_type(quantization_config, expected_slice) + gc.collect() + backend_empty_cache(torch_device) + backend_synchronize(torch_device) + + def test_serialization_int8wo(self): + quantization_config = TorchAoConfig("int8wo") + components = self.get_dummy_components(quantization_config) + pipe = FluxPipeline(**components) + pipe.enable_model_cpu_offload() + + weight = pipe.transformer.x_embedder.weight + self.assertTrue(isinstance(weight, AffineQuantizedTensor)) + + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0].flatten()[:128] + + with tempfile.TemporaryDirectory() as tmp_dir: + pipe.transformer.save_pretrained(tmp_dir, safe_serialization=False) + pipe.remove_all_hooks() + del pipe.transformer + gc.collect() + backend_empty_cache(torch_device) + backend_synchronize(torch_device) + transformer = FluxTransformer2DModel.from_pretrained( + tmp_dir, torch_dtype=torch.bfloat16, use_safetensors=False + ) + pipe.transformer = transformer + pipe.enable_model_cpu_offload() + + weight = transformer.x_embedder.weight + self.assertTrue(isinstance(weight, AffineQuantizedTensor)) + + loaded_output = pipe(**inputs)[0].flatten()[:128] + # Seems to require higher tolerance depending on which machine it is being run. + # A difference of 0.06 in normalized pixel space (-1 to 1), corresponds to a difference of + # 0.06 / 2 * 255 = 7.65 in pixel space (0 to 255). On our CI runners, the difference is about 0.04, + # on DGX it is 0.06, and on audace it is 0.037. So, we are using a tolerance of 0.06 here. 
+ self.assertTrue(np.allclose(output, loaded_output, atol=0.06)) + + def test_memory_footprint_int4wo(self): + # The original checkpoints are in bf16 and about 24 GB + expected_memory_in_gb = 6.0 + quantization_config = TorchAoConfig("int4wo") + cache_dir = None + transformer = FluxTransformer2DModel.from_pretrained( + "black-forest-labs/FLUX.1-dev", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + cache_dir=cache_dir, + ) + int4wo_memory_in_gb = get_model_size_in_bytes(transformer) / 1024**3 + self.assertTrue(int4wo_memory_in_gb < expected_memory_in_gb) + + def test_memory_footprint_int8wo(self): + # The original checkpoints are in bf16 and about 24 GB + expected_memory_in_gb = 12.0 + quantization_config = TorchAoConfig("int8wo") + cache_dir = None + transformer = FluxTransformer2DModel.from_pretrained( + "black-forest-labs/FLUX.1-dev", + subfolder="transformer", + quantization_config=quantization_config, + torch_dtype=torch.bfloat16, + cache_dir=cache_dir, + ) + int8wo_memory_in_gb = get_model_size_in_bytes(transformer) / 1024**3 + self.assertTrue(int8wo_memory_in_gb < expected_memory_in_gb) + + +@require_torch +@require_torch_accelerator +@require_torchao_version_greater_or_equal("0.7.0") +@slow +@nightly +class SlowTorchAoPreserializedModelTests(unittest.TestCase): + def tearDown(self): + gc.collect() + backend_empty_cache(torch_device) + + def get_dummy_inputs(self, device: torch.device, seed: int = 0): + if str(device).startswith("mps"): + generator = torch.manual_seed(seed) + else: + generator = torch.Generator().manual_seed(seed) + + inputs = { + "prompt": "an astronaut riding a horse in space", + "height": 512, + "width": 512, + "num_inference_steps": 20, + "output_type": "np", + "generator": generator, + } + + return inputs + + def test_transformer_int8wo(self): + # fmt: off + expected_slice = np.array([0.0566, 0.0781, 0.1426, 0.0488, 0.0684, 0.1504, 0.0625, 0.0781, 0.1445, 0.0625, 0.0781, 0.1562, 0.0547, 0.0723, 0.1484, 0.0566, 0.5703, 0.8867, 0.7266, 0.5742, 0.875, 0.7148, 0.5586, 0.875, 0.7148, 0.5547, 0.8633, 0.7109, 0.5469, 0.8398, 0.6992, 0.5703]) + # fmt: on + + # This is just for convenience, so that we can modify it at one place for custom environments and locally testing + cache_dir = None + transformer = FluxTransformer2DModel.from_pretrained( + "hf-internal-testing/FLUX.1-Dev-TorchAO-int8wo-transformer", + torch_dtype=torch.bfloat16, + use_safetensors=False, + cache_dir=cache_dir, + ) + pipe = FluxPipeline.from_pretrained( + "black-forest-labs/FLUX.1-dev", transformer=transformer, torch_dtype=torch.bfloat16, cache_dir=cache_dir + ) + pipe.enable_model_cpu_offload() + + # Verify that all linear layer weights are quantized + for name, module in pipe.transformer.named_modules(): + if isinstance(module, nn.Linear): + self.assertTrue(isinstance(module.weight, AffineQuantizedTensor)) + + # Verify outputs match expected slice + inputs = self.get_dummy_inputs(torch_device) + output = pipe(**inputs)[0].flatten() + output_slice = np.concatenate((output[:16], output[-16:])) + self.assertTrue(np.allclose(output_slice, expected_slice, atol=1e-3, rtol=1e-3)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a74ece5a3a3ad27de1abc004cdb2f34ec4c28287 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/quantization/utils.py @@ -0,0 +1,45 @@ +from 
diffusers.utils import is_torch_available + +from ..testing_utils import ( + backend_empty_cache, + backend_max_memory_allocated, + backend_reset_peak_memory_stats, + torch_device, +) + + +if is_torch_available(): + import torch + import torch.nn as nn + + class LoRALayer(nn.Module): + """Wraps a linear layer with LoRA-like adapter - Used for testing purposes only + + Taken from + https://github.com/huggingface/transformers/blob/566302686a71de14125717dea9a6a45b24d42b37/tests/quantization/bnb/test_4bit.py#L62C5-L78C77 + """ + + def __init__(self, module: nn.Module, rank: int): + super().__init__() + self.module = module + self.adapter = nn.Sequential( + nn.Linear(module.in_features, rank, bias=False), + nn.Linear(rank, module.out_features, bias=False), + ) + small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5 + nn.init.normal_(self.adapter[0].weight, std=small_std) + nn.init.zeros_(self.adapter[1].weight) + self.adapter.to(module.weight.device) + + def forward(self, input, *args, **kwargs): + return self.module(input, *args, **kwargs) + self.adapter(input) + + @torch.no_grad() + @torch.inference_mode() + def get_memory_consumption_stat(model, inputs): + backend_reset_peak_memory_stats(torch_device) + backend_empty_cache(torch_device) + + model(**inputs) + max_mem_allocated = backend_max_memory_allocated(torch_device) + return max_mem_allocated diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/test_remote_decode.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/test_remote_decode.py new file mode 100644 index 0000000000000000000000000000000000000000..27170cba08356c62e51ed0a99f653691ba7a53d6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/test_remote_decode.py @@ -0,0 +1,537 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from typing import Tuple, Union + +import numpy as np +import PIL.Image +import torch + +from diffusers.image_processor import VaeImageProcessor +from diffusers.utils.constants import ( + DECODE_ENDPOINT_FLUX, + DECODE_ENDPOINT_HUNYUAN_VIDEO, + DECODE_ENDPOINT_SD_V1, + DECODE_ENDPOINT_SD_XL, +) +from diffusers.utils.remote_utils import ( + remote_decode, +) +from diffusers.video_processor import VideoProcessor + +from ..testing_utils import ( + enable_full_determinism, + slow, + torch_all_close, + torch_device, +) + + +enable_full_determinism() + + +class RemoteAutoencoderKLMixin: + shape: Tuple[int, ...] 
= None + out_hw: Tuple[int, int] = None + endpoint: str = None + dtype: torch.dtype = None + scaling_factor: float = None + shift_factor: float = None + processor_cls: Union[VaeImageProcessor, VideoProcessor] = None + output_pil_slice: torch.Tensor = None + output_pt_slice: torch.Tensor = None + partial_postprocess_return_pt_slice: torch.Tensor = None + return_pt_slice: torch.Tensor = None + width: int = None + height: int = None + + def get_dummy_inputs(self): + inputs = { + "endpoint": self.endpoint, + "tensor": torch.randn( + self.shape, + device=torch_device, + dtype=self.dtype, + generator=torch.Generator(torch_device).manual_seed(13), + ), + "scaling_factor": self.scaling_factor, + "shift_factor": self.shift_factor, + "height": self.height, + "width": self.width, + } + return inputs + + def test_no_scaling(self): + inputs = self.get_dummy_inputs() + if inputs["scaling_factor"] is not None: + inputs["tensor"] = inputs["tensor"] / inputs["scaling_factor"] + inputs["scaling_factor"] = None + if inputs["shift_factor"] is not None: + inputs["tensor"] = inputs["tensor"] + inputs["shift_factor"] + inputs["shift_factor"] = None + processor = self.processor_cls() + output = remote_decode( + output_type="pt", + # required for now, will be removed in next update + do_scaling=False, + processor=processor, + **inputs, + ) + assert isinstance(output, PIL.Image.Image) + self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}") + self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}") + self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.width}") + output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten()) + # Increased tolerance for Flux Packed diff [1, 0, 1, 0, 0, 0, 0, 0, 0] + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1), + f"{output_slice}", + ) + + def test_output_type_pt(self): + inputs = self.get_dummy_inputs() + processor = self.processor_cls() + output = remote_decode(output_type="pt", processor=processor, **inputs) + assert isinstance(output, PIL.Image.Image) + self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}") + self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}") + self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.width}") + output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1e-2), f"{output_slice}" + ) + + # output is visually the same, slice is flaky? 
+ def test_output_type_pil(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pil", **inputs) + self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}") + self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}") + self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.width}") + + def test_output_type_pil_image_format(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pil", image_format="png", **inputs) + self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}") + self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}") + self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.width}") + self.assertEqual(output.format, "png", f"Expected image format `png`, got {output.format}") + output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1e-2), f"{output_slice}" + ) + + def test_output_type_pt_partial_postprocess(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pt", partial_postprocess=True, **inputs) + self.assertTrue(isinstance(output, PIL.Image.Image), f"Expected `PIL.Image.Image` output, got {type(output)}") + self.assertEqual(output.height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.height}") + self.assertEqual(output.width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.width}") + output_slice = torch.from_numpy(np.array(output)[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1e-2), f"{output_slice}" + ) + + def test_output_type_pt_return_type_pt(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pt", return_type="pt", **inputs) + self.assertTrue(isinstance(output, torch.Tensor), f"Expected `torch.Tensor` output, got {type(output)}") + self.assertEqual( + output.shape[2], self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.shape[2]}" + ) + self.assertEqual( + output.shape[3], self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.shape[3]}" + ) + output_slice = output[0, 0, -3:, -3:].flatten() + self.assertTrue( + torch_all_close(output_slice, self.return_pt_slice.to(output_slice.dtype), rtol=1e-3, atol=1e-3), + f"{output_slice}", + ) + + def test_output_type_pt_partial_postprocess_return_type_pt(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pt", partial_postprocess=True, return_type="pt", **inputs) + self.assertTrue(isinstance(output, torch.Tensor), f"Expected `torch.Tensor` output, got {type(output)}") + self.assertEqual( + output.shape[1], self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.shape[1]}" + ) + self.assertEqual( + output.shape[2], self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.shape[2]}" + ) + output_slice = output[0, -3:, -3:, 0].flatten().cpu() + self.assertTrue( + torch_all_close(output_slice, self.partial_postprocess_return_pt_slice.to(output_slice.dtype), rtol=1e-2), + f"{output_slice}", + ) + + def test_do_scaling_deprecation(self): + inputs = self.get_dummy_inputs() + inputs.pop("scaling_factor", None) + 
inputs.pop("shift_factor", None) + with self.assertWarns(FutureWarning) as warning: + _ = remote_decode(output_type="pt", partial_postprocess=True, **inputs) + self.assertEqual( + str(warning.warnings[0].message), + "`do_scaling` is deprecated, pass `scaling_factor` and `shift_factor` if required.", + str(warning.warnings[0].message), + ) + + def test_input_tensor_type_base64_deprecation(self): + inputs = self.get_dummy_inputs() + with self.assertWarns(FutureWarning) as warning: + _ = remote_decode(output_type="pt", input_tensor_type="base64", partial_postprocess=True, **inputs) + self.assertEqual( + str(warning.warnings[0].message), + "input_tensor_type='base64' is deprecated. Using `binary`.", + str(warning.warnings[0].message), + ) + + def test_output_tensor_type_base64_deprecation(self): + inputs = self.get_dummy_inputs() + with self.assertWarns(FutureWarning) as warning: + _ = remote_decode(output_type="pt", output_tensor_type="base64", partial_postprocess=True, **inputs) + self.assertEqual( + str(warning.warnings[0].message), + "output_tensor_type='base64' is deprecated. Using `binary`.", + str(warning.warnings[0].message), + ) + + +class RemoteAutoencoderKLHunyuanVideoMixin(RemoteAutoencoderKLMixin): + def test_no_scaling(self): + inputs = self.get_dummy_inputs() + if inputs["scaling_factor"] is not None: + inputs["tensor"] = inputs["tensor"] / inputs["scaling_factor"] + inputs["scaling_factor"] = None + if inputs["shift_factor"] is not None: + inputs["tensor"] = inputs["tensor"] + inputs["shift_factor"] + inputs["shift_factor"] = None + processor = self.processor_cls() + output = remote_decode( + output_type="pt", + # required for now, will be removed in next update + do_scaling=False, + processor=processor, + **inputs, + ) + self.assertTrue( + isinstance(output, list) and isinstance(output[0], PIL.Image.Image), + f"Expected `List[PIL.Image.Image]` output, got {type(output)}", + ) + self.assertEqual( + output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}" + ) + self.assertEqual( + output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output[0].width}" + ) + output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1), + f"{output_slice}", + ) + + def test_output_type_pt(self): + inputs = self.get_dummy_inputs() + processor = self.processor_cls() + output = remote_decode(output_type="pt", processor=processor, **inputs) + self.assertTrue( + isinstance(output, list) and isinstance(output[0], PIL.Image.Image), + f"Expected `List[PIL.Image.Image]` output, got {type(output)}", + ) + self.assertEqual( + output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}" + ) + self.assertEqual( + output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output[0].width}" + ) + output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1), + f"{output_slice}", + ) + + # output is visually the same, slice is flaky? 
+ def test_output_type_pil(self): + inputs = self.get_dummy_inputs() + processor = self.processor_cls() + output = remote_decode(output_type="pil", processor=processor, **inputs) + self.assertTrue( + isinstance(output, list) and isinstance(output[0], PIL.Image.Image), + f"Expected `List[PIL.Image.Image]` output, got {type(output)}", + ) + self.assertEqual( + output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}" + ) + self.assertEqual( + output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output[0].width}" + ) + + def test_output_type_pil_image_format(self): + inputs = self.get_dummy_inputs() + processor = self.processor_cls() + output = remote_decode(output_type="pil", processor=processor, image_format="png", **inputs) + self.assertTrue( + isinstance(output, list) and isinstance(output[0], PIL.Image.Image), + f"Expected `List[PIL.Image.Image]` output, got {type(output)}", + ) + self.assertEqual( + output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}" + ) + self.assertEqual( + output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output[0].width}" + ) + output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1), + f"{output_slice}", + ) + + def test_output_type_pt_partial_postprocess(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pt", partial_postprocess=True, **inputs) + self.assertTrue( + isinstance(output, list) and isinstance(output[0], PIL.Image.Image), + f"Expected `List[PIL.Image.Image]` output, got {type(output)}", + ) + self.assertEqual( + output[0].height, self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output[0].height}" + ) + self.assertEqual( + output[0].width, self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output[0].width}" + ) + output_slice = torch.from_numpy(np.array(output[0])[0, -3:, -3:].flatten()) + self.assertTrue( + torch_all_close(output_slice, self.output_pt_slice.to(output_slice.dtype), rtol=1, atol=1), + f"{output_slice}", + ) + + def test_output_type_pt_return_type_pt(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="pt", return_type="pt", **inputs) + self.assertTrue(isinstance(output, torch.Tensor), f"Expected `torch.Tensor` output, got {type(output)}") + self.assertEqual( + output.shape[3], self.out_hw[0], f"Expected image height {self.out_hw[0]}, got {output.shape[2]}" + ) + self.assertEqual( + output.shape[4], self.out_hw[1], f"Expected image width {self.out_hw[0]}, got {output.shape[3]}" + ) + output_slice = output[0, 0, 0, -3:, -3:].flatten() + self.assertTrue( + torch_all_close(output_slice, self.return_pt_slice.to(output_slice.dtype), rtol=1e-3, atol=1e-3), + f"{output_slice}", + ) + + def test_output_type_mp4(self): + inputs = self.get_dummy_inputs() + output = remote_decode(output_type="mp4", return_type="mp4", **inputs) + self.assertTrue(isinstance(output, bytes), f"Expected `bytes` output, got {type(output)}") + + +class RemoteAutoencoderKLSDv1Tests( + RemoteAutoencoderKLMixin, + unittest.TestCase, +): + shape = ( + 1, + 4, + 64, + 64, + ) + out_hw = ( + 512, + 512, + ) + endpoint = DECODE_ENDPOINT_SD_V1 + dtype = torch.float16 + scaling_factor = 0.18215 + shift_factor = None + processor_cls = VaeImageProcessor + output_pt_slice = torch.tensor([31, 15, 11, 55, 30, 21, 66, 42, 30], 
dtype=torch.uint8) + partial_postprocess_return_pt_slice = torch.tensor([100, 130, 99, 133, 106, 112, 97, 100, 121], dtype=torch.uint8) + return_pt_slice = torch.tensor([-0.2177, 0.0217, -0.2258, 0.0412, -0.1687, -0.1232, -0.2416, -0.2130, -0.0543]) + + +class RemoteAutoencoderKLSDXLTests( + RemoteAutoencoderKLMixin, + unittest.TestCase, +): + shape = ( + 1, + 4, + 128, + 128, + ) + out_hw = ( + 1024, + 1024, + ) + endpoint = DECODE_ENDPOINT_SD_XL + dtype = torch.float16 + scaling_factor = 0.13025 + shift_factor = None + processor_cls = VaeImageProcessor + output_pt_slice = torch.tensor([104, 52, 23, 114, 61, 35, 108, 87, 38], dtype=torch.uint8) + partial_postprocess_return_pt_slice = torch.tensor([77, 86, 89, 49, 60, 75, 52, 65, 78], dtype=torch.uint8) + return_pt_slice = torch.tensor([-0.3945, -0.3289, -0.2993, -0.6177, -0.5259, -0.4119, -0.5898, -0.4863, -0.3845]) + + +class RemoteAutoencoderKLFluxTests( + RemoteAutoencoderKLMixin, + unittest.TestCase, +): + shape = ( + 1, + 16, + 128, + 128, + ) + out_hw = ( + 1024, + 1024, + ) + endpoint = DECODE_ENDPOINT_FLUX + dtype = torch.bfloat16 + scaling_factor = 0.3611 + shift_factor = 0.1159 + processor_cls = VaeImageProcessor + output_pt_slice = torch.tensor([110, 72, 91, 62, 35, 52, 69, 55, 69], dtype=torch.uint8) + partial_postprocess_return_pt_slice = torch.tensor( + [202, 203, 203, 197, 195, 193, 189, 188, 178], dtype=torch.uint8 + ) + return_pt_slice = torch.tensor([0.5820, 0.5962, 0.5898, 0.5439, 0.5327, 0.5112, 0.4797, 0.4773, 0.3984]) + + +class RemoteAutoencoderKLFluxPackedTests( + RemoteAutoencoderKLMixin, + unittest.TestCase, +): + shape = ( + 1, + 4096, + 64, + ) + out_hw = ( + 1024, + 1024, + ) + height = 1024 + width = 1024 + endpoint = DECODE_ENDPOINT_FLUX + dtype = torch.bfloat16 + scaling_factor = 0.3611 + shift_factor = 0.1159 + processor_cls = VaeImageProcessor + # slices are different due to randn on different shape. 
we can pack the latent instead if we want the same + output_pt_slice = torch.tensor([96, 116, 157, 45, 67, 104, 34, 56, 89], dtype=torch.uint8) + partial_postprocess_return_pt_slice = torch.tensor( + [168, 212, 202, 155, 191, 185, 150, 180, 168], dtype=torch.uint8 + ) + return_pt_slice = torch.tensor([0.3198, 0.6631, 0.5864, 0.2131, 0.4944, 0.4482, 0.1776, 0.4153, 0.3176]) + + +class RemoteAutoencoderKLHunyuanVideoTests( + RemoteAutoencoderKLHunyuanVideoMixin, + unittest.TestCase, +): + shape = ( + 1, + 16, + 3, + 40, + 64, + ) + out_hw = ( + 320, + 512, + ) + endpoint = DECODE_ENDPOINT_HUNYUAN_VIDEO + dtype = torch.float16 + scaling_factor = 0.476986 + processor_cls = VideoProcessor + output_pt_slice = torch.tensor([112, 92, 85, 112, 93, 85, 112, 94, 85], dtype=torch.uint8) + partial_postprocess_return_pt_slice = torch.tensor( + [149, 161, 168, 136, 150, 156, 129, 143, 149], dtype=torch.uint8 + ) + return_pt_slice = torch.tensor([0.1656, 0.2661, 0.3157, 0.0693, 0.1755, 0.2252, 0.0127, 0.1221, 0.1708]) + + +class RemoteAutoencoderKLSlowTestMixin: + channels: int = 4 + endpoint: str = None + dtype: torch.dtype = None + scaling_factor: float = None + shift_factor: float = None + width: int = None + height: int = None + + def get_dummy_inputs(self): + inputs = { + "endpoint": self.endpoint, + "scaling_factor": self.scaling_factor, + "shift_factor": self.shift_factor, + "height": self.height, + "width": self.width, + } + return inputs + + def test_multi_res(self): + inputs = self.get_dummy_inputs() + for height in {320, 512, 640, 704, 896, 1024, 1208, 1384, 1536, 1608, 1864, 2048}: + for width in {320, 512, 640, 704, 896, 1024, 1208, 1384, 1536, 1608, 1864, 2048}: + inputs["tensor"] = torch.randn( + (1, self.channels, height // 8, width // 8), + device=torch_device, + dtype=self.dtype, + generator=torch.Generator(torch_device).manual_seed(13), + ) + inputs["height"] = height + inputs["width"] = width + output = remote_decode(output_type="pt", partial_postprocess=True, **inputs) + output.save(f"test_multi_res_{height}_{width}.png") + + +@slow +class RemoteAutoencoderKLSDv1SlowTests( + RemoteAutoencoderKLSlowTestMixin, + unittest.TestCase, +): + endpoint = DECODE_ENDPOINT_SD_V1 + dtype = torch.float16 + scaling_factor = 0.18215 + shift_factor = None + + +@slow +class RemoteAutoencoderKLSDXLSlowTests( + RemoteAutoencoderKLSlowTestMixin, + unittest.TestCase, +): + endpoint = DECODE_ENDPOINT_SD_XL + dtype = torch.float16 + scaling_factor = 0.13025 + shift_factor = None + + +@slow +class RemoteAutoencoderKLFluxSlowTests( + RemoteAutoencoderKLSlowTestMixin, + unittest.TestCase, +): + channels = 16 + endpoint = DECODE_ENDPOINT_FLUX + dtype = torch.bfloat16 + scaling_factor = 0.3611 + shift_factor = 0.1159 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/test_remote_encode.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/test_remote_encode.py new file mode 100644 index 0000000000000000000000000000000000000000..4c0daf08fd8cde87997cb2187862bee898409dbe --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/remote/test_remote_encode.py @@ -0,0 +1,225 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +import PIL.Image +import torch + +from diffusers.utils import load_image +from diffusers.utils.constants import ( + DECODE_ENDPOINT_FLUX, + DECODE_ENDPOINT_SD_V1, + DECODE_ENDPOINT_SD_XL, + ENCODE_ENDPOINT_FLUX, + ENCODE_ENDPOINT_SD_V1, + ENCODE_ENDPOINT_SD_XL, +) +from diffusers.utils.remote_utils import ( + remote_decode, + remote_encode, +) + +from ..testing_utils import ( + enable_full_determinism, + slow, +) + + +enable_full_determinism() + +IMAGE = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/astronaut.jpg?download=true" + + +class RemoteAutoencoderKLEncodeMixin: + channels: int = None + endpoint: str = None + decode_endpoint: str = None + dtype: torch.dtype = None + scaling_factor: float = None + shift_factor: float = None + image: PIL.Image.Image = None + + def get_dummy_inputs(self): + if self.image is None: + self.image = load_image(IMAGE) + inputs = { + "endpoint": self.endpoint, + "image": self.image, + "scaling_factor": self.scaling_factor, + "shift_factor": self.shift_factor, + } + return inputs + + def test_image_input(self): + inputs = self.get_dummy_inputs() + height, width = inputs["image"].height, inputs["image"].width + output = remote_encode(**inputs) + self.assertEqual(list(output.shape), [1, self.channels, height // 8, width // 8]) + decoded = remote_decode( + tensor=output, + endpoint=self.decode_endpoint, + scaling_factor=self.scaling_factor, + shift_factor=self.shift_factor, + image_format="png", + ) + self.assertEqual(decoded.height, height) + self.assertEqual(decoded.width, width) + # image_slice = torch.from_numpy(np.array(inputs["image"])[0, -3:, -3:].flatten()) + # decoded_slice = torch.from_numpy(np.array(decoded)[0, -3:, -3:].flatten()) + # TODO: how to test this? encode->decode is lossy. expected slice of encoded latent? 
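+ # One possible approach (only a sketch, assuming `numpy as np` were imported): compare heavily
+ # downsampled copies of the input and decoded images with a loose mean-absolute-difference
+ # threshold, which tolerates the lossy round trip, e.g.
+ #     small_in = np.array(inputs["image"].resize((64, 64)), dtype=np.float32)
+ #     small_out = np.array(decoded.resize((64, 64)), dtype=np.float32)
+ #     assert np.abs(small_in - small_out).mean() < 20  # threshold is a guess and would need tuning per VAE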
+ + +class RemoteAutoencoderKLSDv1Tests( + RemoteAutoencoderKLEncodeMixin, + unittest.TestCase, +): + channels = 4 + endpoint = ENCODE_ENDPOINT_SD_V1 + decode_endpoint = DECODE_ENDPOINT_SD_V1 + dtype = torch.float16 + scaling_factor = 0.18215 + shift_factor = None + + +class RemoteAutoencoderKLSDXLTests( + RemoteAutoencoderKLEncodeMixin, + unittest.TestCase, +): + channels = 4 + endpoint = ENCODE_ENDPOINT_SD_XL + decode_endpoint = DECODE_ENDPOINT_SD_XL + dtype = torch.float16 + scaling_factor = 0.13025 + shift_factor = None + + +class RemoteAutoencoderKLFluxTests( + RemoteAutoencoderKLEncodeMixin, + unittest.TestCase, +): + channels = 16 + endpoint = ENCODE_ENDPOINT_FLUX + decode_endpoint = DECODE_ENDPOINT_FLUX + dtype = torch.bfloat16 + scaling_factor = 0.3611 + shift_factor = 0.1159 + + +class RemoteAutoencoderKLEncodeSlowTestMixin: + channels: int = 4 + endpoint: str = None + decode_endpoint: str = None + dtype: torch.dtype = None + scaling_factor: float = None + shift_factor: float = None + image: PIL.Image.Image = None + + def get_dummy_inputs(self): + if self.image is None: + self.image = load_image(IMAGE) + inputs = { + "endpoint": self.endpoint, + "image": self.image, + "scaling_factor": self.scaling_factor, + "shift_factor": self.shift_factor, + } + return inputs + + def test_multi_res(self): + inputs = self.get_dummy_inputs() + for height in { + 320, + 512, + 640, + 704, + 896, + 1024, + 1208, + 1384, + 1536, + 1608, + 1864, + 2048, + }: + for width in { + 320, + 512, + 640, + 704, + 896, + 1024, + 1208, + 1384, + 1536, + 1608, + 1864, + 2048, + }: + inputs["image"] = inputs["image"].resize( + ( + width, + height, + ) + ) + output = remote_encode(**inputs) + self.assertEqual(list(output.shape), [1, self.channels, height // 8, width // 8]) + decoded = remote_decode( + tensor=output, + endpoint=self.decode_endpoint, + scaling_factor=self.scaling_factor, + shift_factor=self.shift_factor, + image_format="png", + ) + self.assertEqual(decoded.height, height) + self.assertEqual(decoded.width, width) + decoded.save(f"test_multi_res_{height}_{width}.png") + + +@slow +class RemoteAutoencoderKLSDv1SlowTests( + RemoteAutoencoderKLEncodeSlowTestMixin, + unittest.TestCase, +): + endpoint = ENCODE_ENDPOINT_SD_V1 + decode_endpoint = DECODE_ENDPOINT_SD_V1 + dtype = torch.float16 + scaling_factor = 0.18215 + shift_factor = None + + +@slow +class RemoteAutoencoderKLSDXLSlowTests( + RemoteAutoencoderKLEncodeSlowTestMixin, + unittest.TestCase, +): + endpoint = ENCODE_ENDPOINT_SD_XL + decode_endpoint = DECODE_ENDPOINT_SD_XL + dtype = torch.float16 + scaling_factor = 0.13025 + shift_factor = None + + +@slow +class RemoteAutoencoderKLFluxSlowTests( + RemoteAutoencoderKLEncodeSlowTestMixin, + unittest.TestCase, +): + channels = 16 + endpoint = ENCODE_ENDPOINT_FLUX + decode_endpoint = DECODE_ENDPOINT_FLUX + dtype = torch.bfloat16 + scaling_factor = 0.3611 + shift_factor = 0.1159 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_consistency_model.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_consistency_model.py new file mode 100644 index 0000000000000000000000000000000000000000..4f773d7db05f752d2b1e216071af5a1ca808d658 --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_consistency_model.py @@ -0,0 +1,189 @@ +import torch + +from diffusers import CMStochasticIterativeScheduler + +from .test_schedulers import SchedulerCommonTest + + +class CMStochasticIterativeSchedulerTest(SchedulerCommonTest): + scheduler_classes = (CMStochasticIterativeScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 201, + "sigma_min": 0.002, + "sigma_max": 80.0, + } + + config.update(**kwargs) + return config + + # Override test_step_shape to add CMStochasticIterativeScheduler-specific logic regarding timesteps + # Problem is that we don't know two timesteps that will always be in the timestep schedule from only the scheduler + # config; scaled sigma_max is always in the timestep schedule, but sigma_min is in the sigma schedule while scaled + # sigma_min is not in the timestep schedule + def test_step_shape(self): + num_inference_steps = 10 + + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + + timestep_0 = scheduler.timesteps[0] + timestep_1 = scheduler.timesteps[1] + + sample = self.dummy_sample + residual = 0.1 * sample + + output_0 = scheduler.step(residual, timestep_0, sample).prev_sample + output_1 = scheduler.step(residual, timestep_1, sample).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_clip_denoised(self): + for clip_denoised in [True, False]: + self.check_over_configs(clip_denoised=clip_denoised) + + def test_full_loop_no_noise_onestep(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 1 + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + for i, t in enumerate(timesteps): + # 1. scale model input + scaled_sample = scheduler.scale_model_input(sample, t) + + # 2. predict noise residual + residual = model(scaled_sample, t) + + # 3. predict previous sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 192.7614) < 1e-2 + assert abs(result_mean.item() - 0.2510) < 1e-3 + + def test_full_loop_no_noise_multistep(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [106, 0] + scheduler.set_timesteps(timesteps=timesteps) + timesteps = scheduler.timesteps + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + for t in timesteps: + # 1. scale model input + scaled_sample = scheduler.scale_model_input(sample, t) + + # 2. predict noise residual + residual = model(scaled_sample, t) + + # 3. 
predict previous sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 347.6357) < 1e-2 + assert abs(result_mean.item() - 0.4527) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 8 + + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for t in timesteps: + # 1. scale model input + scaled_sample = scheduler.scale_model_input(sample, t) + + # 2. predict noise residual + residual = model(scaled_sample, t) + + # 3. predict previous sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 763.9186) < 1e-2, f" expected result sum 763.9186, but get {result_sum}" + assert abs(result_mean.item() - 0.9947) < 1e-3, f" expected result mean 0.9947, but get {result_mean}" + + def test_custom_timesteps_increasing_order(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [39, 30, 12, 15, 0] + + with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."): + scheduler.set_timesteps(timesteps=timesteps) + + def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [39, 30, 12, 1, 0] + num_inference_steps = len(timesteps) + + with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."): + scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) + + def test_custom_timesteps_too_large(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [scheduler.config.num_train_timesteps] + + with self.assertRaises( + ValueError, + msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", + ): + scheduler.set_timesteps(timesteps=timesteps) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim.py new file mode 100644 index 0000000000000000000000000000000000000000..13b353a44b0885ed52eda0ce97d3ca7bd4b18ad5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim.py @@ -0,0 +1,176 @@ +import torch + +from diffusers import DDIMScheduler + +from .test_schedulers import SchedulerCommonTest + + +class DDIMSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DDIMScheduler,) + forward_default_kwargs = (("eta", 0.0), 
("num_inference_steps", 50)) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "clip_sample": True, + } + + config.update(**kwargs) + return config + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps, eta = 10, 0.0 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + scheduler.set_timesteps(num_inference_steps) + + for t in scheduler.timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, eta).prev_sample + + return sample + + def test_timesteps(self): + for timesteps in [100, 500, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_steps_offset(self): + for steps_offset in [0, 1]: + self.check_over_configs(steps_offset=steps_offset) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(steps_offset=1) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(5) + assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1])) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(clip_sample=clip_sample) + + def test_timestep_spacing(self): + for timestep_spacing in ["trailing", "leading"]: + self.check_over_configs(timestep_spacing=timestep_spacing) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_time_indices(self): + for t in [1, 10, 49]: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): + self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) + + def test_eta(self): + for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]): + self.check_over_forward(time_step=t, eta=eta) + + def test_variance(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5 + + def test_full_loop_no_noise(self): + sample = 
self.full_loop() + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 172.0067) < 1e-2 + assert abs(result_mean.item() - 0.223967) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 52.5302) < 1e-2 + assert abs(result_mean.item() - 0.0684) < 1e-3 + + def test_full_loop_with_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 149.8295) < 1e-2 + assert abs(result_mean.item() - 0.1951) < 1e-3 + + def test_full_loop_with_no_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 149.0784) < 1e-2 + assert abs(result_mean.item() - 0.1941) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps, eta = 10, 0.0 + t_start = 8 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for t in timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, eta).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 354.5418) < 1e-2, f" expected result sum 218.4379, but get {result_sum}" + assert abs(result_mean.item() - 0.4616) < 1e-3, f" expected result mean 0.2844, but get {result_mean}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim_inverse.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim_inverse.py new file mode 100644 index 0000000000000000000000000000000000000000..81d53f1b477850f556ea8cf5d669698ebca295b0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim_inverse.py @@ -0,0 +1,138 @@ +import unittest + +import torch + +from diffusers import DDIMInverseScheduler + +from .test_schedulers import SchedulerCommonTest + + +class DDIMInverseSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DDIMInverseScheduler,) + forward_default_kwargs = (("num_inference_steps", 50),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "clip_sample": True, + } + + config.update(**kwargs) + return config + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + scheduler.set_timesteps(num_inference_steps) + + for t in scheduler.timesteps: + residual = model(sample, t) + 
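# DDIMInverseScheduler walks the schedule in ascending timestep order (see
# test_steps_offset below, which expects [1, 201, 401, 601, 801]), so each
# step() in this loop moves the sample toward higher noise levels, i.e. it
# deterministically inverts the DDIM sampling trajectory.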
sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_timesteps(self): + for timesteps in [100, 500, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_steps_offset(self): + for steps_offset in [0, 1]: + self.check_over_configs(steps_offset=steps_offset) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(steps_offset=1) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(5) + assert torch.equal(scheduler.timesteps, torch.LongTensor([1, 201, 401, 601, 801])) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(clip_sample=clip_sample) + + def test_timestep_spacing(self): + for timestep_spacing in ["trailing", "leading"]: + self.check_over_configs(timestep_spacing=timestep_spacing) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_time_indices(self): + for t in [1, 10, 49]: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): + self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) + + @unittest.skip("Test not supported.") + def test_add_noise_device(self): + pass + + def test_full_loop_no_noise(self): + sample = self.full_loop() + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 671.6816) < 1e-2 + assert abs(result_mean.item() - 0.8746) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 1394.2185) < 1e-2 + assert abs(result_mean.item() - 1.8154) < 1e-3 + + def test_full_loop_with_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 539.9622) < 1e-2 + assert abs(result_mean.item() - 0.7031) < 1e-3 + + def test_full_loop_with_no_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 542.6722) < 1e-2 + assert abs(result_mean.item() - 0.7066) < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim_parallel.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..3ce8034cfb952bc1d1e39c4e37621105e0d453a2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddim_parallel.py @@ -0,0 +1,216 @@ +# Copyright 2025 ParaDiGMS authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch + +from diffusers import DDIMParallelScheduler + +from .test_schedulers import SchedulerCommonTest + + +class DDIMParallelSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DDIMParallelScheduler,) + forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50)) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "clip_sample": True, + } + + config.update(**kwargs) + return config + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps, eta = 10, 0.0 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + scheduler.set_timesteps(num_inference_steps) + + for t in scheduler.timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, eta).prev_sample + + return sample + + def test_timesteps(self): + for timesteps in [100, 500, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_steps_offset(self): + for steps_offset in [0, 1]: + self.check_over_configs(steps_offset=steps_offset) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(steps_offset=1) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(5) + assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1])) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(clip_sample=clip_sample) + + def test_timestep_spacing(self): + for timestep_spacing in ["trailing", "leading"]: + self.check_over_configs(timestep_spacing=timestep_spacing) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "v_prediction"]: + 
self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_time_indices(self): + for t in [1, 10, 49]: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): + self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) + + def test_eta(self): + for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]): + self.check_over_forward(time_step=t, eta=eta) + + def test_variance(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5 + + def test_batch_step_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps, eta = 10, 0.0 + scheduler.set_timesteps(num_inference_steps) + + model = self.dummy_model() + sample1 = self.dummy_sample_deter + sample2 = self.dummy_sample_deter + 0.1 + sample3 = self.dummy_sample_deter - 0.1 + + per_sample_batch = sample1.shape[0] + samples = torch.stack([sample1, sample2, sample3], dim=0) + timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch) + + residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1)) + pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta) + + result_sum = torch.sum(torch.abs(pred_prev_sample)) + result_mean = torch.mean(torch.abs(pred_prev_sample)) + + assert abs(result_sum.item() - 1147.7904) < 1e-2 + assert abs(result_mean.item() - 0.4982) < 1e-3 + + def test_full_loop_no_noise(self): + sample = self.full_loop() + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 172.0067) < 1e-2 + assert abs(result_mean.item() - 0.223967) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 52.5302) < 1e-2 + assert abs(result_mean.item() - 0.0684) < 1e-3 + + def test_full_loop_with_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 149.8295) < 1e-2 + assert abs(result_mean.item() - 0.1951) < 1e-3 + + def test_full_loop_with_no_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 149.0784) < 1e-2 + assert abs(result_mean.item() - 0.1941) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = 
self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps, eta = 10, 0.0 + t_start = 8 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for t in timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, eta).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 354.5418) < 1e-2, f" expected result sum 354.5418, but get {result_sum}" + assert abs(result_mean.item() - 0.4616) < 1e-3, f" expected result mean 0.4616, but get {result_mean}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddpm.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddpm.py new file mode 100644 index 0000000000000000000000000000000000000000..056b5d83350e4baeadf24693d2597fb185d44f46 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddpm.py @@ -0,0 +1,222 @@ +import torch + +from diffusers import DDPMScheduler + +from .test_schedulers import SchedulerCommonTest + + +class DDPMSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DDPMScheduler,) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "variance_type": "fixed_small", + "clip_sample": True, + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [1, 5, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_variance_type(self): + for variance in ["fixed_small", "fixed_large", "other"]: + self.check_over_configs(variance_type=variance) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(clip_sample=clip_sample) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample", "v_prediction"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "sample", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_time_indices(self): + for t in [0, 500, 999]: + self.check_over_forward(time_step=t) + + def test_variance(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + 
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_full_loop_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + for t in reversed(range(num_trained_timesteps)): + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + # if t > 0: + # noise = self.dummy_sample_deter + # variance = scheduler.get_variance(t) ** (0.5) * noise + # + # sample = pred_prev_sample + variance + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 258.9606) < 1e-2 + assert abs(result_mean.item() - 0.3372) < 1e-3 + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + for t in reversed(range(num_trained_timesteps)): + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + # if t > 0: + # noise = self.dummy_sample_deter + # variance = scheduler.get_variance(t) ** (0.5) * noise + # + # sample = pred_prev_sample + variance + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 202.0296) < 1e-2 + assert abs(result_mean.item() - 0.2631) < 1e-3 + + def test_custom_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + + scheduler.set_timesteps(timesteps=timesteps) + + scheduler_timesteps = scheduler.timesteps + + for i, timestep in enumerate(scheduler_timesteps): + if i == len(timesteps) - 1: + expected_prev_t = -1 + else: + expected_prev_t = timesteps[i + 1] + + prev_t = scheduler.previous_timestep(timestep) + prev_t = prev_t.item() + + self.assertEqual(prev_t, expected_prev_t) + + def test_custom_timesteps_increasing_order(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 51, 0] + + with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): + scheduler.set_timesteps(timesteps=timesteps) + + def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + num_inference_steps = len(timesteps) + + with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): + scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) + + def test_custom_timesteps_too_large(self): + scheduler_class 
= self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [scheduler.config.num_train_timesteps] + + with self.assertRaises( + ValueError, + msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", + ): + scheduler.set_timesteps(timesteps=timesteps) + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + t_start = num_trained_timesteps - 2 + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for t in timesteps: + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 387.9466) < 1e-2, f" expected result sum 387.9466, but get {result_sum}" + assert abs(result_mean.item() - 0.5051) < 1e-3, f" expected result mean 0.5051, but get {result_mean}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddpm_parallel.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddpm_parallel.py new file mode 100644 index 0000000000000000000000000000000000000000..377067071c2568d247e113bea2e3e1347a4d31ca --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ddpm_parallel.py @@ -0,0 +1,251 @@ +# Copyright 2025 ParaDiGMS authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
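# The DDPMParallelScheduler tests below mirror the DDPMScheduler suite and
# additionally exercise `batch_step_no_noise`, which advances several samples at
# independent timesteps in one call without injecting the ancestral noise term.
# A minimal, self-contained sketch of that call pattern; the toy `model` and the
# random sample shapes are illustrative assumptions, not fixtures from this diff:

import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(
    num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear"
)
model = lambda x, t: 0.1 * x  # toy stand-in for the common-test dummy model

# three samples, each to be stepped at its own training timestep
samples = torch.stack([torch.randn(4, 3, 8, 8) for _ in range(3)], dim=0)  # [3, B, C, H, W]
timesteps = torch.arange(3)[:, None].repeat(1, samples.shape[1])           # [3, B]

residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
prev_samples = scheduler.batch_step_no_noise(
    residual, timesteps.flatten(0, 1), samples.flatten(0, 1)
)  # same shape as the flattened input batch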
+ +import torch + +from diffusers import DDPMParallelScheduler + +from .test_schedulers import SchedulerCommonTest + + +class DDPMParallelSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DDPMParallelScheduler,) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "variance_type": "fixed_small", + "clip_sample": True, + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [1, 5, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_variance_type(self): + for variance in ["fixed_small", "fixed_large", "other"]: + self.check_over_configs(variance_type=variance) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(clip_sample=clip_sample) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample", "v_prediction"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "sample", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_time_indices(self): + for t in [0, 500, 999]: + self.check_over_forward(time_step=t) + + def test_variance(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5 + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_batch_step_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + + model = self.dummy_model() + sample1 = self.dummy_sample_deter + sample2 = self.dummy_sample_deter + 0.1 + sample3 = self.dummy_sample_deter - 0.1 + + per_sample_batch = sample1.shape[0] + samples = torch.stack([sample1, sample2, sample3], dim=0) + timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch) + + residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1)) + pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1)) + + result_sum = torch.sum(torch.abs(pred_prev_sample)) + result_mean = torch.mean(torch.abs(pred_prev_sample)) + + assert abs(result_sum.item() - 1153.1833) < 1e-2 + assert abs(result_mean.item() - 0.5005) < 1e-3 + + def test_full_loop_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + + model = self.dummy_model() + sample = self.dummy_sample_deter 
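# The loop below runs the full ancestral chain over every training timestep in
# reverse; the fixed generator keeps the per-step noise draw deterministic so the
# abs-sum / abs-mean fingerprints asserted at the end stay reproducible.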
+ generator = torch.manual_seed(0) + + for t in reversed(range(num_trained_timesteps)): + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 258.9606) < 1e-2 + assert abs(result_mean.item() - 0.3372) < 1e-3 + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + for t in reversed(range(num_trained_timesteps)): + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 202.0296) < 1e-2 + assert abs(result_mean.item() - 0.2631) < 1e-3 + + def test_custom_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + + scheduler.set_timesteps(timesteps=timesteps) + + scheduler_timesteps = scheduler.timesteps + + for i, timestep in enumerate(scheduler_timesteps): + if i == len(timesteps) - 1: + expected_prev_t = -1 + else: + expected_prev_t = timesteps[i + 1] + + prev_t = scheduler.previous_timestep(timestep) + prev_t = prev_t.item() + + self.assertEqual(prev_t, expected_prev_t) + + def test_custom_timesteps_increasing_order(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 51, 0] + + with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): + scheduler.set_timesteps(timesteps=timesteps) + + def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + num_inference_steps = len(timesteps) + + with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): + scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) + + def test_custom_timesteps_too_large(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [scheduler.config.num_train_timesteps] + + with self.assertRaises( + ValueError, + msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", + ): + scheduler.set_timesteps(timesteps=timesteps) + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_trained_timesteps = len(scheduler) + t_start = num_trained_timesteps - 2 + + model = 
self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for t in timesteps: + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 387.9466) < 1e-2, f" expected result sum 387.9466, but get {result_sum}" + assert abs(result_mean.item() - 0.5051) < 1e-3, f" expected result mean 0.5051, but get {result_mean}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_deis.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_deis.py new file mode 100644 index 0000000000000000000000000000000000000000..048bde51c36618221845dd48a642882208b77458 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_deis.py @@ -0,0 +1,273 @@ +import tempfile +import unittest + +import torch + +from diffusers import ( + DEISMultistepScheduler, + DPMSolverMultistepScheduler, + DPMSolverSinglestepScheduler, + UniPCMultistepScheduler, +) + +from .test_schedulers import SchedulerCommonTest + + +class DEISMultistepSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DEISMultistepScheduler,) + forward_default_kwargs = (("num_inference_steps", 25),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "solver_order": 2, + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output, new_output = sample, sample + for t in range(time_step, time_step + scheduler.config.solver_order + 1): + t = scheduler.timesteps[t] + output = scheduler.step(residual, t, output, **kwargs).prev_sample + new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + @unittest.skip("Test not supported.") + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 
0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting timesteps) + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, scheduler=None, **config): + if scheduler is None: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_switch(self): + # make sure that iterating over schedulers with same config names gives same results + # for defaults + scheduler = DEISMultistepScheduler(**self.get_scheduler_config()) + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.23916) < 1e-3 + + scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + scheduler = UniPCMultistepScheduler.from_config(scheduler.config) + scheduler = DEISMultistepScheduler.from_config(scheduler.config) + + sample 
= self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.23916) < 1e-3 + + def test_timesteps(self): + for timesteps in [25, 50, 100, 999, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for order in [1, 2, 3]: + for solver_type in ["logrho"]: + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + algorithm_type="deis", + solver_order=order, + solver_type=solver_type, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_solver_order_and_type(self): + for algorithm_type in ["deis"]: + for solver_type in ["logrho"]: + for order in [1, 2, 3]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + sample = self.full_loop( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + assert not torch.isnan(sample).any(), "Samples have nan numbers" + + def test_lower_order_final(self): + self.check_over_configs(lower_order_final=True) + self.check_over_configs(lower_order_final=False) + + def test_inference_steps(self): + for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.23916) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.091) < 1e-3 + + def test_fp16_support(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter.half() + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + assert sample.dtype == torch.float16 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 8 + + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 315.3016) < 1e-2, f" expected result sum 315.3016, but get {result_sum}" + assert abs(result_mean.item() - 0.41054) < 1e-3, f" expected result mean 0.41054, but get 
{result_mean}" + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_multi.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_multi.py new file mode 100644 index 0000000000000000000000000000000000000000..28c354709dc914f18347e7089ec7cb66c99f3174 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_multi.py @@ -0,0 +1,368 @@ +import tempfile +import unittest + +import torch + +from diffusers import ( + DEISMultistepScheduler, + DPMSolverMultistepScheduler, + DPMSolverSinglestepScheduler, + UniPCMultistepScheduler, +) + +from .test_schedulers import SchedulerCommonTest + + +class DPMSolverMultistepSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DPMSolverMultistepScheduler,) + forward_default_kwargs = (("num_inference_steps", 25),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "solver_order": 2, + "prediction_type": "epsilon", + "thresholding": False, + "sample_max_value": 1.0, + "algorithm_type": "dpmsolver++", + "solver_type": "midpoint", + "lower_order_final": False, + "euler_at_final": False, + "lambda_min_clipped": -float("inf"), + "variance_type": None, + "final_sigmas_type": "sigma_min", + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output, new_output = sample, sample + for t in range(time_step, time_step + scheduler.config.solver_order + 1): + t = new_scheduler.timesteps[t] + output = scheduler.step(residual, t, output, **kwargs).prev_sample + new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + @unittest.skip("Test not supported.") + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after 
setting timesteps) + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + time_step = new_scheduler.timesteps[time_step] + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, scheduler=None, **config): + if scheduler is None: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + generator = torch.manual_seed(0) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + return sample + + def full_loop_custom_timesteps(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + # reset the timesteps using `timesteps` + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps=None, timesteps=timesteps) + + generator = torch.manual_seed(0) + model = self.dummy_model() + sample = self.dummy_sample_deter + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [25, 50, 100, 999, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + 
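# With thresholding enabled, the scheduler clamps the predicted original sample
# ("x0") to `sample_max_value` via dynamic thresholding before each update, so the
# sweep below covers solver orders 1-3, both midpoint/heun solver types, several
# clamp values, and epsilon/sample prediction under the dpmsolver++ formulation.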
for order in [1, 2, 3]: + for solver_type in ["midpoint", "heun"]: + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + algorithm_type="dpmsolver++", + solver_order=order, + solver_type=solver_type, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_solver_order_and_type(self): + for algorithm_type in ["dpmsolver", "dpmsolver++", "sde-dpmsolver", "sde-dpmsolver++"]: + for solver_type in ["midpoint", "heun"]: + for order in [1, 2, 3]: + for prediction_type in ["epsilon", "sample"]: + if algorithm_type in ["sde-dpmsolver", "sde-dpmsolver++"]: + if order == 3: + continue + else: + self.check_over_configs( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + sample = self.full_loop( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + assert not torch.isnan(sample).any(), "Samples have nan numbers" + + def test_lower_order_final(self): + self.check_over_configs(lower_order_final=True) + self.check_over_configs(lower_order_final=False) + + def test_euler_at_final(self): + self.check_over_configs(euler_at_final=True) + self.check_over_configs(euler_at_final=False) + + def test_lambda_min_clipped(self): + self.check_over_configs(lambda_min_clipped=-float("inf")) + self.check_over_configs(lambda_min_clipped=-5.1) + + def test_variance_type(self): + self.check_over_configs(variance_type=None) + self.check_over_configs(variance_type="learned_range") + + def test_inference_steps(self): + for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.3301) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 5 + + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 318.4111) < 1e-2, f" expected result sum 318.4111, but get {result_sum}" + assert abs(result_mean.item() - 0.4146) < 1e-3, f" expected result mean 0.4146, but get {result_mean}" + + def test_full_loop_no_noise_thres(self): + sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 1.1364) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = 
self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2251) < 1e-3 + + def test_full_loop_with_karras_and_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2096) < 1e-3 + + def test_full_loop_with_lu_and_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction", use_lu_lambdas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.1554) < 1e-3 + + def test_switch(self): + # make sure that iterating over schedulers with same config names gives same results + # for defaults + scheduler = DPMSolverMultistepScheduler(**self.get_scheduler_config()) + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.3301) < 1e-3 + + scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) + scheduler = UniPCMultistepScheduler.from_config(scheduler.config) + scheduler = DEISMultistepScheduler.from_config(scheduler.config) + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.3301) < 1e-3 + + def test_fp16_support(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter.half() + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + assert sample.dtype == torch.float16 + + def test_duplicated_timesteps(self): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(scheduler.config.num_train_timesteps) + assert len(scheduler.timesteps) == scheduler.num_inference_steps + + def test_custom_timesteps(self): + for algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + for prediction_type in ["epsilon", "sample", "v_prediction"]: + for final_sigmas_type in ["sigma_min", "zero"]: + sample = self.full_loop( + algorithm_type=algorithm_type, + prediction_type=prediction_type, + final_sigmas_type=final_sigmas_type, + ) + sample_custom_timesteps = self.full_loop_custom_timesteps( + algorithm_type=algorithm_type, + prediction_type=prediction_type, + final_sigmas_type=final_sigmas_type, + ) + assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, ( + f"Scheduler outputs are not identical for algorithm_type: {algorithm_type}, prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}" + ) + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_multi_inverse.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_multi_inverse.py new file mode 100644 index 0000000000000000000000000000000000000000..0eced957190ca46c14877827bb410cc7b84ed6ae --- /dev/null +++ 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_multi_inverse.py @@ -0,0 +1,273 @@ +import tempfile + +import torch + +from diffusers import DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler + +from .test_schedulers import SchedulerCommonTest + + +class DPMSolverMultistepSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DPMSolverMultistepInverseScheduler,) + forward_default_kwargs = (("num_inference_steps", 25),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "solver_order": 2, + "prediction_type": "epsilon", + "thresholding": False, + "sample_max_value": 1.0, + "algorithm_type": "dpmsolver++", + "solver_type": "midpoint", + "lower_order_final": False, + "lambda_min_clipped": -float("inf"), + "variance_type": None, + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output, new_output = sample, sample + for t in range(time_step, time_step + scheduler.config.solver_order + 1): + t = scheduler.timesteps[t] + output = scheduler.step(residual, t, output, **kwargs).prev_sample + new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting timesteps) + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + 
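# with the dummy residual history restored, the scheduler reloaded via save_config / from_pretrained must reproduce the original step output +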
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, scheduler=None, **config): + if scheduler is None: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [25, 50, 100, 999, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for order in [1, 2, 3]: + for solver_type in ["midpoint", "heun"]: + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + algorithm_type="dpmsolver++", + solver_order=order, + solver_type=solver_type, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_solver_order_and_type(self): + for algorithm_type in ["dpmsolver", "dpmsolver++"]: + for solver_type in ["midpoint", "heun"]: + for order in [1, 2, 3]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + sample = self.full_loop( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + assert not torch.isnan(sample).any(), "Samples have nan numbers" + + def test_lower_order_final(self): + self.check_over_configs(lower_order_final=True) + self.check_over_configs(lower_order_final=False) + + def test_lambda_min_clipped(self): + self.check_over_configs(lambda_min_clipped=-float("inf")) + self.check_over_configs(lambda_min_clipped=-5.1) + + def test_variance_type(self): + self.check_over_configs(variance_type=None) + self.check_over_configs(variance_type="learned_range") + + def 
test_timestep_spacing(self): + for timestep_spacing in ["trailing", "leading"]: + self.check_over_configs(timestep_spacing=timestep_spacing) + + def test_inference_steps(self): + for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.7047) < 1e-3 + + def test_full_loop_no_noise_thres(self): + sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 19.8933) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 1.5194) < 1e-3 + + def test_full_loop_with_karras_and_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 1.7833) < 2e-3 + + def test_switch(self): + # make sure that iterating over schedulers with same config names gives same results + # for defaults + scheduler = DPMSolverMultistepInverseScheduler(**self.get_scheduler_config()) + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.7047) < 1e-3 + + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + scheduler = DPMSolverMultistepInverseScheduler.from_config(scheduler.config) + + sample = self.full_loop(scheduler=scheduler) + new_result_mean = torch.mean(torch.abs(sample)) + + assert abs(new_result_mean.item() - result_mean.item()) < 1e-3 + + def test_fp16_support(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter.half() + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + assert sample.dtype == torch.float16 + + def test_unique_timesteps(self, **config): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(scheduler.config.num_train_timesteps) + assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_sde.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_sde.py new file mode 100644 index 0000000000000000000000000000000000000000..e4dde67344ac7a317b4110ea511b590c88442bd7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_sde.py @@ -0,0 +1,173 @@ +import torch + +from diffusers import DPMSolverSDEScheduler + +from ..testing_utils import require_torchsde, torch_device +from .test_schedulers import SchedulerCommonTest + + +@require_torchsde +class 
DPMSolverSDESchedulerTest(SchedulerCommonTest): + scheduler_classes = (DPMSolverSDEScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "noise_sampler_seed": 0, + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_full_loop_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["mps"]: + assert abs(result_sum.item() - 167.47821044921875) < 1e-2 + assert abs(result_mean.item() - 0.2178705964565277) < 1e-3 + elif torch_device in ["cuda", "xpu"]: + assert abs(result_sum.item() - 171.59352111816406) < 1e-2 + assert abs(result_mean.item() - 0.22342906892299652) < 1e-3 + else: + assert abs(result_sum.item() - 162.52383422851562) < 1e-2 + assert abs(result_mean.item() - 0.211619570851326) < 1e-3 + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["mps"]: + assert abs(result_sum.item() - 124.77149200439453) < 1e-2 + assert abs(result_mean.item() - 0.16226289014816284) < 1e-3 + elif torch_device in ["cuda", "xpu"]: + assert abs(result_sum.item() - 128.1663360595703) < 1e-2 + assert abs(result_mean.item() - 0.16688326001167297) < 1e-3 + else: + assert abs(result_sum.item() - 119.8487548828125) < 1e-2 + assert abs(result_mean.item() - 0.1560530662536621) < 1e-3 + + def test_full_loop_device(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = 
self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["mps"]: + assert abs(result_sum.item() - 167.46957397460938) < 1e-2 + assert abs(result_mean.item() - 0.21805934607982635) < 1e-3 + elif torch_device in ["cuda", "xpu"]: + assert abs(result_sum.item() - 171.59353637695312) < 1e-2 + assert abs(result_mean.item() - 0.22342908382415771) < 1e-3 + else: + assert abs(result_sum.item() - 162.52383422851562) < 1e-2 + assert abs(result_mean.item() - 0.211619570851326) < 1e-3 + + def test_full_loop_device_karras_sigmas(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["mps"]: + assert abs(result_sum.item() - 176.66974135742188) < 1e-2 + assert abs(result_mean.item() - 0.23003872730981811) < 1e-2 + elif torch_device in ["cuda", "xpu"]: + assert abs(result_sum.item() - 177.63653564453125) < 1e-2 + assert abs(result_mean.item() - 0.23003872730981811) < 1e-2 + else: + assert abs(result_sum.item() - 170.3135223388672) < 1e-2 + assert abs(result_mean.item() - 0.23003872730981811) < 1e-2 + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_single.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_single.py new file mode 100644 index 0000000000000000000000000000000000000000..0756a5ed71ff97d0b25363643b77404a6785d936 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_dpm_single.py @@ -0,0 +1,356 @@ +import tempfile +import unittest + +import torch + +from diffusers import ( + DEISMultistepScheduler, + DPMSolverMultistepScheduler, + DPMSolverSinglestepScheduler, + UniPCMultistepScheduler, +) + +from .test_schedulers import SchedulerCommonTest + + +class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest): + scheduler_classes = (DPMSolverSinglestepScheduler,) + forward_default_kwargs = (("num_inference_steps", 25),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "solver_order": 2, + "prediction_type": "epsilon", + "thresholding": False, + "sample_max_value": 1.0, + "algorithm_type": "dpmsolver++", + "solver_type": "midpoint", + "lambda_min_clipped": -float("inf"), + "variance_type": None, + "final_sigmas_type": "sigma_min", + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = 
dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output, new_output = sample, sample + for t in range(time_step, time_step + scheduler.config.solver_order + 1): + t = scheduler.timesteps[t] + output = scheduler.step(residual, t, output, **kwargs).prev_sample + new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + @unittest.skip("Test not supported.") + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting timesteps) + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, scheduler=None, **config): + if scheduler is None: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def full_loop_custom_timesteps(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + # reset the timesteps 
using`timesteps` + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps=None, timesteps=timesteps) + + model = self.dummy_model() + sample = self.dummy_sample_deter + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_full_uneven_loop(self): + scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) + num_inference_steps = 50 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # make sure that the first t is uneven + for i, t in enumerate(scheduler.timesteps[3:]): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2574) < 1e-3 + + def test_timesteps(self): + for timesteps in [25, 50, 100, 999, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_switch(self): + # make sure that iterating over schedulers with same config names gives same results + # for defaults + scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config()) + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2791) < 1e-3 + + scheduler = DEISMultistepScheduler.from_config(scheduler.config) + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + scheduler = UniPCMultistepScheduler.from_config(scheduler.config) + scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) + + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2791) < 1e-3 + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for order in [1, 2, 3]: + for solver_type in ["midpoint", "heun"]: + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + algorithm_type="dpmsolver++", + solver_order=order, + solver_type=solver_type, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_solver_order_and_type(self): + for algorithm_type in ["dpmsolver", "dpmsolver++", "sde-dpmsolver++"]: + for solver_type in ["midpoint", "heun"]: + for order in [1, 2, 3]: + for prediction_type in ["epsilon", "sample"]: + if algorithm_type == "sde-dpmsolver++": + if order == 3: + continue + else: + self.check_over_configs( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + sample = self.full_loop( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + assert not torch.isnan(sample).any(), "Samples have nan numbers" + + def test_lower_order_final(self): + self.check_over_configs(lower_order_final=True) + self.check_over_configs(lower_order_final=False) + + def test_lambda_min_clipped(self): + self.check_over_configs(lambda_min_clipped=-float("inf")) + self.check_over_configs(lambda_min_clipped=-5.1) + + def test_variance_type(self): + self.check_over_configs(variance_type=None) + self.check_over_configs(variance_type="learned_range") + + def test_inference_steps(self): + for num_inference_steps in [1, 2, 3, 
5, 10, 50, 100, 999, 1000]: + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2791) < 1e-3 + + def test_full_loop_with_karras(self): + sample = self.full_loop(use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2248) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.1453) < 1e-3 + + def test_full_loop_with_karras_and_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.0649) < 1e-3 + + def test_fp16_support(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter.half() + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + assert sample.dtype == torch.float16 + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + time_step_0 = scheduler.timesteps[0] + time_step_1 = scheduler.timesteps[1] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 5 + + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 269.2187) < 1e-2, f" expected result sum 269.2187, but get {result_sum}" + assert abs(result_mean.item() - 0.3505) < 1e-3, f" expected result 
mean 0.3505, but get {result_mean}" + + def test_custom_timesteps(self): + for prediction_type in ["epsilon", "sample", "v_prediction"]: + for lower_order_final in [True, False]: + for final_sigmas_type in ["sigma_min", "zero"]: + sample = self.full_loop( + prediction_type=prediction_type, + lower_order_final=lower_order_final, + final_sigmas_type=final_sigmas_type, + ) + sample_custom_timesteps = self.full_loop_custom_timesteps( + prediction_type=prediction_type, + lower_order_final=lower_order_final, + final_sigmas_type=final_sigmas_type, + ) + assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, ( + f"Scheduler outputs are not identical for prediction_type: {prediction_type}, lower_order_final: {lower_order_final} and final_sigmas_type: {final_sigmas_type}" + ) + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py new file mode 100644 index 0000000000000000000000000000000000000000..8525ce61c40d2670d03d7e04b075fb8236a505cc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_edm_dpmsolver_multistep.py @@ -0,0 +1,262 @@ +import tempfile +import unittest + +import torch + +from diffusers import EDMDPMSolverMultistepScheduler + +from .test_schedulers import SchedulerCommonTest + + +class EDMDPMSolverMultistepSchedulerTest(SchedulerCommonTest): + scheduler_classes = (EDMDPMSolverMultistepScheduler,) + forward_default_kwargs = (("num_inference_steps", 25),) + + def get_scheduler_config(self, **kwargs): + config = { + "sigma_min": 0.002, + "sigma_max": 80.0, + "sigma_data": 0.5, + "num_train_timesteps": 1000, + "solver_order": 2, + "prediction_type": "epsilon", + "thresholding": False, + "sample_max_value": 1.0, + "algorithm_type": "dpmsolver++", + "solver_type": "midpoint", + "lower_order_final": False, + "euler_at_final": False, + "final_sigmas_type": "sigma_min", + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output, new_output = sample, sample + for t in range(time_step, time_step + scheduler.config.solver_order + 1): + t = new_scheduler.timesteps[t] + output = scheduler.step(residual, t, output, **kwargs).prev_sample + new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are 
not identical" + + @unittest.skip("Test not supported.") + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting timesteps) + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + time_step = new_scheduler.timesteps[time_step] + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, scheduler=None, **config): + if scheduler is None: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [25, 50, 100, 999, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_thresholding(self): + self.check_over_configs(thresholding=False) + for order in [1, 2, 3]: + for solver_type in ["midpoint", "heun"]: + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "v_prediction"]: + 
self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + algorithm_type="dpmsolver++", + solver_order=order, + solver_type=solver_type, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + # TODO (patil-suraj): Fix this test + @unittest.skip("Skip for now, as it failing currently but works with the actual model") + def test_solver_order_and_type(self): + for algorithm_type in ["dpmsolver++", "sde-dpmsolver++"]: + for solver_type in ["midpoint", "heun"]: + for order in [1, 2, 3]: + for prediction_type in ["epsilon", "v_prediction"]: + if algorithm_type == "sde-dpmsolver++": + if order == 3: + continue + else: + self.check_over_configs( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + sample = self.full_loop( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + algorithm_type=algorithm_type, + ) + assert not torch.isnan(sample).any(), ( + f"Samples have nan numbers, {order}, {solver_type}, {prediction_type}, {algorithm_type}" + ) + + def test_lower_order_final(self): + self.check_over_configs(lower_order_final=True) + self.check_over_configs(lower_order_final=False) + + def test_euler_at_final(self): + self.check_over_configs(euler_at_final=True) + self.check_over_configs(euler_at_final=False) + + def test_inference_steps(self): + for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.0001) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 5 + + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 8.1661) < 1e-2, f" expected result sum 8.1661, but get {result_sum}" + assert abs(result_mean.item() - 0.0106) < 1e-3, f" expected result mean 0.0106, but get {result_mean}" + + def test_full_loop_no_noise_thres(self): + sample = self.full_loop(thresholding=True, dynamic_thresholding_ratio=0.87, sample_max_value=0.5) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.0080) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.0092) < 1e-3 + + def test_duplicated_timesteps(self, **config): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(scheduler.config.num_train_timesteps) + assert len(scheduler.timesteps) == scheduler.num_inference_steps + 
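# EDM schedulers define the noise schedule directly through sigma_min / sigma_max / sigma_data rather than betas, so the trained-betas test is skipped below +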
+ @unittest.skip("Test not supported.") + def test_trained_betas(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_edm_euler.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_edm_euler.py new file mode 100644 index 0000000000000000000000000000000000000000..acac4b1f4caed2aff6741a40e9214adedc0f744f --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_edm_euler.py @@ -0,0 +1,203 @@ +import inspect +import tempfile +import unittest +from typing import Dict, List, Tuple + +import torch + +from diffusers import EDMEulerScheduler + +from .test_schedulers import SchedulerCommonTest + + +class EDMEulerSchedulerTest(SchedulerCommonTest): + scheduler_classes = (EDMEulerScheduler,) + forward_default_kwargs = (("num_inference_steps", 10),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 256, + "sigma_min": 0.002, + "sigma_max": 80.0, + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_full_loop_no_noise(self, num_inference_steps=10, seed=0): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + for i, t in enumerate(scheduler.timesteps): + scaled_sample = scheduler.scale_model_input(sample, t) + + model_output = model(scaled_sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 34.1855) < 1e-3 + assert abs(result_mean.item() - 0.044) < 1e-3 + + def test_full_loop_device(self, num_inference_steps=10, seed=0): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + for i, t in enumerate(scheduler.timesteps): + scaled_sample = scheduler.scale_model_input(sample, t) + + model_output = model(scaled_sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 34.1855) < 1e-3 + assert abs(result_mean.item() - 0.044) < 1e-3 + + # Override test_from_save_pretrained to use EDMEulerScheduler-specific logic + def test_from_save_pretrained(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + scheduler.set_timesteps(num_inference_steps) + new_scheduler.set_timesteps(num_inference_steps) + timestep = scheduler.timesteps[0] + + sample = 
self.dummy_sample + + scaled_sample = scheduler.scale_model_input(sample, timestep) + residual = 0.1 * scaled_sample + + new_scaled_sample = new_scheduler.scale_model_input(sample, timestep) + new_residual = 0.1 * new_scaled_sample + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + new_output = new_scheduler.step(new_residual, timestep, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + # Override test_from_save_pretrained to use EDMEulerScheduler-specific logic + def test_step_shape(self): + num_inference_steps = 10 + + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + + timestep_0 = scheduler.timesteps[0] + timestep_1 = scheduler.timesteps[1] + + sample = self.dummy_sample + scaled_sample = scheduler.scale_model_input(sample, timestep_0) + residual = 0.1 * scaled_sample + + output_0 = scheduler.step(residual, timestep_0, sample).prev_sample + output_1 = scheduler.step(residual, timestep_1, sample).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + # Override test_from_save_pretrained to use EDMEulerScheduler-specific logic + def test_scheduler_outputs_equivalence(self): + def set_nan_tensor_to_zero(t): + t[t != t] = 0 + return t + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + torch.allclose( + set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 + ), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" + f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
+ ), + ) + + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", 50) + + timestep = 0 + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + timestep = scheduler.timesteps[0] + + sample = self.dummy_sample + scaled_sample = scheduler.scale_model_input(sample, timestep) + residual = 0.1 * scaled_sample + + # Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + outputs_dict = scheduler.step(residual, timestep, sample, **kwargs) + + scheduler.set_timesteps(num_inference_steps) + + scaled_sample = scheduler.scale_model_input(sample, timestep) + residual = 0.1 * scaled_sample + + # Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs) + + recursive_check(outputs_tuple, outputs_dict) + + @unittest.skip(reason="EDMEulerScheduler does not support beta schedules.") + def test_trained_betas(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_euler.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_euler.py new file mode 100644 index 0000000000000000000000000000000000000000..ee99465abfc39e7a7ce7a6734b22dbeb42df5efd --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_euler.py @@ -0,0 +1,271 @@ +import torch + +from diffusers import EulerDiscreteScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class EulerDiscreteSchedulerTest(SchedulerCommonTest): + scheduler_classes = (EulerDiscreteScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_timestep_type(self): + timestep_types = ["discrete", "continuous"] + for timestep_type in timestep_types: + self.check_over_configs(timestep_type=timestep_type) + + def test_karras_sigmas(self): + self.check_over_configs(use_karras_sigmas=True, sigma_min=0.02, sigma_max=700.0) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + 
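# build the scheduler under test from the default config merged with any per-test overrides +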
scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = self.num_inference_steps + scheduler.set_timesteps(num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + return sample + + def full_loop_custom_timesteps(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = self.num_inference_steps + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + # reset the timesteps using `timesteps` + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps=None, timesteps=timesteps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + return sample + + def full_loop_custom_sigmas(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = self.num_inference_steps + scheduler.set_timesteps(num_inference_steps) + sigmas = scheduler.sigmas + # reset the timesteps using `sigmas` + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps=None, sigmas=sigmas) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + return sample + + def test_full_loop_no_noise(self): + sample = self.full_loop() + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 10.0807) < 1e-2 + assert abs(result_mean.item() - 0.0131) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 0.0002) < 1e-2 + assert abs(result_mean.item() - 2.2676e-06) < 1e-3 + + def test_full_loop_device(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() + sample = sample.to(torch_device) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, 
sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 10.0807) < 1e-2 + assert abs(result_mean.item() - 0.0131) < 1e-3 + + def test_full_loop_device_karras_sigmas(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() + sample = sample.to(torch_device) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 124.52299499511719) < 1e-2 + assert abs(result_mean.item() - 0.16213932633399963) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + # add noise + t_start = self.num_inference_steps - 2 + noise = self.dummy_noise_deter + noise = noise.to(sample.device) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 57062.9297) < 1e-2, f" expected result sum 57062.9297, but get {result_sum}" + assert abs(result_mean.item() - 74.3007) < 1e-3, f" expected result mean 74.3007, but get {result_mean}" + + def test_custom_timesteps(self): + for prediction_type in ["epsilon", "sample", "v_prediction"]: + for interpolation_type in ["linear", "log_linear"]: + for final_sigmas_type in ["sigma_min", "zero"]: + sample = self.full_loop( + prediction_type=prediction_type, + interpolation_type=interpolation_type, + final_sigmas_type=final_sigmas_type, + ) + sample_custom_timesteps = self.full_loop_custom_timesteps( + prediction_type=prediction_type, + interpolation_type=interpolation_type, + final_sigmas_type=final_sigmas_type, + ) + assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, ( + f"Scheduler outputs are not identical for prediction_type: {prediction_type}, interpolation_type: {interpolation_type} and final_sigmas_type: {final_sigmas_type}" + ) + + def test_custom_sigmas(self): + for prediction_type in ["epsilon", "sample", "v_prediction"]: + for final_sigmas_type in ["sigma_min", "zero"]: + sample = self.full_loop( + prediction_type=prediction_type, + final_sigmas_type=final_sigmas_type, + ) + sample_custom_timesteps = self.full_loop_custom_sigmas( + prediction_type=prediction_type, + final_sigmas_type=final_sigmas_type, + ) + assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, ( 
+ f"Scheduler outputs are not identical for prediction_type: {prediction_type} and final_sigmas_type: {final_sigmas_type}" + ) + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_euler_ancestral.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_euler_ancestral.py new file mode 100644 index 0000000000000000000000000000000000000000..c4fe61bfc387ba1361800f6d660e761b9fdef7fd --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_euler_ancestral.py @@ -0,0 +1,156 @@ +import torch + +from diffusers import EulerAncestralDiscreteScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class EulerAncestralDiscreteSchedulerTest(SchedulerCommonTest): + scheduler_classes = (EulerAncestralDiscreteScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_full_loop_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 152.3192) < 1e-2 + assert abs(result_mean.item() - 0.1983) < 1e-3 + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = 
torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 108.4439) < 1e-2 + assert abs(result_mean.item() - 0.1412) < 1e-3 + + def test_full_loop_device(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() + sample = sample.to(torch_device) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 152.3192) < 1e-2 + assert abs(result_mean.item() - 0.1983) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + t_start = self.num_inference_steps - 2 + + scheduler.set_timesteps(self.num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + # add noise + noise = self.dummy_noise_deter + noise = noise.to(sample.device) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 56163.0508) < 1e-2, f" expected result sum 56163.0508, but get {result_sum}" + assert abs(result_mean.item() - 73.1290) < 1e-3, f" expected result mean 73.1290, but get {result_mean}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_heun.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_heun.py new file mode 100644 index 0000000000000000000000000000000000000000..97bef50048ba2011c106df5987af7368c033ee5a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_heun.py @@ -0,0 +1,227 @@ +import torch + +from diffusers import HeunDiscreteScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class HeunDiscreteSchedulerTest(SchedulerCommonTest): + scheduler_classes = (HeunDiscreteScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear", "exp"]: + self.check_over_configs(beta_schedule=schedule) + + def test_clip_sample(self): + for 
clip_sample_range in [1.0, 2.0, 3.0]: + self.check_over_configs(clip_sample_range=clip_sample_range, clip_sample=True) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction", "sample"]: + self.check_over_configs(prediction_type=prediction_type) + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = self.num_inference_steps + scheduler.set_timesteps(num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + return sample + + def full_loop_custom_timesteps(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = self.num_inference_steps + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + timesteps = torch.cat([timesteps[:1], timesteps[1::2]]) + # reset the timesteps using `timesteps` + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps=None, timesteps=timesteps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + return sample + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu", "mps"]: + assert abs(result_sum.item() - 0.1233) < 1e-2 + assert abs(result_mean.item() - 0.0002) < 1e-3 + else: + # CUDA + assert abs(result_sum.item() - 0.1233) < 1e-2 + assert abs(result_mean.item() - 0.0002) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu", "mps"]: + assert abs(result_sum.item() - 4.6934e-07) < 1e-2 + assert abs(result_mean.item() - 6.1112e-10) < 1e-3 + else: + # CUDA + assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2 + assert abs(result_mean.item() - 0.0002) < 1e-3 + + def test_full_loop_device(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if str(torch_device).startswith("cpu"): + # The following sum varies between 148 and 156 on mps. Why? 
+ assert abs(result_sum.item() - 0.1233) < 1e-2 + assert abs(result_mean.item() - 0.0002) < 1e-3 + elif str(torch_device).startswith("mps"): + # Larger tolerance on mps + assert abs(result_mean.item() - 0.0002) < 1e-2 + else: + # CUDA + assert abs(result_sum.item() - 0.1233) < 1e-2 + assert abs(result_mean.item() - 0.0002) < 1e-3 + + def test_full_loop_device_karras_sigmas(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 0.00015) < 1e-2 + assert abs(result_mean.item() - 1.9869554535034695e-07) < 1e-2 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + t_start = self.num_inference_steps - 2 + noise = self.dummy_noise_deter + noise = noise.to(torch_device) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 75074.8906) < 1e-2, f" expected result sum 75074.8906, but get {result_sum}" + assert abs(result_mean.item() - 97.7538) < 1e-3, f" expected result mean 97.7538, but get {result_mean}" + + def test_custom_timesteps(self): + for prediction_type in ["epsilon", "sample", "v_prediction"]: + for timestep_spacing in ["linspace", "leading"]: + sample = self.full_loop( + prediction_type=prediction_type, + timestep_spacing=timestep_spacing, + ) + sample_custom_timesteps = self.full_loop_custom_timesteps( + prediction_type=prediction_type, + timestep_spacing=timestep_spacing, + ) + assert torch.sum(torch.abs(sample - sample_custom_timesteps)) < 1e-5, ( + f"Scheduler outputs are not identical for prediction_type: {prediction_type}, timestep_spacing: {timestep_spacing}" + ) + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ipndm.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ipndm.py new file mode 100644 index 0000000000000000000000000000000000000000..ac7973c582950816039e96e1ef4aad245d4cd3f1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_ipndm.py @@ -0,0 +1,165 @@ +import tempfile +import unittest + +import torch + +from diffusers import 
IPNDMScheduler + +from .test_schedulers import SchedulerCommonTest + + +class IPNDMSchedulerTest(SchedulerCommonTest): + scheduler_classes = (IPNDMScheduler,) + forward_default_kwargs = (("num_inference_steps", 50),) + + def get_scheduler_config(self, **kwargs): + config = {"num_train_timesteps": 1000} + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.ets = dummy_past_residuals[:] + + if time_step is None: + time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.ets = dummy_past_residuals[:] + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + @unittest.skip("Test not supported.") + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting timesteps) + scheduler.ets = dummy_past_residuals[:] + + if time_step is None: + time_step = scheduler.timesteps[len(scheduler.timesteps) // 2] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.ets = dummy_past_residuals[:] + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def 
full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + scheduler._step_index = None + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] + scheduler.ets = dummy_past_residuals[:] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps, time_step=None) + + def test_inference_steps(self): + for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 2540529) < 10 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_kdpm2_ancestral.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_kdpm2_ancestral.py new file mode 100644 index 0000000000000000000000000000000000000000..135534db45361503fa5a7965e7a4c2640db33247 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_kdpm2_ancestral.py @@ -0,0 +1,164 @@ +import torch + +from diffusers import KDPM2AncestralDiscreteScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class KDPM2AncestralDiscreteSchedulerTest(SchedulerCommonTest): + scheduler_classes = (KDPM2AncestralDiscreteScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for 
timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_full_loop_no_noise(self): + if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 13979.9433) < 1e-2 + assert abs(result_mean.item() - 18.2030) < 5e-3 + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_full_loop_with_v_prediction(self): + if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + generator = torch.manual_seed(0) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 331.8133) < 1e-2 + assert abs(result_mean.item() - 0.4320) < 1e-3 + + def test_full_loop_device(self): + if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 13979.9433) < 1e-1 + assert abs(result_mean.item() - 18.2030) < 1e-3 + + def test_full_loop_with_noise(self): + if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + generator = torch.manual_seed(0) + + model = self.dummy_model() + sample = 
self.dummy_sample_deter * scheduler.init_noise_sigma + + # add noise + t_start = self.num_inference_steps - 2 + noise = self.dummy_noise_deter + noise = noise.to(sample.device) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 93087.3437) < 1e-2, f" expected result sum 93087.3437, but get {result_sum}" + assert abs(result_mean.item() - 121.2074) < 5e-3, f" expected result mean 121.2074, but get {result_mean}" + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_kdpm2_discrete.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_kdpm2_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..370ba2253ee28ecd06ceaf911b2947343a9ee318 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_kdpm2_discrete.py @@ -0,0 +1,172 @@ +import torch + +from diffusers import KDPM2DiscreteScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class KDPM2DiscreteSchedulerTest(SchedulerCommonTest): + scheduler_classes = (KDPM2DiscreteScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu", "mps"]: + assert abs(result_sum.item() - 4.6934e-07) < 1e-2 + assert abs(result_mean.item() - 6.1112e-10) < 1e-3 + else: + # CUDA + assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2 + assert abs(result_mean.item() - 0.0002) < 1e-3 + + def test_full_loop_no_noise(self): 
+ if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu", "mps"]: + assert abs(result_sum.item() - 20.4125) < 1e-2 + assert abs(result_mean.item() - 0.0266) < 1e-3 + else: + # CUDA + assert abs(result_sum.item() - 20.4125) < 1e-2 + assert abs(result_mean.item() - 0.0266) < 1e-3 + + def test_full_loop_device(self): + if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if str(torch_device).startswith("cpu"): + # The following sum varies between 148 and 156 on mps. Why? + assert abs(result_sum.item() - 20.4125) < 1e-2 + assert abs(result_mean.item() - 0.0266) < 1e-3 + else: + # CUDA + assert abs(result_sum.item() - 20.4125) < 1e-2 + assert abs(result_mean.item() - 0.0266) < 1e-3 + + def test_full_loop_with_noise(self): + if torch_device == "mps": + return + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + # add noise + t_start = self.num_inference_steps - 2 + noise = self.dummy_noise_deter + noise = noise.to(sample.device) + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 70408.4062) < 1e-2, f" expected result sum 70408.4062, but get {result_sum}" + assert abs(result_mean.item() - 91.6776) < 1e-3, f" expected result mean 91.6776, but get {result_mean}" + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_lcm.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_lcm.py new file mode 100644 index 
0000000000000000000000000000000000000000..f54970e0eba33baa0cbdc7d868a339c0f60bf189 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_lcm.py @@ -0,0 +1,300 @@ +import tempfile +from typing import Dict, List, Tuple + +import torch + +from diffusers import LCMScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class LCMSchedulerTest(SchedulerCommonTest): + scheduler_classes = (LCMScheduler,) + forward_default_kwargs = (("num_inference_steps", 10),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.00085, + "beta_end": 0.0120, + "beta_schedule": "scaled_linear", + "prediction_type": "epsilon", + } + + config.update(**kwargs) + return config + + @property + def default_valid_timestep(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + timestep = scheduler.timesteps[-1] + return timestep + + def test_timesteps(self): + for timesteps in [100, 500, 1000]: + # 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is + self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]: + self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample) + + def test_thresholding(self): + self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs( + time_step=self.default_valid_timestep, + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_time_indices(self): + # Get default timestep schedule. 
+ kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + for t in timesteps: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + # Hardcoded for now + for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]): + self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) + + # Override test_add_noise_device because the hardcoded num_inference_steps of 100 doesn't work + # for LCMScheduler under default settings + def test_add_noise_device(self, num_inference_steps=10): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + sample = self.dummy_sample.to(torch_device) + scaled_sample = scheduler.scale_model_input(sample, 0.0) + self.assertEqual(sample.shape, scaled_sample.shape) + + noise = torch.randn(scaled_sample.shape).to(torch_device) + t = scheduler.timesteps[5][None] + noised = scheduler.add_noise(scaled_sample, noise, t) + self.assertEqual(noised.shape, scaled_sample.shape) + + # Override test_from_save_pretrained because it hardcodes a timestep of 1 + def test_from_save_pretrained(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + timestep = self.default_valid_timestep + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + scheduler.set_timesteps(num_inference_steps) + new_scheduler.set_timesteps(num_inference_steps) + + kwargs["generator"] = torch.manual_seed(0) + output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample + + kwargs["generator"] = torch.manual_seed(0) + new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + # Override test_step_shape because uses 0 and 1 as hardcoded timesteps + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + scheduler.set_timesteps(num_inference_steps) + + timestep_0 = scheduler.timesteps[-2] + timestep_1 = scheduler.timesteps[-1] + + output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + # Override test_set_scheduler_outputs_equivalence since it uses 0 as a hardcoded timestep + def test_scheduler_outputs_equivalence(self): + def set_nan_tensor_to_zero(t): + t[t != t] = 0 + return t + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for 
tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + torch.allclose( + set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 + ), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" + f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." + ), + ) + + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", 50) + + timestep = self.default_valid_timestep + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + scheduler.set_timesteps(num_inference_steps) + kwargs["generator"] = torch.manual_seed(0) + outputs_dict = scheduler.step(residual, timestep, sample, **kwargs) + + scheduler.set_timesteps(num_inference_steps) + kwargs["generator"] = torch.manual_seed(0) + outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs) + + recursive_check(outputs_tuple, outputs_dict) + + def full_loop(self, num_inference_steps=10, seed=0, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(seed) + + scheduler.set_timesteps(num_inference_steps) + + for t in scheduler.timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, generator).prev_sample + + return sample + + def test_full_loop_onestep(self): + sample = self.full_loop(num_inference_steps=1) + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + # TODO: get expected sum and mean + assert abs(result_sum.item() - 18.7097) < 1e-3 + assert abs(result_mean.item() - 0.0244) < 1e-3 + + def test_full_loop_multistep(self): + sample = self.full_loop(num_inference_steps=10) + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + # TODO: get expected sum and mean + assert abs(result_sum.item() - 197.7616) < 1e-3 + assert abs(result_mean.item() - 0.2575) < 1e-3 + + def test_custom_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + + scheduler.set_timesteps(timesteps=timesteps) + + scheduler_timesteps = scheduler.timesteps + + for i, timestep in enumerate(scheduler_timesteps): + if i == len(timesteps) - 1: + expected_prev_t = -1 + else: + expected_prev_t = timesteps[i + 1] + + prev_t = scheduler.previous_timestep(timestep) + prev_t = prev_t.item() + + self.assertEqual(prev_t, expected_prev_t) + + def test_custom_timesteps_increasing_order(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + 
timesteps = [100, 87, 50, 51, 0] + + with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): + scheduler.set_timesteps(timesteps=timesteps) + + def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + num_inference_steps = len(timesteps) + + with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): + scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) + + def test_custom_timesteps_too_large(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [scheduler.config.num_train_timesteps] + + with self.assertRaises( + ValueError, + msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", + ): + scheduler.set_timesteps(timesteps=timesteps) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_lms.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_lms.py new file mode 100644 index 0000000000000000000000000000000000000000..c4abca3ac973dc00870efd564e66bcdf53c2bd6e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_lms.py @@ -0,0 +1,176 @@ +import torch + +from diffusers import LMSDiscreteScheduler + +from ..testing_utils import torch_device +from .test_schedulers import SchedulerCommonTest + + +class LMSDiscreteSchedulerTest(SchedulerCommonTest): + scheduler_classes = (LMSDiscreteScheduler,) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_time_indices(self): + for t in [0, 500, 800]: + self.check_over_forward(time_step=t) + + def test_full_loop_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 1006.388) < 1e-2 + assert abs(result_mean.item() - 1.31) < 1e-3 + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config 
= self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 0.0017) < 1e-2 + assert abs(result_mean.item() - 2.2676e-06) < 1e-3 + + def test_full_loop_device(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() + sample = sample.to(torch_device) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 1006.388) < 1e-2 + assert abs(result_mean.item() - 1.31) < 1e-3 + + def test_full_loop_device_karras_sigmas(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + sample = sample.to(torch_device) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 3812.9927) < 2e-2 + assert abs(result_mean.item() - 4.9648) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + + # add noise + t_start = self.num_inference_steps - 2 + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 27663.6895) < 1e-2 + assert abs(result_mean.item() - 36.0204) < 1e-3 + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_pndm.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_pndm.py new file mode 100644 index 0000000000000000000000000000000000000000..13c6904682223de2382b2d0d7b38e4a435f97988 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_pndm.py @@ -0,0 +1,244 @@ +import tempfile +import unittest + +import torch + +from diffusers import PNDMScheduler + +from .test_schedulers import SchedulerCommonTest + + +class PNDMSchedulerTest(SchedulerCommonTest): + scheduler_classes = (PNDMScheduler,) + forward_default_kwargs = (("num_inference_steps", 50),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.ets = dummy_past_residuals[:] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.ets = dummy_past_residuals[:] + + output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + @unittest.skip("Test not supported.") + def test_from_save_pretrained(self): + pass + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting timesteps) + scheduler.ets = dummy_past_residuals[:] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.ets = dummy_past_residuals[:] + + output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs 
are not identical" + + output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.prk_timesteps): + residual = model(sample, t) + sample = scheduler.step_prk(residual, t, sample).prev_sample + + for i, t in enumerate(scheduler.plms_timesteps): + residual = model(sample, t) + sample = scheduler.step_plms(residual, t, sample).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] + scheduler.ets = dummy_past_residuals[:] + + output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample + output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample + output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_steps_offset(self): + for steps_offset in [0, 1]: + self.check_over_configs(steps_offset=steps_offset) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(steps_offset=1) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(10) + assert torch.equal( + scheduler.timesteps, + torch.LongTensor( + [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] + ), + ) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "squaredcos_cap_v2"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_time_indices(self): + for t in [1, 5, 10]: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): + 
self.check_over_forward(num_inference_steps=num_inference_steps) + + def test_pow_of_3_inference_steps(self): + # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 + num_inference_steps = 27 + + for scheduler_class in self.scheduler_classes: + sample = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + + # before power of 3 fix, would error on first step, so we only need to do two + for i, t in enumerate(scheduler.prk_timesteps[:2]): + sample = scheduler.step_prk(residual, t, sample).prev_sample + + def test_inference_plms_no_past_residuals(self): + with self.assertRaises(ValueError): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 198.1318) < 1e-2 + assert abs(result_mean.item() - 0.2580) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 67.3986) < 1e-2 + assert abs(result_mean.item() - 0.0878) < 1e-3 + + def test_full_loop_with_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 230.0399) < 1e-2 + assert abs(result_mean.item() - 0.2995) < 1e-3 + + def test_full_loop_with_no_set_alpha_to_one(self): + # We specify different beta, so that the first alpha is 0.99 + sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 186.9482) < 1e-2 + assert abs(result_mean.item() - 0.2434) < 1e-3 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_sasolver.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_sasolver.py new file mode 100644 index 0000000000000000000000000000000000000000..2c2d2c0397bb620bbf556ac8d4701eb17494e36b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_sasolver.py @@ -0,0 +1,200 @@ +import torch + +from diffusers import SASolverScheduler + +from ..testing_utils import require_torchsde, torch_device +from .test_schedulers import SchedulerCommonTest + + +@require_torchsde +class SASolverSchedulerTest(SchedulerCommonTest): + scheduler_classes = (SASolverScheduler,) + forward_default_kwargs = (("num_inference_steps", 10),) + num_inference_steps = 10 + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1100, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + } + + config.update(**kwargs) + return config + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + 
scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[ + : max( + scheduler.config.predictor_order, + scheduler.config.corrector_order - 1, + ) + ] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_timesteps(self): + for timesteps in [10, 50, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]): + self.check_over_configs(beta_start=beta_start, beta_end=beta_end) + + def test_schedules(self): + for schedule in ["linear", "scaled_linear"]: + self.check_over_configs(beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_full_loop_no_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + generator = torch.manual_seed(0) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t, generator=generator) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu"]: + assert abs(result_sum.item() - 337.394287109375) < 1e-2 + assert abs(result_mean.item() - 0.43931546807289124) < 1e-3 + elif torch_device in ["cuda"]: + assert abs(result_sum.item() - 329.1999816894531) < 1e-2 + assert abs(result_mean.item() - 0.4286458194255829) < 1e-3 + + def test_full_loop_with_v_prediction(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(prediction_type="v_prediction") + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps) + + model = self.dummy_model() + sample = self.dummy_sample_deter * scheduler.init_noise_sigma + sample = sample.to(torch_device) + generator = torch.manual_seed(0) + + for i, t in enumerate(scheduler.timesteps): + sample = scheduler.scale_model_input(sample, t, generator=generator) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu"]: + assert abs(result_sum.item() - 193.1467742919922) < 1e-2 + assert abs(result_mean.item() - 0.2514931857585907) < 1e-3 + 
elif torch_device in ["cuda"]: + assert abs(result_sum.item() - 193.4154052734375) < 1e-2 + assert abs(result_mean.item() - 0.2518429756164551) < 1e-3 + + def test_full_loop_device(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + generator = torch.manual_seed(0) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu"]: + assert abs(result_sum.item() - 337.394287109375) < 1e-2 + assert abs(result_mean.item() - 0.43931546807289124) < 1e-3 + elif torch_device in ["cuda"]: + assert abs(result_sum.item() - 337.394287109375) < 1e-2 + assert abs(result_mean.item() - 0.4393154978752136) < 1e-3 + + def test_full_loop_device_karras_sigmas(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True) + + scheduler.set_timesteps(self.num_inference_steps, device=torch_device) + + model = self.dummy_model() + sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma + sample = sample.to(torch_device) + generator = torch.manual_seed(0) + + for t in scheduler.timesteps: + sample = scheduler.scale_model_input(sample, t) + + model_output = model(sample, t) + + output = scheduler.step(model_output, t, sample, generator=generator) + sample = output.prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + if torch_device in ["cpu"]: + assert abs(result_sum.item() - 837.2554931640625) < 1e-2 + assert abs(result_mean.item() - 1.0901764631271362) < 1e-2 + elif torch_device in ["cuda"]: + assert abs(result_sum.item() - 837.25537109375) < 1e-2 + assert abs(result_mean.item() - 1.0901763439178467) < 1e-2 + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_score_sde_ve.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_score_sde_ve.py new file mode 100644 index 0000000000000000000000000000000000000000..08c30f9b1e0c2ce1f7baab82f5076efabe465a69 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_score_sde_ve.py @@ -0,0 +1,189 @@ +import tempfile +import unittest + +import numpy as np +import torch + +from diffusers import ScoreSdeVeScheduler + + +class ScoreSdeVeSchedulerTest(unittest.TestCase): + # TODO adapt with class SchedulerCommonTest (scheduler needs Numpy Integration) + scheduler_classes = (ScoreSdeVeScheduler,) + forward_default_kwargs = () + + @property + def dummy_sample(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + sample = torch.rand((batch_size, num_channels, height, width)) + + return sample + + @property + def dummy_sample_deter(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + num_elems = batch_size * num_channels * height * width + sample 
= torch.arange(num_elems) + sample = sample.reshape(num_channels, height, width, batch_size) + sample = sample / num_elems + sample = sample.permute(3, 0, 1, 2) + + return sample + + def dummy_model(self): + def model(sample, t, *args): + return sample * t / (t + 1) + + return model + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 2000, + "snr": 0.15, + "sigma_min": 0.01, + "sigma_max": 1348, + "sampling_eps": 1e-5, + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + + for scheduler_class in self.scheduler_classes: + sample = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + output = scheduler.step_pred( + residual, time_step, sample, generator=torch.manual_seed(0), **kwargs + ).prev_sample + new_output = new_scheduler.step_pred( + residual, time_step, sample, generator=torch.manual_seed(0), **kwargs + ).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample + new_output = new_scheduler.step_correct( + residual, sample, generator=torch.manual_seed(0), **kwargs + ).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical" + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + kwargs.update(forward_kwargs) + + for scheduler_class in self.scheduler_classes: + sample = self.dummy_sample + residual = 0.1 * sample + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + output = scheduler.step_pred( + residual, time_step, sample, generator=torch.manual_seed(0), **kwargs + ).prev_sample + new_output = new_scheduler.step_pred( + residual, time_step, sample, generator=torch.manual_seed(0), **kwargs + ).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample + new_output = new_scheduler.step_correct( + residual, sample, generator=torch.manual_seed(0), **kwargs + ).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler correction are not identical" + + def test_timesteps(self): + for timesteps in [10, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_sigmas(self): + for sigma_min, sigma_max in zip([0.0001, 0.001, 0.01], [1, 100, 1000]): + self.check_over_configs(sigma_min=sigma_min, sigma_max=sigma_max) + + def test_time_indices(self): + for t in [0.1, 0.5, 0.75]: + self.check_over_forward(time_step=t) + + def test_full_loop_no_noise(self): + kwargs = dict(self.forward_default_kwargs) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 3 + + model = self.dummy_model() + sample = self.dummy_sample_deter + + 
scheduler.set_sigmas(num_inference_steps) + scheduler.set_timesteps(num_inference_steps) + generator = torch.manual_seed(0) + + for i, t in enumerate(scheduler.timesteps): + sigma_t = scheduler.sigmas[i] + + for _ in range(scheduler.config.correct_steps): + with torch.no_grad(): + model_output = model(sample, sigma_t) + sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample + + with torch.no_grad(): + model_output = model(sample, sigma_t) + + output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs) + sample, _ = output.prev_sample, output.prev_sample_mean + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert np.isclose(result_sum.item(), 14372758528.0) + assert np.isclose(result_mean.item(), 18714530.0) + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample + output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_tcd.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_tcd.py new file mode 100644 index 0000000000000000000000000000000000000000..e95c536c7f0a279df379d872a335c1e6b30d6ccc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_tcd.py @@ -0,0 +1,180 @@ +import torch + +from diffusers import TCDScheduler + +from .test_schedulers import SchedulerCommonTest + + +class TCDSchedulerTest(SchedulerCommonTest): + scheduler_classes = (TCDScheduler,) + forward_default_kwargs = (("num_inference_steps", 10),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.00085, + "beta_end": 0.0120, + "beta_schedule": "scaled_linear", + "prediction_type": "epsilon", + } + + config.update(**kwargs) + return config + + @property + def default_num_inference_steps(self): + return 10 + + @property + def default_valid_timestep(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + timestep = scheduler.timesteps[-1] + return timestep + + def test_timesteps(self): + for timesteps in [100, 500, 1000]: + # 0 is not guaranteed to be in the timestep schedule, but timesteps - 1 is + self.check_over_configs(time_step=timesteps - 1, num_train_timesteps=timesteps) + + def test_betas(self): + for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): + self.check_over_configs(time_step=self.default_valid_timestep, beta_start=beta_start, beta_end=beta_end) + + 
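    # Note on the `time_step` arguments used by the config checks above: TCD's timestep
    # schedule is not guaranteed to contain t=0 (only `num_train_timesteps - 1` is always
    # present), so these tests pass either `timesteps - 1` or `self.default_valid_timestep`,
    # which resolves to `scheduler.timesteps[-1]` after `set_timesteps(num_inference_steps)`
    # and is therefore always a valid entry of the schedule.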
def test_schedules(self): + for schedule in ["linear", "scaled_linear", "squaredcos_cap_v2"]: + self.check_over_configs(time_step=self.default_valid_timestep, beta_schedule=schedule) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(time_step=self.default_valid_timestep, prediction_type=prediction_type) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(time_step=self.default_valid_timestep, clip_sample=clip_sample) + + def test_thresholding(self): + self.check_over_configs(time_step=self.default_valid_timestep, thresholding=False) + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs( + time_step=self.default_valid_timestep, + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + ) + + def test_time_indices(self): + # Get default timestep schedule. + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + timesteps = scheduler.timesteps + for t in timesteps: + self.check_over_forward(time_step=t) + + def test_inference_steps(self): + # Hardcoded for now + for t, num_inference_steps in zip([99, 39, 39, 19], [10, 25, 26, 50]): + self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) + + def full_loop(self, num_inference_steps=10, seed=0, **config): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + eta = 0.0 # refer to gamma in the paper + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(seed) + scheduler.set_timesteps(num_inference_steps) + + for t in scheduler.timesteps: + residual = model(sample, t) + sample = scheduler.step(residual, t, sample, eta, generator).prev_sample + + return sample + + def test_full_loop_onestep_deter(self): + sample = self.full_loop(num_inference_steps=1) + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 29.8715) < 1e-3 # 0.0778918 + assert abs(result_mean.item() - 0.0389) < 1e-3 + + def test_full_loop_multistep_deter(self): + sample = self.full_loop(num_inference_steps=10) + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 181.2040) < 1e-3 + assert abs(result_mean.item() - 0.2359) < 1e-3 + + def test_custom_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + + scheduler.set_timesteps(timesteps=timesteps) + + scheduler_timesteps = scheduler.timesteps + + for i, timestep in enumerate(scheduler_timesteps): + if i == len(timesteps) - 1: + expected_prev_t = -1 + else: + expected_prev_t = timesteps[i + 1] + + prev_t = scheduler.previous_timestep(timestep) + prev_t = prev_t.item() + + self.assertEqual(prev_t, expected_prev_t) + + def test_custom_timesteps_increasing_order(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 51, 0] + + with 
self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."): + scheduler.set_timesteps(timesteps=timesteps) + + def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [100, 87, 50, 1, 0] + num_inference_steps = len(timesteps) + + with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."): + scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps) + + def test_custom_timesteps_too_large(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = [scheduler.config.num_train_timesteps] + + with self.assertRaises( + ValueError, + msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", + ): + scheduler.set_timesteps(timesteps=timesteps) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_unclip.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_unclip.py new file mode 100644 index 0000000000000000000000000000000000000000..9e66a328f42e228a9ed02f6ca52d07f7e8944d64 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_unclip.py @@ -0,0 +1,141 @@ +import unittest + +import torch + +from diffusers import UnCLIPScheduler + +from .test_schedulers import SchedulerCommonTest + + +# UnCLIPScheduler is a modified DDPMScheduler with a subset of the configuration. +class UnCLIPSchedulerTest(SchedulerCommonTest): + scheduler_classes = (UnCLIPScheduler,) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "variance_type": "fixed_small_log", + "clip_sample": True, + "clip_sample_range": 1.0, + "prediction_type": "epsilon", + } + + config.update(**kwargs) + return config + + def test_timesteps(self): + for timesteps in [1, 5, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_variance_type(self): + for variance in ["fixed_small_log", "learned_range"]: + self.check_over_configs(variance_type=variance) + + def test_clip_sample(self): + for clip_sample in [True, False]: + self.check_over_configs(clip_sample=clip_sample) + + def test_clip_sample_range(self): + for clip_sample_range in [1, 5, 10, 20]: + self.check_over_configs(clip_sample_range=clip_sample_range) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_time_indices(self): + for time_step in [0, 500, 999]: + for prev_timestep in [None, 5, 100, 250, 500, 750]: + if prev_timestep is not None and prev_timestep >= time_step: + continue + + self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep) + + def test_variance_fixed_small_log(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log") + scheduler = scheduler_class(**scheduler_config) + + assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5 + assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5 + + def test_variance_learned_range(self): + scheduler_class = self.scheduler_classes[0] + 
scheduler_config = self.get_scheduler_config(variance_type="learned_range") + scheduler = scheduler_class(**scheduler_config) + + predicted_variance = 0.5 + + assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5 + assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5 + assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5 + + def test_full_loop(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + timesteps = scheduler.timesteps + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + for i, t in enumerate(timesteps): + # 1. predict noise residual + residual = model(sample, t) + + # 2. predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 252.2682495) < 1e-2 + assert abs(result_mean.item() - 0.3284743) < 1e-3 + + def test_full_loop_skip_timesteps(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + scheduler.set_timesteps(25) + + timesteps = scheduler.timesteps + + model = self.dummy_model() + sample = self.dummy_sample_deter + generator = torch.manual_seed(0) + + for i, t in enumerate(timesteps): + # 1. predict noise residual + residual = model(sample, t) + + if i + 1 == timesteps.shape[0]: + prev_timestep = None + else: + prev_timestep = timesteps[i + 1] + + # 2. 
predict previous mean of sample x_t-1 + pred_prev_sample = scheduler.step( + residual, t, sample, prev_timestep=prev_timestep, generator=generator + ).prev_sample + + sample = pred_prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 258.2044983) < 1e-2 + assert abs(result_mean.item() - 0.3362038) < 1e-3 + + @unittest.skip("Test not supported.") + def test_trained_betas(self): + pass + + @unittest.skip("Test not supported.") + def test_add_noise_device(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_unipc.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_unipc.py new file mode 100644 index 0000000000000000000000000000000000000000..197c831cb015e55e329edde67bc6f8014c53c42a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_unipc.py @@ -0,0 +1,401 @@ +import tempfile + +import torch + +from diffusers import ( + DEISMultistepScheduler, + DPMSolverMultistepScheduler, + DPMSolverSinglestepScheduler, + UniPCMultistepScheduler, +) + +from .test_schedulers import SchedulerCommonTest + + +class UniPCMultistepSchedulerTest(SchedulerCommonTest): + scheduler_classes = (UniPCMultistepScheduler,) + forward_default_kwargs = (("num_inference_steps", 25),) + + def get_scheduler_config(self, **kwargs): + config = { + "num_train_timesteps": 1000, + "beta_start": 0.0001, + "beta_end": 0.02, + "beta_schedule": "linear", + "solver_order": 2, + "solver_type": "bh2", + "final_sigmas_type": "sigma_min", + } + + config.update(**kwargs) + return config + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + new_scheduler.set_timesteps(num_inference_steps) + # copy over dummy past residuals + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output, new_output = sample, sample + for t in range(time_step, time_step + scheduler.config.solver_order + 1): + t = scheduler.timesteps[t] + output = scheduler.step(residual, t, output, **kwargs).prev_sample + new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", None) + sample = self.dummy_sample + residual = 0.1 * sample + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residuals (must be after setting 
timesteps) + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + # copy over dummy past residuals + new_scheduler.set_timesteps(num_inference_steps) + + # copy over dummy past residual (must be after setting timesteps) + new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order] + + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def full_loop(self, scheduler=None, **config): + if scheduler is None: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + return sample + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # copy over dummy past residuals (must be done after set_timesteps) + dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10] + scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order] + + time_step_0 = scheduler.timesteps[5] + time_step_1 = scheduler.timesteps[6] + + output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_switch(self): + # make sure that iterating over schedulers with same config names gives same results + # for defaults + scheduler = UniPCMultistepScheduler(**self.get_scheduler_config()) + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2464) < 1e-3 + + scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) + scheduler = DEISMultistepScheduler.from_config(scheduler.config) + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + scheduler = UniPCMultistepScheduler.from_config(scheduler.config) + + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2464) < 1e-3 + + def test_timesteps(self): + for timesteps in [25, 50, 100, 999, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def 
test_thresholding(self): + self.check_over_configs(thresholding=False) + for order in [1, 2, 3]: + for solver_type in ["bh1", "bh2"]: + for threshold in [0.5, 1.0, 2.0]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + thresholding=True, + prediction_type=prediction_type, + sample_max_value=threshold, + solver_order=order, + solver_type=solver_type, + ) + + def test_prediction_type(self): + for prediction_type in ["epsilon", "v_prediction"]: + self.check_over_configs(prediction_type=prediction_type) + + def test_rescale_betas_zero_snr(self): + for rescale_betas_zero_snr in [True, False]: + self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr) + + def test_solver_order_and_type(self): + for solver_type in ["bh1", "bh2"]: + for order in [1, 2, 3]: + for prediction_type in ["epsilon", "sample"]: + self.check_over_configs( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + ) + sample = self.full_loop( + solver_order=order, + solver_type=solver_type, + prediction_type=prediction_type, + ) + assert not torch.isnan(sample).any(), "Samples have nan numbers" + + def test_lower_order_final(self): + self.check_over_configs(lower_order_final=True) + self.check_over_configs(lower_order_final=False) + + def test_inference_steps(self): + for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: + self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0) + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2464) < 1e-3 + + def test_full_loop_with_karras(self): + sample = self.full_loop(use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2925) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.1014) < 1e-3 + + def test_full_loop_with_karras_and_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.1966) < 1e-3 + + def test_fp16_support(self): + for order in [1, 2, 3]: + for solver_type in ["bh1", "bh2"]: + for prediction_type in ["epsilon", "sample", "v_prediction"]: + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config( + thresholding=True, + dynamic_thresholding_ratio=0, + prediction_type=prediction_type, + solver_order=order, + solver_type=solver_type, + ) + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + model = self.dummy_model() + sample = self.dummy_sample_deter.half() + scheduler.set_timesteps(num_inference_steps) + + for i, t in enumerate(scheduler.timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + assert sample.dtype == torch.float16 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 8 + + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for 
i, t in enumerate(timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_sum = torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 315.5757) < 1e-2, f" expected result sum 315.5757, but get {result_sum}" + assert abs(result_mean.item() - 0.4109) < 1e-3, f" expected result mean 0.4109, but get {result_mean}" + + +class UniPCMultistepScheduler1DTest(UniPCMultistepSchedulerTest): + @property + def dummy_sample(self): + batch_size = 4 + num_channels = 3 + width = 8 + + sample = torch.rand((batch_size, num_channels, width)) + + return sample + + @property + def dummy_noise_deter(self): + batch_size = 4 + num_channels = 3 + width = 8 + + num_elems = batch_size * num_channels * width + sample = torch.arange(num_elems).flip(-1) + sample = sample.reshape(num_channels, width, batch_size) + sample = sample / num_elems + sample = sample.permute(2, 0, 1) + + return sample + + @property + def dummy_sample_deter(self): + batch_size = 4 + num_channels = 3 + width = 8 + + num_elems = batch_size * num_channels * width + sample = torch.arange(num_elems) + sample = sample.reshape(num_channels, width, batch_size) + sample = sample / num_elems + sample = sample.permute(2, 0, 1) + + return sample + + def test_switch(self): + # make sure that iterating over schedulers with same config names gives same results + # for defaults + scheduler = UniPCMultistepScheduler(**self.get_scheduler_config()) + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2441) < 1e-3 + + scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config) + scheduler = DEISMultistepScheduler.from_config(scheduler.config) + scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config) + scheduler = UniPCMultistepScheduler.from_config(scheduler.config) + + sample = self.full_loop(scheduler=scheduler) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2441) < 1e-3 + + def test_full_loop_no_noise(self): + sample = self.full_loop() + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2441) < 1e-3 + + def test_full_loop_with_karras(self): + sample = self.full_loop(use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.2898) < 1e-3 + + def test_full_loop_with_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction") + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.1014) < 1e-3 + + def test_full_loop_with_karras_and_v_prediction(self): + sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_mean.item() - 0.1944) < 1e-3 + + def test_full_loop_with_noise(self): + scheduler_class = self.scheduler_classes[0] + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + num_inference_steps = 10 + t_start = 8 + + model = self.dummy_model() + sample = self.dummy_sample_deter + scheduler.set_timesteps(num_inference_steps) + + # add noise + noise = self.dummy_noise_deter + timesteps = scheduler.timesteps[t_start * scheduler.order :] + sample = scheduler.add_noise(sample, noise, timesteps[:1]) + + for i, t in enumerate(timesteps): + residual = model(sample, t) + sample = scheduler.step(residual, t, sample).prev_sample + + result_sum = 
torch.sum(torch.abs(sample)) + result_mean = torch.mean(torch.abs(sample)) + + assert abs(result_sum.item() - 39.0870) < 1e-2, f" expected result sum 39.0870, but get {result_sum}" + assert abs(result_mean.item() - 0.4072) < 1e-3, f" expected result mean 0.4072, but get {result_mean}" + + def test_beta_sigmas(self): + self.check_over_configs(use_beta_sigmas=True) + + def test_exponential_sigmas(self): + self.check_over_configs(use_exponential_sigmas=True) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_vq_diffusion.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_vq_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..c12825ba2e620550910be6d50a273f7043edc7fe --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_scheduler_vq_diffusion.py @@ -0,0 +1,59 @@ +import unittest + +import torch +import torch.nn.functional as F + +from diffusers import VQDiffusionScheduler + +from .test_schedulers import SchedulerCommonTest + + +class VQDiffusionSchedulerTest(SchedulerCommonTest): + scheduler_classes = (VQDiffusionScheduler,) + + def get_scheduler_config(self, **kwargs): + config = { + "num_vec_classes": 4097, + "num_train_timesteps": 100, + } + + config.update(**kwargs) + return config + + def dummy_sample(self, num_vec_classes): + batch_size = 4 + height = 8 + width = 8 + + sample = torch.randint(0, num_vec_classes, (batch_size, height * width)) + + return sample + + @property + def dummy_sample_deter(self): + assert False + + def dummy_model(self, num_vec_classes): + def model(sample, t, *args): + batch_size, num_latent_pixels = sample.shape + logits = torch.rand((batch_size, num_vec_classes - 1, num_latent_pixels)) + return_value = F.log_softmax(logits.double(), dim=1).float() + return return_value + + return model + + def test_timesteps(self): + for timesteps in [2, 5, 100, 1000]: + self.check_over_configs(num_train_timesteps=timesteps) + + def test_num_vec_classes(self): + for num_vec_classes in [5, 100, 1000, 4000]: + self.check_over_configs(num_vec_classes=num_vec_classes) + + def test_time_indices(self): + for t in [0, 50, 99]: + self.check_over_forward(time_step=t) + + @unittest.skip("Test not supported.") + def test_add_noise_device(self): + pass diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_schedulers.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_schedulers.py new file mode 100644 index 0000000000000000000000000000000000000000..5a8380e659fcebcf35283ef0861c0b95e6185271 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/schedulers/test_schedulers.py @@ -0,0 +1,868 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
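# This module provides the shared scheduler test harness: `SchedulerCommonTest` supplies dummy
# samples and a dummy model plus generic checks (save_config/from_pretrained round-trips,
# step-output shape, tuple-vs-dict output equivalence, add_noise device handling), while the
# small `SchedulerObject*` classes and `SchedulerBaseTests` exercise config loading across
# compatible scheduler classes. Concrete scheduler tests override `scheduler_classes`,
# `forward_default_kwargs`, and `get_scheduler_config`.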
+import inspect +import json +import os +import tempfile +import unittest +import uuid +from typing import Dict, List, Tuple + +import numpy as np +import torch +from huggingface_hub import delete_repo + +import diffusers +from diffusers import ( + CMStochasticIterativeScheduler, + DDIMScheduler, + DEISMultistepScheduler, + DiffusionPipeline, + EDMEulerScheduler, + EulerAncestralDiscreteScheduler, + EulerDiscreteScheduler, + IPNDMScheduler, + LMSDiscreteScheduler, + UniPCMultistepScheduler, + VQDiffusionScheduler, +) +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.schedulers.scheduling_utils import SchedulerMixin +from diffusers.utils import logging + +from ..others.test_utils import TOKEN, USER, is_staging_test +from ..testing_utils import CaptureLogger, torch_device + + +torch.backends.cuda.matmul.allow_tf32 = False + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class SchedulerObject(SchedulerMixin, ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 3], + ): + pass + + +class SchedulerObject2(SchedulerMixin, ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + f=[1, 3], + ): + pass + + +class SchedulerObject3(SchedulerMixin, ConfigMixin): + config_name = "config.json" + + @register_to_config + def __init__( + self, + a=2, + b=5, + c=(2, 5), + d="for diffusion", + e=[1, 3], + f=[1, 3], + ): + pass + + +class SchedulerBaseTests(unittest.TestCase): + def test_save_load_from_different_config(self): + obj = SchedulerObject() + + # mock add obj class to `diffusers` + setattr(diffusers, "SchedulerObject", SchedulerObject) + logger = logging.get_logger("diffusers.configuration_utils") + + with tempfile.TemporaryDirectory() as tmpdirname: + obj.save_config(tmpdirname) + with CaptureLogger(logger) as cap_logger_1: + config = SchedulerObject2.load_config(tmpdirname) + new_obj_1 = SchedulerObject2.from_config(config) + + # now save a config parameter that is not expected + with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f: + data = json.load(f) + data["unexpected"] = True + + with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f: + json.dump(data, f) + + with CaptureLogger(logger) as cap_logger_2: + config = SchedulerObject.load_config(tmpdirname) + new_obj_2 = SchedulerObject.from_config(config) + + with CaptureLogger(logger) as cap_logger_3: + config = SchedulerObject2.load_config(tmpdirname) + new_obj_3 = SchedulerObject2.from_config(config) + + assert new_obj_1.__class__ == SchedulerObject2 + assert new_obj_2.__class__ == SchedulerObject + assert new_obj_3.__class__ == SchedulerObject2 + + assert cap_logger_1.out == "" + assert ( + cap_logger_2.out + == "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and" + " will" + " be ignored. 
Please verify your config.json configuration file.\n" + ) + assert cap_logger_2.out.replace("SchedulerObject", "SchedulerObject2") == cap_logger_3.out + + def test_save_load_compatible_schedulers(self): + SchedulerObject2._compatibles = ["SchedulerObject"] + SchedulerObject._compatibles = ["SchedulerObject2"] + + obj = SchedulerObject() + + # mock add obj class to `diffusers` + setattr(diffusers, "SchedulerObject", SchedulerObject) + setattr(diffusers, "SchedulerObject2", SchedulerObject2) + logger = logging.get_logger("diffusers.configuration_utils") + + with tempfile.TemporaryDirectory() as tmpdirname: + obj.save_config(tmpdirname) + + # now save a config parameter that is expected by another class, but not origin class + with open(os.path.join(tmpdirname, SchedulerObject.config_name), "r") as f: + data = json.load(f) + data["f"] = [0, 0] + data["unexpected"] = True + + with open(os.path.join(tmpdirname, SchedulerObject.config_name), "w") as f: + json.dump(data, f) + + with CaptureLogger(logger) as cap_logger: + config = SchedulerObject.load_config(tmpdirname) + new_obj = SchedulerObject.from_config(config) + + assert new_obj.__class__ == SchedulerObject + + assert ( + cap_logger.out + == "The config attributes {'unexpected': True} were passed to SchedulerObject, but are not expected and" + " will" + " be ignored. Please verify your config.json configuration file.\n" + ) + + def test_save_load_from_different_config_comp_schedulers(self): + SchedulerObject3._compatibles = ["SchedulerObject", "SchedulerObject2"] + SchedulerObject2._compatibles = ["SchedulerObject", "SchedulerObject3"] + SchedulerObject._compatibles = ["SchedulerObject2", "SchedulerObject3"] + + obj = SchedulerObject() + + # mock add obj class to `diffusers` + setattr(diffusers, "SchedulerObject", SchedulerObject) + setattr(diffusers, "SchedulerObject2", SchedulerObject2) + setattr(diffusers, "SchedulerObject3", SchedulerObject3) + logger = logging.get_logger("diffusers.configuration_utils") + logger.setLevel(diffusers.logging.INFO) + + with tempfile.TemporaryDirectory() as tmpdirname: + obj.save_config(tmpdirname) + + with CaptureLogger(logger) as cap_logger_1: + config = SchedulerObject.load_config(tmpdirname) + new_obj_1 = SchedulerObject.from_config(config) + + with CaptureLogger(logger) as cap_logger_2: + config = SchedulerObject2.load_config(tmpdirname) + new_obj_2 = SchedulerObject2.from_config(config) + + with CaptureLogger(logger) as cap_logger_3: + config = SchedulerObject3.load_config(tmpdirname) + new_obj_3 = SchedulerObject3.from_config(config) + + assert new_obj_1.__class__ == SchedulerObject + assert new_obj_2.__class__ == SchedulerObject2 + assert new_obj_3.__class__ == SchedulerObject3 + + assert cap_logger_1.out == "" + assert cap_logger_2.out == "{'f'} was not found in config. Values will be initialized to default values.\n" + assert cap_logger_3.out == "{'f'} was not found in config. 
Values will be initialized to default values.\n" + + def test_default_arguments_not_in_config(self): + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16 + ) + assert pipe.scheduler.__class__ == DDIMScheduler + + # Default for DDIMScheduler + assert pipe.scheduler.config.timestep_spacing == "leading" + + # Switch to a different one, verify we use the default for that class + pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) + assert pipe.scheduler.config.timestep_spacing == "linspace" + + # Override with kwargs + pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing") + assert pipe.scheduler.config.timestep_spacing == "trailing" + + # Verify overridden kwargs stick + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + assert pipe.scheduler.config.timestep_spacing == "trailing" + + # And stick + pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) + assert pipe.scheduler.config.timestep_spacing == "trailing" + + def test_default_solver_type_after_switch(self): + pipe = DiffusionPipeline.from_pretrained( + "hf-internal-testing/tiny-stable-diffusion-pipe", torch_dtype=torch.float16 + ) + assert pipe.scheduler.__class__ == DDIMScheduler + + pipe.scheduler = DEISMultistepScheduler.from_config(pipe.scheduler.config) + assert pipe.scheduler.config.solver_type == "logrho" + + # Switch to UniPC, verify the solver is the default + pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) + assert pipe.scheduler.config.solver_type == "bh2" + + +class SchedulerCommonTest(unittest.TestCase): + scheduler_classes = () + forward_default_kwargs = () + + @property + def default_num_inference_steps(self): + return 50 + + @property + def default_timestep(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.get("num_inference_steps", self.default_num_inference_steps) + + try: + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + timestep = scheduler.timesteps[0] + except NotImplementedError: + logger.warning( + f"The scheduler {self.__class__.__name__} does not implement a `get_scheduler_config` method." + f" `default_timestep` will be set to the default value of 1." + ) + timestep = 1 + + return timestep + + # NOTE: currently taking the convention that default_timestep > default_timestep_2 (alternatively, + # default_timestep comes earlier in the timestep schedule than default_timestep_2) + @property + def default_timestep_2(self): + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.get("num_inference_steps", self.default_num_inference_steps) + + try: + scheduler_config = self.get_scheduler_config() + scheduler = self.scheduler_classes[0](**scheduler_config) + + scheduler.set_timesteps(num_inference_steps) + if len(scheduler.timesteps) >= 2: + timestep_2 = scheduler.timesteps[1] + else: + logger.warning( + f"Using num_inference_steps from the scheduler testing class's default config leads to a timestep" + f" scheduler of length {len(scheduler.timesteps)} < 2. The default `default_timestep_2` value of 0" + f" will be used." + ) + timestep_2 = 0 + except NotImplementedError: + logger.warning( + f"The scheduler {self.__class__.__name__} does not implement a `get_scheduler_config` method." + f" `default_timestep_2` will be set to the default value of 0." 
+ ) + timestep_2 = 0 + + return timestep_2 + + @property + def dummy_sample(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + sample = torch.rand((batch_size, num_channels, height, width)) + + return sample + + @property + def dummy_noise_deter(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + num_elems = batch_size * num_channels * height * width + sample = torch.arange(num_elems).flip(-1) + sample = sample.reshape(num_channels, height, width, batch_size) + sample = sample / num_elems + sample = sample.permute(3, 0, 1, 2) + + return sample + + @property + def dummy_sample_deter(self): + batch_size = 4 + num_channels = 3 + height = 8 + width = 8 + + num_elems = batch_size * num_channels * height * width + sample = torch.arange(num_elems) + sample = sample.reshape(num_channels, height, width, batch_size) + sample = sample / num_elems + sample = sample.permute(3, 0, 1, 2) + + return sample + + def get_scheduler_config(self): + raise NotImplementedError + + def dummy_model(self): + def model(sample, t, *args): + # if t is a tensor, match the number of dimensions of sample + if isinstance(t, torch.Tensor): + num_dims = len(sample.shape) + # pad t with 1s to match num_dims + t = t.reshape(-1, *(1,) * (num_dims - 1)).to(sample.device, dtype=sample.dtype) + + return sample * t / (t + 1) + + return model + + def check_over_configs(self, time_step=0, **config): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + time_step = time_step if time_step is not None else self.default_timestep + + for scheduler_class in self.scheduler_classes: + # TODO(Suraj) - delete the following two lines once DDPM, DDIM, and PNDM have timesteps casted to float by default + if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): + time_step = float(time_step) + + scheduler_config = self.get_scheduler_config(**config) + scheduler = scheduler_class(**scheduler_config) + + if scheduler_class == CMStochasticIterativeScheduler: + # Get valid timestep based on sigma_max, which should always be in timestep schedule. + scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max) + time_step = scaled_sigma_max + + if scheduler_class == EDMEulerScheduler: + time_step = scheduler.timesteps[-1] + + if scheduler_class == VQDiffusionScheduler: + num_vec_classes = scheduler_config["num_vec_classes"] + sample = self.dummy_sample(num_vec_classes) + model = self.dummy_model(num_vec_classes) + residual = model(sample, time_step) + else: + sample = self.dummy_sample + residual = 0.1 * sample + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + new_scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # Make sure `scale_model_input` is invoked to prevent a warning + if scheduler_class == CMStochasticIterativeScheduler: + # Get valid timestep based on sigma_max, which should always be in timestep schedule. 
+ _ = scheduler.scale_model_input(sample, scaled_sigma_max) + _ = new_scheduler.scale_model_input(sample, scaled_sigma_max) + elif scheduler_class != VQDiffusionScheduler: + _ = scheduler.scale_model_input(sample, scheduler.timesteps[-1]) + _ = new_scheduler.scale_model_input(sample, scheduler.timesteps[-1]) + + # Set the seed before step() as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def check_over_forward(self, time_step=0, **forward_kwargs): + kwargs = dict(self.forward_default_kwargs) + kwargs.update(forward_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", None) + time_step = time_step if time_step is not None else self.default_timestep + + for scheduler_class in self.scheduler_classes: + if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): + time_step = float(time_step) + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + if scheduler_class == VQDiffusionScheduler: + num_vec_classes = scheduler_config["num_vec_classes"] + sample = self.dummy_sample(num_vec_classes) + model = self.dummy_model(num_vec_classes) + residual = model(sample, time_step) + else: + sample = self.dummy_sample + residual = 0.1 * sample + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + new_scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_from_save_pretrained(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", self.default_num_inference_steps) + + for scheduler_class in self.scheduler_classes: + timestep = self.default_timestep + if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): + timestep = float(timestep) + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + if scheduler_class == CMStochasticIterativeScheduler: + # Get valid timestep based on sigma_max, which should always be in timestep schedule. 
+ timestep = scheduler.sigma_to_t(scheduler.config.sigma_max) + + if scheduler_class == VQDiffusionScheduler: + num_vec_classes = scheduler_config["num_vec_classes"] + sample = self.dummy_sample(num_vec_classes) + model = self.dummy_model(num_vec_classes) + residual = model(sample, timestep) + else: + sample = self.dummy_sample + residual = 0.1 * sample + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_config(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + new_scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + output = scheduler.step(residual, timestep, sample, **kwargs).prev_sample + + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + new_output = new_scheduler.step(residual, timestep, sample, **kwargs).prev_sample + + assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" + + def test_compatibles(self): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + + scheduler = scheduler_class(**scheduler_config) + + assert all(c is not None for c in scheduler.compatibles) + + for comp_scheduler_cls in scheduler.compatibles: + comp_scheduler = comp_scheduler_cls.from_config(scheduler.config) + assert comp_scheduler is not None + + new_scheduler = scheduler_class.from_config(comp_scheduler.config) + + new_scheduler_config = {k: v for k, v in new_scheduler.config.items() if k in scheduler.config} + scheduler_diff = {k: v for k, v in new_scheduler.config.items() if k not in scheduler.config} + + # make sure that configs are essentially identical + assert new_scheduler_config == dict(scheduler.config) + + # make sure that only differences are for configs that are not in init + init_keys = inspect.signature(scheduler_class.__init__).parameters.keys() + assert set(scheduler_diff.keys()).intersection(set(init_keys)) == set() + + def test_from_pretrained(self): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + + scheduler = scheduler_class(**scheduler_config) + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_pretrained(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + # `_use_default_values` should not exist for just saved & loaded scheduler + scheduler_config = dict(scheduler.config) + del scheduler_config["_use_default_values"] + + assert scheduler_config == new_scheduler.config + + def test_step_shape(self): + kwargs = dict(self.forward_default_kwargs) + + num_inference_steps = kwargs.pop("num_inference_steps", self.default_num_inference_steps) + + timestep_0 = self.default_timestep + timestep_1 = self.default_timestep_2 + + for scheduler_class in self.scheduler_classes: + if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): + timestep_0 = float(timestep_0) + timestep_1 = float(timestep_1) + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + if scheduler_class == VQDiffusionScheduler: + num_vec_classes = 
scheduler_config["num_vec_classes"] + sample = self.dummy_sample(num_vec_classes) + model = self.dummy_model(num_vec_classes) + residual = model(sample, timestep_0) + else: + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + output_0 = scheduler.step(residual, timestep_0, sample, **kwargs).prev_sample + output_1 = scheduler.step(residual, timestep_1, sample, **kwargs).prev_sample + + self.assertEqual(output_0.shape, sample.shape) + self.assertEqual(output_0.shape, output_1.shape) + + def test_scheduler_outputs_equivalence(self): + def set_nan_tensor_to_zero(t): + t[t != t] = 0 + return t + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + torch.allclose( + set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 + ), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" + f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." + ), + ) + + kwargs = dict(self.forward_default_kwargs) + num_inference_steps = kwargs.pop("num_inference_steps", self.default_num_inference_steps) + + timestep = self.default_timestep + if len(self.scheduler_classes) > 0 and self.scheduler_classes[0] == IPNDMScheduler: + timestep = 1 + + for scheduler_class in self.scheduler_classes: + if scheduler_class in (EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler): + timestep = float(timestep) + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + if scheduler_class == CMStochasticIterativeScheduler: + # Get valid timestep based on sigma_max, which should always be in timestep schedule. 
+ timestep = scheduler.sigma_to_t(scheduler.config.sigma_max) + + if scheduler_class == VQDiffusionScheduler: + num_vec_classes = scheduler_config["num_vec_classes"] + sample = self.dummy_sample(num_vec_classes) + model = self.dummy_model(num_vec_classes) + residual = model(sample, timestep) + else: + sample = self.dummy_sample + residual = 0.1 * sample + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + outputs_dict = scheduler.step(residual, timestep, sample, **kwargs) + + if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): + scheduler.set_timesteps(num_inference_steps) + elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): + kwargs["num_inference_steps"] = num_inference_steps + + # Set the seed before state as some schedulers are stochastic like EulerAncestralDiscreteScheduler, EulerDiscreteScheduler + if "generator" in set(inspect.signature(scheduler.step).parameters.keys()): + kwargs["generator"] = torch.manual_seed(0) + outputs_tuple = scheduler.step(residual, timestep, sample, return_dict=False, **kwargs) + + recursive_check(outputs_tuple, outputs_dict) + + def test_scheduler_public_api(self): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + if scheduler_class != VQDiffusionScheduler: + self.assertTrue( + hasattr(scheduler, "init_noise_sigma"), + f"{scheduler_class} does not implement a required attribute `init_noise_sigma`", + ) + self.assertTrue( + hasattr(scheduler, "scale_model_input"), + ( + f"{scheduler_class} does not implement a required class method `scale_model_input(sample," + " timestep)`" + ), + ) + self.assertTrue( + hasattr(scheduler, "step"), + f"{scheduler_class} does not implement a required class method `step(...)`", + ) + + if scheduler_class != VQDiffusionScheduler: + sample = self.dummy_sample + if scheduler_class == CMStochasticIterativeScheduler: + # Get valid timestep based on sigma_max, which should always be in timestep schedule. + scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max) + scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max) + elif scheduler_class == EDMEulerScheduler: + scaled_sample = scheduler.scale_model_input(sample, scheduler.timesteps[-1]) + else: + scaled_sample = scheduler.scale_model_input(sample, 0.0) + self.assertEqual(sample.shape, scaled_sample.shape) + + def test_add_noise_device(self): + for scheduler_class in self.scheduler_classes: + if scheduler_class == IPNDMScheduler: + continue + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + scheduler.set_timesteps(self.default_num_inference_steps) + + sample = self.dummy_sample.to(torch_device) + if scheduler_class == CMStochasticIterativeScheduler: + # Get valid timestep based on sigma_max, which should always be in timestep schedule. 
+ scaled_sigma_max = scheduler.sigma_to_t(scheduler.config.sigma_max) + scaled_sample = scheduler.scale_model_input(sample, scaled_sigma_max) + elif scheduler_class == EDMEulerScheduler: + scaled_sample = scheduler.scale_model_input(sample, scheduler.timesteps[-1]) + else: + scaled_sample = scheduler.scale_model_input(sample, 0.0) + self.assertEqual(sample.shape, scaled_sample.shape) + + noise = torch.randn(scaled_sample.shape).to(torch_device) + t = scheduler.timesteps[5][None] + noised = scheduler.add_noise(scaled_sample, noise, t) + self.assertEqual(noised.shape, scaled_sample.shape) + + def test_deprecated_kwargs(self): + for scheduler_class in self.scheduler_classes: + has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters + has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0 + + if has_kwarg_in_model_class and not has_deprecated_kwarg: + raise ValueError( + f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated" + " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if" + " there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" + " []`" + ) + + if not has_kwarg_in_model_class and has_deprecated_kwarg: + raise ValueError( + f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated" + " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs`" + f" argument to {self.model_class}.__init__ if there are deprecated arguments or remove the" + " deprecated argument from `_deprecated_kwargs = []`" + ) + + def test_trained_betas(self): + for scheduler_class in self.scheduler_classes: + if scheduler_class in (VQDiffusionScheduler, CMStochasticIterativeScheduler): + continue + + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config, trained_betas=np.array([0.1, 0.3])) + + with tempfile.TemporaryDirectory() as tmpdirname: + scheduler.save_pretrained(tmpdirname) + new_scheduler = scheduler_class.from_pretrained(tmpdirname) + + assert scheduler.betas.tolist() == new_scheduler.betas.tolist() + + def test_getattr_is_correct(self): + for scheduler_class in self.scheduler_classes: + scheduler_config = self.get_scheduler_config() + scheduler = scheduler_class(**scheduler_config) + + # save some things to test + scheduler.dummy_attribute = 5 + scheduler.register_to_config(test_attribute=5) + + logger = logging.get_logger("diffusers.configuration_utils") + # 30 for warning + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + assert hasattr(scheduler, "dummy_attribute") + assert getattr(scheduler, "dummy_attribute") == 5 + assert scheduler.dummy_attribute == 5 + + # no warning should be thrown + assert cap_logger.out == "" + + logger = logging.get_logger("diffusers.schedulers.scheduling_utils") + # 30 for warning + logger.setLevel(30) + with CaptureLogger(logger) as cap_logger: + assert hasattr(scheduler, "save_pretrained") + fn = scheduler.save_pretrained + fn_1 = getattr(scheduler, "save_pretrained") + + assert fn == fn_1 + # no warning should be thrown + assert cap_logger.out == "" + + # warning should be thrown + with self.assertWarns(FutureWarning): + assert scheduler.test_attribute == 5 + + with self.assertWarns(FutureWarning): + assert getattr(scheduler, "test_attribute") == 5 + + with self.assertRaises(AttributeError) as error: + scheduler.does_not_exist + + assert str(error.exception) == 
f"'{type(scheduler).__name__}' object has no attribute 'does_not_exist'" + + +@is_staging_test +class SchedulerPushToHubTester(unittest.TestCase): + identifier = uuid.uuid4() + repo_id = f"test-scheduler-{identifier}" + org_repo_id = f"valid_org/{repo_id}-org" + + def test_push_to_hub(self): + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + scheduler.push_to_hub(self.repo_id, token=TOKEN) + scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}") + + assert type(scheduler) == type(scheduler_loaded) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.repo_id) + + # Push to hub via save_config + with tempfile.TemporaryDirectory() as tmp_dir: + scheduler.save_config(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) + + scheduler_loaded = DDIMScheduler.from_pretrained(f"{USER}/{self.repo_id}") + + assert type(scheduler) == type(scheduler_loaded) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.repo_id) + + def test_push_to_hub_in_organization(self): + scheduler = DDIMScheduler( + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + clip_sample=False, + set_alpha_to_one=False, + ) + scheduler.push_to_hub(self.org_repo_id, token=TOKEN) + scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id) + + assert type(scheduler) == type(scheduler_loaded) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.org_repo_id) + + # Push to hub via save_config + with tempfile.TemporaryDirectory() as tmp_dir: + scheduler.save_config(tmp_dir, repo_id=self.org_repo_id, push_to_hub=True, token=TOKEN) + + scheduler_loaded = DDIMScheduler.from_pretrained(self.org_repo_id) + + assert type(scheduler) == type(scheduler_loaded) + + # Reset repo + delete_repo(token=TOKEN, repo_id=self.org_repo_id) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/__init__.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/single_file_testing_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/single_file_testing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3510d3371ca5e5bbb5acd9e3dd3e2e4e4f83255a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/single_file_testing_utils.py @@ -0,0 +1,412 @@ +import tempfile +from io import BytesIO + +import requests +import torch +from huggingface_hub import hf_hub_download, snapshot_download + +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from diffusers.models.attention_processor import AttnProcessor + +from ..testing_utils import ( + numpy_cosine_similarity_distance, + torch_device, +) + + +def download_single_file_checkpoint(repo_id, filename, tmpdir): + path = hf_hub_download(repo_id, filename=filename, local_dir=tmpdir) + return path + + +def download_original_config(config_url, tmpdir): + original_config_file = BytesIO(requests.get(config_url).content) + path = f"{tmpdir}/config.yaml" + with open(path, "wb") as f: + f.write(original_config_file.read()) + + return path + + +def download_diffusers_config(repo_id, tmpdir): + path = snapshot_download( + repo_id, + ignore_patterns=[ + "**/*.ckpt", + "*.ckpt", + "**/*.bin", + "*.bin", + "**/*.pt", + "*.pt", + "**/*.safetensors", + "*.safetensors", + 
], + allow_patterns=["**/*.json", "*.json", "*.txt", "**/*.txt"], + local_dir=tmpdir, + ) + return path + + +class SDSingleFileTesterMixin: + single_file_kwargs = {} + + def _compare_component_configs(self, pipe, single_file_pipe): + for param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items(): + if param_name in ["torch_dtype", "architectures", "_name_or_path"]: + continue + assert pipe.text_encoder.config.to_dict()[param_name] == param_value + + PARAMS_TO_IGNORE = [ + "torch_dtype", + "_name_or_path", + "architectures", + "_use_default_values", + "_diffusers_version", + ] + for component_name, component in single_file_pipe.components.items(): + if component_name in single_file_pipe._optional_components: + continue + + # skip testing transformer based components here + # skip text encoders / safety checkers since they have already been tested + if component_name in ["text_encoder", "tokenizer", "safety_checker", "feature_extractor"]: + continue + + assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline" + assert isinstance(component, pipe.components[component_name].__class__), ( + f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same" + ) + + for param_name, param_value in component.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + + # Some pretrained configs will set upcast attention to None + # In single file loading it defaults to the value in the class __init__ which is False + if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None: + pipe.components[component_name].config[param_name] = param_value + + assert pipe.components[component_name].config[param_name] == param_value, ( + f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}" + ) + + def test_single_file_components(self, pipe=None, single_file_pipe=None): + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, safety_checker=None + ) + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_local_files_only(self, pipe=None, single_file_pipe=None): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + local_ckpt_path, safety_checker=None, local_files_only=True + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_with_original_config( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + # Not possible to infer this value when original config is provided + # we just pass it in here otherwise this test will fail + upcast_attention = pipe.unet.config.upcast_attention + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, + original_config=self.original_config, + safety_checker=None, + upcast_attention=upcast_attention, + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def 
test_single_file_components_with_original_config_local_files_only( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + # Not possible to infer this value when original config is provided + # we just pass it in here otherwise this test will fail + upcast_attention = pipe.unet.config.upcast_attention + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_original_config = download_original_config(self.original_config, tmpdir) + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + local_ckpt_path, + original_config=local_original_config, + safety_checker=None, + upcast_attention=upcast_attention, + local_files_only=True, + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_format_inference_is_same_as_pretrained(self, expected_max_diff=1e-4): + sf_pipe = self.pipeline_class.from_single_file(self.ckpt_path, safety_checker=None, **self.single_file_kwargs) + sf_pipe.unet.set_attn_processor(AttnProcessor()) + sf_pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + image_single_file = sf_pipe(**inputs).images[0] + + pipe = self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + pipe.unet.set_attn_processor(AttnProcessor()) + pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images[0] + + max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten()) + + assert max_diff < expected_max_diff, f"{image.flatten()} != {image_single_file.flatten()}" + + def test_single_file_components_with_diffusers_config( + self, + pipe=None, + single_file_pipe=None, + ): + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, config=self.repo_id, safety_checker=None + ) + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_with_diffusers_config_local_files_only( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + local_ckpt_path, config=local_diffusers_config, safety_checker=None, local_files_only=True + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_setting_pipeline_dtype_to_fp16( + self, + single_file_pipe=None, + ): + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, torch_dtype=torch.float16 + ) + + for component_name, component in single_file_pipe.components.items(): + if not isinstance(component, torch.nn.Module): + continue + + assert component.dtype == torch.float16 + + +class SDXLSingleFileTesterMixin: + def _compare_component_configs(self, pipe, single_file_pipe): + # Skip testing the text_encoder for Refiner Pipelines + if pipe.text_encoder: + for 
param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items(): + if param_name in ["torch_dtype", "architectures", "_name_or_path"]: + continue + assert pipe.text_encoder.config.to_dict()[param_name] == param_value + + for param_name, param_value in single_file_pipe.text_encoder_2.config.to_dict().items(): + if param_name in ["torch_dtype", "architectures", "_name_or_path"]: + continue + assert pipe.text_encoder_2.config.to_dict()[param_name] == param_value + + PARAMS_TO_IGNORE = [ + "torch_dtype", + "_name_or_path", + "architectures", + "_use_default_values", + "_diffusers_version", + ] + for component_name, component in single_file_pipe.components.items(): + if component_name in single_file_pipe._optional_components: + continue + + # skip text encoders since they have already been tested + if component_name in ["text_encoder", "text_encoder_2", "tokenizer", "tokenizer_2"]: + continue + + # skip safety checker if it is not present in the pipeline + if component_name in ["safety_checker", "feature_extractor"]: + continue + + assert component_name in pipe.components, f"single file {component_name} not found in pretrained pipeline" + assert isinstance(component, pipe.components[component_name].__class__), ( + f"single file {component.__class__.__name__} and pretrained {pipe.components[component_name].__class__.__name__} are not the same" + ) + + for param_name, param_value in component.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + + # Some pretrained configs will set upcast attention to None + # In single file loading it defaults to the value in the class __init__ which is False + if param_name == "upcast_attention" and pipe.components[component_name].config[param_name] is None: + pipe.components[component_name].config[param_name] = param_value + + assert pipe.components[component_name].config[param_name] == param_value, ( + f"single file {param_name}: {param_value} differs from pretrained {pipe.components[component_name].config[param_name]}" + ) + + def test_single_file_components(self, pipe=None, single_file_pipe=None): + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, safety_checker=None + ) + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + self._compare_component_configs( + pipe, + single_file_pipe, + ) + + def test_single_file_components_local_files_only( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + local_ckpt_path, safety_checker=None, local_files_only=True + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_with_original_config( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + # Not possible to infer this value when original config is provided + # we just pass it in here otherwise this test will fail + upcast_attention = pipe.unet.config.upcast_attention + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, + original_config=self.original_config, + safety_checker=None, + 
upcast_attention=upcast_attention, + ) + + self._compare_component_configs( + pipe, + single_file_pipe, + ) + + def test_single_file_components_with_original_config_local_files_only( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + # Not possible to infer this value when original config is provided + # we just pass it in here otherwise this test will fail + upcast_attention = pipe.unet.config.upcast_attention + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_original_config = download_original_config(self.original_config, tmpdir) + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + local_ckpt_path, + original_config=local_original_config, + upcast_attention=upcast_attention, + safety_checker=None, + local_files_only=True, + ) + + self._compare_component_configs( + pipe, + single_file_pipe, + ) + + def test_single_file_components_with_diffusers_config( + self, + pipe=None, + single_file_pipe=None, + ): + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, config=self.repo_id, safety_checker=None + ) + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_with_diffusers_config_local_files_only( + self, + pipe=None, + single_file_pipe=None, + ): + pipe = pipe or self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + local_ckpt_path, config=local_diffusers_config, safety_checker=None, local_files_only=True + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_format_inference_is_same_as_pretrained(self, expected_max_diff=1e-4): + sf_pipe = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16, safety_checker=None) + sf_pipe.unet.set_default_attn_processor() + sf_pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + image_single_file = sf_pipe(**inputs).images[0] + + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16, safety_checker=None) + pipe.unet.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + image = pipe(**inputs).images[0] + + max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten()) + + assert max_diff < expected_max_diff + + def test_single_file_setting_pipeline_dtype_to_fp16( + self, + single_file_pipe=None, + ): + single_file_pipe = single_file_pipe or self.pipeline_class.from_single_file( + self.ckpt_path, torch_dtype=torch.float16 + ) + + for component_name, component in single_file_pipe.components.items(): + if not isinstance(component, torch.nn.Module): + continue + + assert component.dtype == torch.float16 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_lumina2_transformer.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_lumina2_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..99d9b71395c62d9a8f094f954424a7537a549199 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_lumina2_transformer.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +from diffusers import ( + Lumina2Transformer2DModel, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + torch_device, +) + + +enable_full_determinism() + + +@require_torch_accelerator +class Lumina2Transformer2DModelSingleFileTests(unittest.TestCase): + model_class = Lumina2Transformer2DModel + ckpt_path = "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors" + alternate_keys_ckpt_paths = [ + "https://huggingface.co/Comfy-Org/Lumina_Image_2.0_Repackaged/blob/main/split_files/diffusion_models/lumina_2_model_bf16.safetensors" + ] + + repo_id = "Alpha-VLLM/Lumina-Image-2.0" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer") + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_checkpoint_loading(self): + for ckpt_path in self.alternate_keys_ckpt_paths: + backend_empty_cache(torch_device) + model = self.model_class.from_single_file(ckpt_path) + + del model + gc.collect() + backend_empty_cache(torch_device) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_autoencoder_dc_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_autoencoder_dc_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..5195f8e52f8d31023803864869567c8867f9cc0e --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_autoencoder_dc_single_file.py @@ -0,0 +1,127 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch + +from diffusers import ( + AutoencoderDC, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_hf_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class AutoencoderDCSingleFileTests(unittest.TestCase): + model_class = AutoencoderDC + ckpt_path = "https://huggingface.co/mit-han-lab/dc-ae-f32c32-sana-1.0/blob/main/model.safetensors" + repo_id = "mit-han-lab/dc-ae-f32c32-sana-1.0-diffusers" + main_input_name = "sample" + base_precision = 1e-2 + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_file_format(self, seed, shape): + return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" + + def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return image + + def test_single_file_inference_same_as_pretrained(self): + model_1 = self.model_class.from_pretrained(self.repo_id).to(torch_device) + model_2 = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id).to(torch_device) + + image = self.get_sd_image(33) + + with torch.no_grad(): + sample_1 = model_1(image).sample + sample_2 = model_2(image).sample + + assert sample_1.shape == sample_2.shape + + output_slice_1 = sample_1.flatten().float().cpu() + output_slice_2 = sample_2.flatten().float().cpu() + + assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4 + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id) + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) + + def test_single_file_in_type_variant_components(self): + # `in` variant checkpoints require passing in a `config` parameter + # in order to set the scaling factor correctly. + # `in` and `mix` variants have the same keys and we cannot automatically infer a scaling factor. 
+ # We default to using the `mix` config + repo_id = "mit-han-lab/dc-ae-f128c512-in-1.0-diffusers" + ckpt_path = "https://huggingface.co/mit-han-lab/dc-ae-f128c512-in-1.0/blob/main/model.safetensors" + + model = self.model_class.from_pretrained(repo_id) + model_single_file = self.model_class.from_single_file(ckpt_path, config=repo_id) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) + + def test_single_file_mix_type_variant_components(self): + repo_id = "mit-han-lab/dc-ae-f128c512-mix-1.0-diffusers" + ckpt_path = "https://huggingface.co/mit-han-lab/dc-ae-f128c512-mix-1.0/blob/main/model.safetensors" + + model = self.model_class.from_pretrained(repo_id) + model_single_file = self.model_class.from_single_file(ckpt_path, config=repo_id) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_controlnet_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_controlnet_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..e5214fe3f209bc2bcd371e0000e1bce0c28de4d0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_controlnet_single_file.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
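+#
+# The tests in this module check that ControlNetModel loaded with `from_single_file`
+# from the original `control_v11p_sd15_canny.pth` checkpoint reproduces the config of
+# the pretrained `lllyasviel/control_v11p_sd15_canny` repo, and that `torch_dtype` and
+# `upcast_attention` overrides passed to `from_single_file` are applied to the loaded model.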
+ +import gc +import unittest + +import torch + +from diffusers import ( + ControlNetModel, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class ControlNetModelSingleFileTests(unittest.TestCase): + model_class = ControlNetModel + ckpt_path = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" + repo_id = "lllyasviel/control_v11p_sd15_canny" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id) + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_single_file_arguments(self): + model_default = self.model_class.from_single_file(self.ckpt_path) + + assert model_default.config.upcast_attention is False + assert model_default.dtype == torch.float32 + + torch_dtype = torch.float16 + upcast_attention = True + + model = self.model_class.from_single_file( + self.ckpt_path, + upcast_attention=upcast_attention, + torch_dtype=torch_dtype, + ) + assert model.config.upcast_attention == upcast_attention + assert model.dtype == torch_dtype diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_flux_transformer_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_flux_transformer_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..a7e07e517f4d207f02e4664cc1cc876e9c90ff0b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_flux_transformer_single_file.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
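+#
+# Single-file loading tests for FluxTransformer2DModel: config parity between the
+# `flux1-dev.safetensors` checkpoint and the pretrained `transformer` subfolder of
+# `black-forest-labs/FLUX.1-dev`, plus loading of the alternate-key fp8 repackaged
+# checkpoint listed in `alternate_keys_ckpt_paths`.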
+ +import gc +import unittest + +from diffusers import ( + FluxTransformer2DModel, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + torch_device, +) + + +enable_full_determinism() + + +@require_torch_accelerator +class FluxTransformer2DModelSingleFileTests(unittest.TestCase): + model_class = FluxTransformer2DModel + ckpt_path = "https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/flux1-dev.safetensors" + alternate_keys_ckpt_paths = ["https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors"] + + repo_id = "black-forest-labs/FLUX.1-dev" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer") + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_checkpoint_loading(self): + for ckpt_path in self.alternate_keys_ckpt_paths: + backend_empty_cache(torch_device) + model = self.model_class.from_single_file(ckpt_path) + + del model + gc.collect() + backend_empty_cache(torch_device) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_motion_adapter_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_motion_adapter_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..7aaf4b577e4b63a163d3d67aa7b9afb998186d7b --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_motion_adapter_single_file.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
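+#
+# Config-parity tests for MotionAdapter: each original AnimateDiff motion module
+# checkpoint (v1-5, v1-5-2, v1-5-3 and the SDXL beta module) loaded with
+# `from_single_file` is compared against the corresponding converted
+# `guoyww/animatediff-motion-adapter-*` repo loaded with `from_pretrained`.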
+ +import unittest + +from diffusers import ( + MotionAdapter, +) + +from ..testing_utils import ( + enable_full_determinism, +) + + +enable_full_determinism() + + +class MotionAdapterSingleFileTests(unittest.TestCase): + model_class = MotionAdapter + + def test_single_file_components_version_v1_5(self): + ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt" + repo_id = "guoyww/animatediff-motion-adapter-v1-5" + + model = self.model_class.from_pretrained(repo_id) + model_single_file = self.model_class.from_single_file(ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) + + def test_single_file_components_version_v1_5_2(self): + ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt" + repo_id = "guoyww/animatediff-motion-adapter-v1-5-2" + + model = self.model_class.from_pretrained(repo_id) + model_single_file = self.model_class.from_single_file(ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) + + def test_single_file_components_version_v1_5_3(self): + ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt" + repo_id = "guoyww/animatediff-motion-adapter-v1-5-3" + + model = self.model_class.from_pretrained(repo_id) + model_single_file = self.model_class.from_single_file(ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) + + def test_single_file_components_version_sdxl_beta(self): + ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt" + repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta" + + model = self.model_class.from_pretrained(repo_id) + model_single_file = self.model_class.from_single_file(ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_sd_cascade_unet_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_sd_cascade_unet_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..a5ec9dba30df723fab174ed7807a8143b51a8743 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_sd_cascade_unet_single_file.py @@ -0,0 +1,117 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch + +from diffusers import StableCascadeUNet +from diffusers.utils import logging + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) + + +logger = logging.get_logger(__name__) + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableCascadeUNetSingleFileTest(unittest.TestCase): + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components_stage_b(self): + model_single_file = StableCascadeUNet.from_single_file( + "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_bf16.safetensors", + torch_dtype=torch.bfloat16, + ) + model = StableCascadeUNet.from_pretrained( + "stabilityai/stable-cascade", variant="bf16", subfolder="decoder", use_safetensors=True + ) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_single_file_components_stage_b_lite(self): + model_single_file = StableCascadeUNet.from_single_file( + "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_b_lite_bf16.safetensors", + torch_dtype=torch.bfloat16, + ) + model = StableCascadeUNet.from_pretrained( + "stabilityai/stable-cascade", variant="bf16", subfolder="decoder_lite" + ) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_single_file_components_stage_c(self): + model_single_file = StableCascadeUNet.from_single_file( + "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_bf16.safetensors", + torch_dtype=torch.bfloat16, + ) + model = StableCascadeUNet.from_pretrained( + "stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior" + ) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_single_file_components_stage_c_lite(self): + model_single_file = StableCascadeUNet.from_single_file( + "https://huggingface.co/stabilityai/stable-cascade/blob/main/stage_c_lite_bf16.safetensors", + torch_dtype=torch.bfloat16, + ) 
+ model = StableCascadeUNet.from_pretrained( + "stabilityai/stable-cascade-prior", variant="bf16", subfolder="prior_lite" + ) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_vae_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_vae_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..3b9e619f13e6e9ef74b3693fd50c428c9e895f21 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_vae_single_file.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import gc +import unittest + +import torch + +from diffusers import ( + AutoencoderKL, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + load_hf_numpy, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class AutoencoderKLSingleFileTests(unittest.TestCase): + model_class = AutoencoderKL + ckpt_path = ( + "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" + ) + repo_id = "stabilityai/sd-vae-ft-mse" + main_input_name = "sample" + base_precision = 1e-2 + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_file_format(self, seed, shape): + return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" + + def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): + dtype = torch.float16 if fp16 else torch.float32 + image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) + return image + + def test_single_file_inference_same_as_pretrained(self): + model_1 = self.model_class.from_pretrained(self.repo_id).to(torch_device) + model_2 = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id).to(torch_device) + + image = self.get_sd_image(33) + + generator = torch.Generator(torch_device) + + with torch.no_grad(): + sample_1 = model_1(image, generator=generator.manual_seed(0)).sample + sample_2 = model_2(image, generator=generator.manual_seed(0)).sample + + assert sample_1.shape == sample_2.shape + + output_slice_1 = sample_1.flatten().float().cpu() + output_slice_2 = sample_2.flatten().float().cpu() + + assert numpy_cosine_similarity_distance(output_slice_1, output_slice_2) < 1e-4 + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id) + 
model_single_file = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between pretrained loading and single file loading" + ) + + def test_single_file_arguments(self): + model_default = self.model_class.from_single_file(self.ckpt_path, config=self.repo_id) + + assert model_default.config.scaling_factor == 0.18215 + assert model_default.config.sample_size == 256 + assert model_default.dtype == torch.float32 + + scaling_factor = 2.0 + sample_size = 512 + torch_dtype = torch.float16 + + model = self.model_class.from_single_file( + self.ckpt_path, + config=self.repo_id, + sample_size=sample_size, + scaling_factor=scaling_factor, + torch_dtype=torch_dtype, + ) + assert model.config.scaling_factor == scaling_factor + assert model.config.sample_size == sample_size + assert model.dtype == torch_dtype diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_wan_autoencoder_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_wan_autoencoder_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..a1f7155c1072c593ca90441525ec33369da312ee --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_wan_autoencoder_single_file.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
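+#
+# Config-parity test for AutoencoderKLWan: the repackaged `wan_2.1_vae.safetensors`
+# checkpoint loaded with `from_single_file` is compared against the `vae` subfolder of
+# `Wan-AI/Wan2.1-T2V-1.3B-Diffusers` loaded with `from_pretrained`.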
+ +import gc +import unittest + +from diffusers import ( + AutoencoderKLWan, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + torch_device, +) + + +enable_full_determinism() + + +@require_torch_accelerator +class AutoencoderKLWanSingleFileTests(unittest.TestCase): + model_class = AutoencoderKLWan + ckpt_path = ( + "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/vae/wan_2.1_vae.safetensors" + ) + repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id, subfolder="vae") + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_wan_transformer3d_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_wan_transformer3d_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..d7c758d3d933c36b676be54db1ec1522f74e27b7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_model_wan_transformer3d_single_file.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# Copyright 2025 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
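+#
+# Single-file loading tests for WanTransformer3DModel: the 1.3B T2V bf16 checkpoint is
+# checked for config parity on any accelerator, while the 14B I2V fp8 checkpoint is
+# additionally gated behind `require_big_accelerator` and loaded with
+# `torch_dtype=torch.float8_e4m3fn`.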
+ +import gc +import unittest + +import torch + +from diffusers import ( + WanTransformer3DModel, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_big_accelerator, + require_torch_accelerator, + torch_device, +) + + +enable_full_determinism() + + +@require_torch_accelerator +class WanTransformer3DModelText2VideoSingleFileTest(unittest.TestCase): + model_class = WanTransformer3DModel + ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_t2v_1.3B_bf16.safetensors" + repo_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer") + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + +@require_big_accelerator +@require_torch_accelerator +class WanTransformer3DModelImage2VideoSingleFileTest(unittest.TestCase): + model_class = WanTransformer3DModel + ckpt_path = "https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/blob/main/split_files/diffusion_models/wan2.1_i2v_480p_14B_fp8_e4m3fn.safetensors" + repo_id = "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers" + torch_dtype = torch.float8_e4m3fn + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer", torch_dtype=self.torch_dtype) + model_single_file = self.model_class.from_single_file(self.ckpt_path, torch_dtype=self.torch_dtype) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_sana_transformer.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_sana_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..c1543ba17137ab7d4647d9e7b019f33773c85124 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_sana_transformer.py @@ -0,0 +1,60 @@ +import gc +import unittest + +from diffusers import ( + SanaTransformer2DModel, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + torch_device, +) + + +enable_full_determinism() + + +@require_torch_accelerator +class SanaTransformer2DModelSingleFileTests(unittest.TestCase): + model_class = SanaTransformer2DModel + ckpt_path = ( + "https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth" + ) + alternate_keys_ckpt_paths = [ + 
"https://huggingface.co/Efficient-Large-Model/Sana_1600M_1024px/blob/main/checkpoints/Sana_1600M_1024px.pth" + ] + + repo_id = "Efficient-Large-Model/Sana_1600M_1024px_diffusers" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_components(self): + model = self.model_class.from_pretrained(self.repo_id, subfolder="transformer") + model_single_file = self.model_class.from_single_file(self.ckpt_path) + + PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"] + for param_name, param_value in model_single_file.config.items(): + if param_name in PARAMS_TO_IGNORE: + continue + assert model.config[param_name] == param_value, ( + f"{param_name} differs between single file loading and pretrained loading" + ) + + def test_checkpoint_loading(self): + for ckpt_path in self.alternate_keys_ckpt_paths: + backend_empty_cache(torch_device) + model = self.model_class.from_single_file(ckpt_path) + + del model + gc.collect() + backend_empty_cache(torch_device) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..e558eeaf6f472a14f8189298ebc7c0f292119738 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_img2img_single_file.py @@ -0,0 +1,197 @@ +import gc +import tempfile +import unittest + +import torch + +from diffusers import ControlNetModel, StableDiffusionControlNetPipeline +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import ( + SDSingleFileTesterMixin, + download_diffusers_config, + download_original_config, + download_single_file_checkpoint, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionControlNetPipeline + ckpt_path = ( + "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" + ) + original_config = ( + "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" + ) + repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + control_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ).resize((512, 512)) + prompt = "bird" + + inputs = { + "prompt": prompt, + "image": 
init_image, + "control_image": control_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe.unet.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + + pipe_sf = self.pipeline_class.from_single_file( + self.ckpt_path, + controlnet=controlnet, + ) + pipe_sf.unet.set_default_attn_processor() + pipe_sf.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + output = pipe(**inputs).images[0] + + inputs = self.get_inputs(torch_device) + output_sf = pipe_sf(**inputs).images[0] + + max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten()) + assert max_diff < 1e-3 + + def test_single_file_components(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained( + self.repo_id, variant="fp16", safety_checker=None, controlnet=controlnet + ) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, + safety_checker=None, + controlnet=controlnet, + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_local_files_only(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weights_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weights_name, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, controlnet=controlnet, safety_checker=None, local_files_only=True + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, original_config=self.original_config + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weights_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weights_name, tmpdir) + + local_original_config = download_original_config(self.original_config, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + original_config=local_original_config, + controlnet=controlnet, + safety_checker=None, + local_files_only=True, + ) + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config(self): + controlnet = 
ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, original_config=self.original_config + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weights_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weights_name, tmpdir) + + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + config=local_diffusers_config, + safety_checker=None, + controlnet=controlnet, + local_files_only=True, + ) + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_setting_pipeline_dtype_to_fp16(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + single_file_pipe = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 + ) + super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..54224f51a9b5418db6bb93e15ce004866564f15a --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_inpaint_single_file.py @@ -0,0 +1,197 @@ +import gc +import tempfile +import unittest + +import torch + +from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import ( + SDSingleFileTesterMixin, + download_diffusers_config, + download_original_config, + download_single_file_checkpoint, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionControlNetInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionControlNetInpaintPipeline + ckpt_path = "https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt" + original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml" + repo_id = "stable-diffusion-v1-5/stable-diffusion-inpainting" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self): + control_image = 
load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ).resize((512, 512)) + image = load_image( + "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" + ).resize((512, 512)) + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ).resize((512, 512)) + + inputs = { + "prompt": "bird", + "image": image, + "control_image": control_image, + "mask_image": mask_image, + "generator": torch.Generator(device="cpu").manual_seed(0), + "num_inference_steps": 3, + "output_type": "np", + } + + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet, safety_checker=None) + pipe.unet.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + + pipe_sf = self.pipeline_class.from_single_file(self.ckpt_path, controlnet=controlnet, safety_checker=None) + pipe_sf.unet.set_default_attn_processor() + pipe_sf.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs() + output = pipe(**inputs).images[0] + + inputs = self.get_inputs() + output_sf = pipe_sf(**inputs).images[0] + + max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten()) + assert max_diff < 2e-3 + + def test_single_file_components(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained( + self.repo_id, variant="fp16", safety_checker=None, controlnet=controlnet + ) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, + safety_checker=None, + controlnet=controlnet, + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_local_files_only(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained(self.repo_id, safety_checker=None, controlnet=controlnet) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, controlnet=controlnet, safety_checker=None, local_files_only=True + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + @unittest.skip("runwayml original config repo does not exist") + def test_single_file_components_with_original_config(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, original_config=self.original_config + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + @unittest.skip("runwayml original config repo does not exist") + def test_single_file_components_with_original_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + safety_checker=None, + ) + + with 
tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_original_config = download_original_config(self.original_config, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + original_config=local_original_config, + controlnet=controlnet, + safety_checker=None, + local_files_only=True, + ) + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, + controlnet=controlnet, + config=self.repo_id, + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", + torch_dtype=torch.float16, + variant="fp16", + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + safety_checker=None, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + config=local_diffusers_config, + controlnet=controlnet, + safety_checker=None, + local_files_only=True, + ) + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_setting_pipeline_dtype_to_fp16(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + single_file_pipe = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 + ) + super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..e90e648a9de9bded6c9641912cd46e2234dfc474 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_controlnet_single_file.py @@ -0,0 +1,185 @@ +import gc +import tempfile +import unittest + +import torch + +from diffusers import ControlNetModel, StableDiffusionControlNetPipeline +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import ( + SDSingleFileTesterMixin, + download_diffusers_config, + download_original_config, + download_single_file_checkpoint, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = 
StableDiffusionControlNetPipeline + ckpt_path = ( + "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" + ) + original_config = ( + "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" + ) + repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self): + control_image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" + ).resize((512, 512)) + inputs = { + "prompt": "bird", + "image": control_image, + "generator": torch.Generator(device="cpu").manual_seed(0), + "num_inference_steps": 3, + "output_type": "np", + } + + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe.unet.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + + pipe_sf = self.pipeline_class.from_single_file( + self.ckpt_path, + controlnet=controlnet, + ) + pipe_sf.unet.set_default_attn_processor() + pipe_sf.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs() + output = pipe(**inputs).images[0] + + inputs = self.get_inputs() + output_sf = pipe_sf(**inputs).images[0] + + max_diff = numpy_cosine_similarity_distance(output_sf.flatten(), output.flatten()) + assert max_diff < 1e-3 + + def test_single_file_components(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained( + self.repo_id, variant="fp16", safety_checker=None, controlnet=controlnet + ) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, + safety_checker=None, + controlnet=controlnet, + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_local_files_only(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, controlnet=controlnet, local_files_only=True + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, original_config=self.original_config + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + ) + + with 
tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_original_config = download_original_config(self.original_config, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, original_config=local_original_config, controlnet=controlnet, local_files_only=True + ) + pipe_single_file.scheduler = pipe.scheduler + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config(self): + controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny", variant="fp16") + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, config=self.repo_id + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + safety_checker=None, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + config=local_diffusers_config, + controlnet=controlnet, + safety_checker=None, + local_files_only=True, + ) + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_setting_pipeline_dtype_to_fp16(self): + controlnet = ControlNetModel.from_pretrained( + "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16, variant="fp16" + ) + single_file_pipe = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 + ) + super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_img2img_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_img2img_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..387f09471dd793ab9e3058e3f8345f19298acd93 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_img2img_single_file.py @@ -0,0 +1,103 @@ +import gc +import unittest + +import torch + +from diffusers import ( + StableDiffusionImg2ImgPipeline, +) +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import SDSingleFileTesterMixin + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionImg2ImgPipeline + ckpt_path = ( + "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" + ) + original_config = ( + 
"https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" + ) + repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "image": init_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) + + +@slow +@require_torch_accelerator +class StableDiffusion21Img2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionImg2ImgPipeline + ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors" + original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" + repo_id = "stabilityai/stable-diffusion-2-1" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "image": init_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_inpaint_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_inpaint_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..84636ec0f0fa2f7106771e831a4eaf379854baf1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_inpaint_single_file.py @@ -0,0 +1,124 @@ +import gc +import unittest + +import torch + +from diffusers import ( + StableDiffusionInpaintPipeline, +) +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import SDSingleFileTesterMixin + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionInpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionInpaintPipeline + ckpt_path = 
"https://huggingface.co/botp/stable-diffusion-v1-5-inpainting/blob/main/sd-v1-5-inpainting.ckpt" + original_config = "https://raw.githubusercontent.com/runwayml/stable-diffusion/main/configs/stable-diffusion/v1-inpainting-inference.yaml" + repo_id = "botp/stable-diffusion-v1-5-inpainting" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ) + inputs = { + "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) + + def test_single_file_loading_4_channel_unet(self): + # Test loading single file inpaint with a 4 channel UNet + ckpt_path = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" + pipe = self.pipeline_class.from_single_file(ckpt_path) + + assert pipe.unet.config.in_channels == 4 + + @unittest.skip("runwayml original config has been removed") + def test_single_file_components_with_original_config(self): + return + + @unittest.skip("runwayml original config has been removed") + def test_single_file_components_with_original_config_local_files_only(self): + return + + +@slow +@require_torch_accelerator +class StableDiffusion21InpaintPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionInpaintPipeline + ckpt_path = ( + "https://huggingface.co/stabilityai/stable-diffusion-2-inpainting/blob/main/512-inpainting-ema.safetensors" + ) + original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inpainting-inference.yaml" + repo_id = "stabilityai/stable-diffusion-2-inpainting" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_image.png" + ) + mask_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_inpaint/input_bench_mask.png" + ) + inputs = { + "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", + "image": init_image, + "mask_image": mask_image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + 
super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..4601b75c3ab61bb9e932af2eb872385b654954b1 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_single_file.py @@ -0,0 +1,164 @@ +import gc +import tempfile +import unittest + +import torch + +from diffusers import EulerDiscreteScheduler, StableDiffusionInstructPix2PixPipeline, StableDiffusionPipeline +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + nightly, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import ( + SDSingleFileTesterMixin, + download_original_config, + download_single_file_checkpoint, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionPipeline + ckpt_path = ( + "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors" + ) + original_config = ( + "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" + ) + repo_id = "stable-diffusion-v1-5/stable-diffusion-v1-5" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "generator": generator, + "num_inference_steps": 2, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) + + def test_single_file_legacy_scheduler_loading(self): + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_original_config = download_original_config(self.original_config, tmpdir) + + pipe = self.pipeline_class.from_single_file( + local_ckpt_path, + original_config=local_original_config, + cache_dir=tmpdir, + local_files_only=True, + scheduler_type="euler", + ) + + # Default is PNDM for this checkpoint + assert isinstance(pipe.scheduler, EulerDiscreteScheduler) + + def test_single_file_legacy_scaling_factor(self): + new_scaling_factor = 10.0 + init_pipe = self.pipeline_class.from_single_file(self.ckpt_path) + pipe = self.pipeline_class.from_single_file(self.ckpt_path, scaling_factor=new_scaling_factor) + + assert init_pipe.vae.config.scaling_factor != new_scaling_factor + assert pipe.vae.config.scaling_factor == new_scaling_factor + + +@slow +class StableDiffusion21PipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionPipeline + ckpt_path = 
"https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors" + original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml" + repo_id = "stabilityai/stable-diffusion-2-1" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "generator": generator, + "num_inference_steps": 2, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) + + +@nightly +@slow +@require_torch_accelerator +class StableDiffusionInstructPix2PixPipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionInstructPix2PixPipeline + ckpt_path = "https://huggingface.co/timbrooks/instruct-pix2pix/blob/main/instruct-pix2pix-00-22000.safetensors" + original_config = ( + "https://raw.githubusercontent.com/timothybrooks/instruct-pix2pix/refs/heads/main/configs/generate.yaml" + ) + repo_id = "timbrooks/instruct-pix2pix" + single_file_kwargs = {"extract_ema": True} + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" + ) + inputs = { + "prompt": "turn him into a cyborg", + "image": image, + "generator": generator, + "num_inference_steps": 3, + "guidance_scale": 7.5, + "image_guidance_scale": 1.0, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_upscale_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_upscale_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..39ec7b0194a668859c54556253bbdb1bc871a6ba --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_upscale_single_file.py @@ -0,0 +1,87 @@ +import gc +import unittest + +import pytest +import torch + +from diffusers import ( + StableDiffusionUpscalePipeline, +) +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import SDSingleFileTesterMixin + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionUpscalePipelineSingleFileSlowTests(unittest.TestCase, SDSingleFileTesterMixin): + pipeline_class = StableDiffusionUpscalePipeline + ckpt_path = 
"https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors" + original_config = "https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/x4-upscaling.yaml" + repo_id = "stabilityai/stable-diffusion-x4-upscaler" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def test_single_file_format_inference_is_same_as_pretrained(self): + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" + "/sd2-upscale/low_res_cat.png" + ) + + prompt = "a cat sitting on a park bench" + pipe = StableDiffusionUpscalePipeline.from_pretrained(self.repo_id) + pipe.enable_model_cpu_offload(device=torch_device) + + generator = torch.Generator("cpu").manual_seed(0) + output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3) + image_from_pretrained = output.images[0] + + pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(self.ckpt_path) + pipe_from_single_file.enable_model_cpu_offload(device=torch_device) + + generator = torch.Generator("cpu").manual_seed(0) + output_from_single_file = pipe_from_single_file( + prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3 + ) + image_from_single_file = output_from_single_file.images[0] + + assert image_from_pretrained.shape == (512, 512, 3) + assert image_from_single_file.shape == (512, 512, 3) + assert ( + numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3 + ) + + @pytest.mark.xfail( + condition=True, + reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.", + strict=True, + ) + def test_single_file_components_with_original_config(self): + super().test_single_file_components_with_original_config() + + @pytest.mark.xfail( + condition=True, + reason="Test fails because of mismatches in the configs but it is very hard to properly fix this considering downstream usecase.", + strict=True, + ) + def test_single_file_components_with_original_config_local_files_only(self): + super().test_single_file_components_with_original_config_local_files_only() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..3de9ee736417fb6854ec82c691c8a64540e15928 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_adapter_single_file.py @@ -0,0 +1,213 @@ +import gc +import tempfile +import unittest + +import torch + +from diffusers import ( + StableDiffusionXLAdapterPipeline, + T2IAdapter, +) +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import ( + SDXLSingleFileTesterMixin, + download_diffusers_config, + download_original_config, + download_single_file_checkpoint, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class 
StableDiffusionXLAdapterPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin): + pipeline_class = StableDiffusionXLAdapterPipeline + ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + original_config = ( + "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" + ) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self): + prompt = "toy" + generator = torch.Generator(device="cpu").manual_seed(0) + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" + ) + + inputs = { + "prompt": prompt, + "image": image, + "generator": generator, + "num_inference_steps": 2, + "guidance_scale": 7.5, + "output_type": "np", + } + + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe_single_file = StableDiffusionXLAdapterPipeline.from_single_file( + self.ckpt_path, + adapter=adapter, + torch_dtype=torch.float16, + safety_checker=None, + ) + pipe_single_file.enable_model_cpu_offload(device=torch_device) + pipe_single_file.set_progress_bar_config(disable=None) + + inputs = self.get_inputs() + images_single_file = pipe_single_file(**inputs).images[0] + + pipe = StableDiffusionXLAdapterPipeline.from_pretrained( + self.repo_id, + adapter=adapter, + torch_dtype=torch.float16, + safety_checker=None, + ) + pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs() + images = pipe(**inputs).images[0] + + assert images_single_file.shape == (768, 512, 3) + assert images.shape == (768, 512, 3) + + max_diff = numpy_cosine_similarity_distance(images.flatten(), images_single_file.flatten()) + assert max_diff < 5e-3 + + def test_single_file_components(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + adapter=adapter, + torch_dtype=torch.float16, + ) + + pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, safety_checker=None, adapter=adapter) + super().test_single_file_components(pipe, pipe_single_file) + + def test_single_file_components_local_files_only(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + adapter=adapter, + torch_dtype=torch.float16, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + single_file_pipe = self.pipeline_class.from_single_file( + local_ckpt_path, adapter=adapter, safety_checker=None, local_files_only=True + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_with_diffusers_config(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + adapter=adapter, + 
torch_dtype=torch.float16, + safety_checker=None, + ) + + pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, config=self.repo_id, adapter=adapter) + self._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config_local_files_only(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + adapter=adapter, + torch_dtype=torch.float16, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + config=local_diffusers_config, + adapter=adapter, + safety_checker=None, + local_files_only=True, + ) + self._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + adapter=adapter, + torch_dtype=torch.float16, + safety_checker=None, + ) + + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, original_config=self.original_config, adapter=adapter + ) + self._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config_local_files_only(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + adapter=adapter, + torch_dtype=torch.float16, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_original_config = download_original_config(self.original_config, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + original_config=local_original_config, + adapter=adapter, + safety_checker=None, + local_files_only=True, + ) + self._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_setting_pipeline_dtype_to_fp16(self): + adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16) + + single_file_pipe = self.pipeline_class.from_single_file( + self.ckpt_path, adapter=adapter, torch_dtype=torch.float16 + ) + super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..a0a1aba1030fa8858e1274147bc1af4ce7beacb0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_controlnet_single_file.py @@ -0,0 +1,208 @@ +import gc +import tempfile +import unittest + +import torch + +from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline +from diffusers.loaders.single_file_utils import _extract_repo_id_and_weights_name +from 
diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import ( + SDXLSingleFileTesterMixin, + download_diffusers_config, + download_single_file_checkpoint, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionXLControlNetPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin): + pipeline_class = StableDiffusionXLControlNetPipeline + ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + original_config = ( + "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" + ) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + image = load_image( + "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" + ) + inputs = { + "prompt": "Stormtrooper's lecture", + "image": image, + "generator": generator, + "num_inference_steps": 2, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, torch_dtype=torch.float16 + ) + pipe_single_file.unet.set_default_attn_processor() + pipe_single_file.enable_model_cpu_offload(device=torch_device) + pipe_single_file.set_progress_bar_config(disable=None) + + inputs = self.get_inputs(torch_device) + single_file_images = pipe_single_file(**inputs).images[0] + + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet, torch_dtype=torch.float16) + pipe.unet.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + + inputs = self.get_inputs(torch_device) + images = pipe(**inputs).images[0] + + assert images.shape == (512, 512, 3) + assert single_file_images.shape == (512, 512, 3) + + max_diff = numpy_cosine_similarity_distance(images[0].flatten(), single_file_images[0].flatten()) + assert max_diff < 5e-2 + + def test_single_file_components(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + controlnet=controlnet, + torch_dtype=torch.float16, + ) + + pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, controlnet=controlnet) + super().test_single_file_components(pipe, pipe_single_file) + + def test_single_file_components_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + controlnet=controlnet, + torch_dtype=torch.float16, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + 
repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + single_file_pipe = self.pipeline_class.from_single_file( + local_ckpt_path, controlnet=controlnet, safety_checker=None, local_files_only=True + ) + + self._compare_component_configs(pipe, single_file_pipe) + + def test_single_file_components_with_original_config(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + controlnet=controlnet, + torch_dtype=torch.float16, + ) + + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, + original_config=self.original_config, + controlnet=controlnet, + ) + self._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_original_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + variant="fp16", + controlnet=controlnet, + torch_dtype=torch.float16, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + safety_checker=None, + controlnet=controlnet, + local_files_only=True, + ) + self._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained(self.repo_id, controlnet=controlnet) + pipe_single_file = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, config=self.repo_id + ) + + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_components_with_diffusers_config_local_files_only(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + pipe = self.pipeline_class.from_pretrained( + self.repo_id, + controlnet=controlnet, + ) + + with tempfile.TemporaryDirectory() as tmpdir: + repo_id, weight_name = _extract_repo_id_and_weights_name(self.ckpt_path) + local_ckpt_path = download_single_file_checkpoint(repo_id, weight_name, tmpdir) + local_diffusers_config = download_diffusers_config(self.repo_id, tmpdir) + + pipe_single_file = self.pipeline_class.from_single_file( + local_ckpt_path, + config=local_diffusers_config, + safety_checker=None, + controlnet=controlnet, + local_files_only=True, + ) + super()._compare_component_configs(pipe, pipe_single_file) + + def test_single_file_setting_pipeline_dtype_to_fp16(self): + controlnet = ControlNetModel.from_pretrained( + "diffusers/controlnet-depth-sdxl-1.0", torch_dtype=torch.float16, variant="fp16" + ) + single_file_pipe = self.pipeline_class.from_single_file( + self.ckpt_path, controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16 + ) + super().test_single_file_setting_pipeline_dtype_to_fp16(single_file_pipe) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py 
b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..810f412f8def1bad7ffa5f62e1f5f7da77f33379 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_img2img_single_file.py @@ -0,0 +1,107 @@ +import gc +import unittest + +import torch + +from diffusers import ( + DDIMScheduler, + StableDiffusionXLImg2ImgPipeline, +) +from diffusers.utils import load_image + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + numpy_cosine_similarity_distance, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import SDXLSingleFileTesterMixin + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionXLImg2ImgPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin): + pipeline_class = StableDiffusionXLImg2ImgPipeline + ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + original_config = ( + "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" + ) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "image": init_image, + "generator": generator, + "num_inference_steps": 3, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) + + +@slow +@require_torch_accelerator +class StableDiffusionXLImg2ImgRefinerPipelineSingleFileSlowTests(unittest.TestCase): + pipeline_class = StableDiffusionXLImg2ImgPipeline + ckpt_path = ( + "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/blob/main/sd_xl_refiner_1.0.safetensors" + ) + repo_id = "stabilityai/stable-diffusion-xl-refiner-1.0" + original_config = ( + "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_refiner.yaml" + ) + + def test_single_file_format_inference_is_same_as_pretrained(self): + init_image = load_image( + "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" + "/stable_diffusion_img2img/sketch-mountains-input.png" + ) + + pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16) + pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) + pipe.unet.set_default_attn_processor() + pipe.enable_model_cpu_offload(device=torch_device) + + generator = torch.Generator(device="cpu").manual_seed(0) + image = pipe( + prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np" + ).images[0] + + pipe_single_file = self.pipeline_class.from_single_file(self.ckpt_path, torch_dtype=torch.float16) + pipe_single_file.scheduler = 
DDIMScheduler.from_config(pipe_single_file.scheduler.config) + pipe_single_file.unet.set_default_attn_processor() + pipe_single_file.enable_model_cpu_offload(device=torch_device) + + generator = torch.Generator(device="cpu").manual_seed(0) + image_single_file = pipe_single_file( + prompt="mountains", image=init_image, num_inference_steps=5, generator=generator, output_type="np" + ).images[0] + + max_diff = numpy_cosine_similarity_distance(image.flatten(), image_single_file.flatten()) + + assert max_diff < 5e-4 diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py new file mode 100644 index 0000000000000000000000000000000000000000..011d59222a5b0c656e74c8c84d9ce946aa62761d --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_instruct_pix2pix.py @@ -0,0 +1,53 @@ +import gc +import unittest + +import torch + +from diffusers import StableDiffusionXLInstructPix2PixPipeline + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionXLInstructPix2PixPipeline(unittest.TestCase): + pipeline_class = StableDiffusionXLInstructPix2PixPipeline + ckpt_path = "https://huggingface.co/stabilityai/cosxl/blob/main/cosxl_edit.safetensors" + original_config = None + repo_id = "diffusers/sdxl-instructpix2pix-768" + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "generator": generator, + "num_inference_steps": 2, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_setting_cosxl_edit(self): + # Default is PNDM for this checkpoint + pipe = self.pipeline_class.from_single_file(self.ckpt_path, config=self.repo_id, is_cosxl_edit=True) + assert pipe.is_cosxl_edit is True diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_single_file.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_single_file.py new file mode 100644 index 0000000000000000000000000000000000000000..0ad180de17dbb323b2e1e4a61b01238cbe5c76d8 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/single_file/test_stable_diffusion_xl_single_file.py @@ -0,0 +1,56 @@ +import gc +import unittest + +import torch + +from diffusers import ( + StableDiffusionXLPipeline, +) + +from ..testing_utils import ( + backend_empty_cache, + enable_full_determinism, + require_torch_accelerator, + slow, + torch_device, +) +from .single_file_testing_utils import SDXLSingleFileTesterMixin + + +enable_full_determinism() + + +@slow +@require_torch_accelerator +class StableDiffusionXLPipelineSingleFileSlowTests(unittest.TestCase, SDXLSingleFileTesterMixin): + pipeline_class = StableDiffusionXLPipeline + ckpt_path = "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" + repo_id = "stabilityai/stable-diffusion-xl-base-1.0" + original_config = ( 
+ "https://raw.githubusercontent.com/Stability-AI/generative-models/main/configs/inference/sd_xl_base.yaml" + ) + + def setUp(self): + super().setUp() + gc.collect() + backend_empty_cache(torch_device) + + def tearDown(self): + super().tearDown() + gc.collect() + backend_empty_cache(torch_device) + + def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): + generator = torch.Generator(device=generator_device).manual_seed(seed) + inputs = { + "prompt": "a fantasy landscape, concept art, high resolution", + "generator": generator, + "num_inference_steps": 2, + "strength": 0.75, + "guidance_scale": 7.5, + "output_type": "np", + } + return inputs + + def test_single_file_format_inference_is_same_as_pretrained(self): + super().test_single_file_format_inference_is_same_as_pretrained(expected_max_diff=1e-3) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/tests/testing_utils.py b/exp_code/1_benchmark/diffusers-WanS2V/tests/testing_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7f849219c16f2398984b6aad895c44c8f4c5fcf7 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/tests/testing_utils.py @@ -0,0 +1,1557 @@ +import functools +import glob +import importlib +import importlib.metadata +import inspect +import io +import logging +import multiprocessing +import os +import random +import re +import struct +import sys +import tempfile +import time +import unittest +import urllib.parse +from collections import UserDict +from contextlib import contextmanager +from io import BytesIO, StringIO +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Union + +import numpy as np +import PIL.Image +import PIL.ImageOps +import requests +from numpy.linalg import norm +from packaging import version + +from diffusers.utils.constants import DIFFUSERS_REQUEST_TIMEOUT +from diffusers.utils.import_utils import ( + BACKENDS_MAPPING, + is_accelerate_available, + is_bitsandbytes_available, + is_compel_available, + is_flax_available, + is_gguf_available, + is_kernels_available, + is_note_seq_available, + is_onnx_available, + is_opencv_available, + is_optimum_quanto_available, + is_peft_available, + is_timm_available, + is_torch_available, + is_torch_version, + is_torchao_available, + is_torchsde_available, + is_transformers_available, +) +from diffusers.utils.logging import get_logger + + +if is_torch_available(): + import torch + + IS_ROCM_SYSTEM = torch.version.hip is not None + IS_CUDA_SYSTEM = torch.version.cuda is not None + IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None +else: + IS_ROCM_SYSTEM = False + IS_CUDA_SYSTEM = False + IS_XPU_SYSTEM = False + +global_rng = random.Random() + +logger = get_logger(__name__) + +_required_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version +) > version.parse("0.5") +_required_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version +) > version.parse("4.33") + +USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version +BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40)) + +if is_torch_available(): + import torch + + # Set a backend environment variable for any extra module import required for a custom accelerator + if "DIFFUSERS_TEST_BACKEND" in os.environ: + backend = os.environ["DIFFUSERS_TEST_BACKEND"] + try: + _ = importlib.import_module(backend) + except 
ModuleNotFoundError as e: + raise ModuleNotFoundError( + f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module \ + to enable a specified backend.):\n{e}" + ) from e + + if "DIFFUSERS_TEST_DEVICE" in os.environ: + torch_device = os.environ["DIFFUSERS_TEST_DEVICE"] + try: + # try creating device to see if provided device is valid + _ = torch.device(torch_device) + except RuntimeError as e: + raise RuntimeError( + f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}" + ) from e + logger.info(f"torch_device overrode to {torch_device}") + else: + if torch.cuda.is_available(): + torch_device = "cuda" + elif torch.xpu.is_available(): + torch_device = "xpu" + else: + torch_device = "cpu" + is_torch_higher_equal_than_1_12 = version.parse( + version.parse(torch.__version__).base_version + ) >= version.parse("1.12") + + if is_torch_higher_equal_than_1_12: + # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details + mps_backend_registered = hasattr(torch.backends, "mps") + torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device + + from diffusers.utils.torch_utils import get_torch_cuda_device_capability + + +def torch_all_close(a, b, *args, **kwargs): + if not is_torch_available(): + raise ValueError("PyTorch needs to be installed to use this function.") + if not torch.allclose(a, b, *args, **kwargs): + assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}." + return True + + +def numpy_cosine_similarity_distance(a, b): + similarity = np.dot(a, b) / (norm(a) * norm(b)) + distance = 1.0 - similarity.mean() + + return distance + + +def check_if_dicts_are_equal(dict1, dict2): + dict1, dict2 = dict1.copy(), dict2.copy() + + for key, value in dict1.items(): + if isinstance(value, set): + dict1[key] = sorted(value) + for key, value in dict2.items(): + if isinstance(value, set): + dict2[key] = sorted(value) + + for key in dict1: + if key not in dict2: + return False + if dict1[key] != dict2[key]: + return False + + for key in dict2: + if key not in dict1: + return False + + return True + + +def print_tensor_test( + tensor, + limit_to_slices=None, + max_torch_print=None, + filename="test_corrections.txt", + expected_tensor_name="expected_slice", +): + if max_torch_print: + torch.set_printoptions(threshold=10_000) + + test_name = os.environ.get("PYTEST_CURRENT_TEST") + if not torch.is_tensor(tensor): + tensor = torch.from_numpy(tensor) + if limit_to_slices: + tensor = tensor[0, -3:, -3:, -1] + + tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "") + # format is usually: + # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161]) + output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array") + test_file, test_class, test_fn = test_name.split("::") + test_fn = test_fn.split()[0] + with open(filename, "a") as f: + print("::".join([test_file, test_class, test_fn, output_str]), file=f) + + +def get_tests_dir(append_path=None): + """ + Args: + append_path: optional path to append to the tests dir path + Return: + The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is + joined after the `tests` dir the former is provided. 
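+        Example (illustrative; the shown paths are placeholders and assume the caller lives somewhere under `tests/`):
+
+            get_tests_dir()            # e.g. "/abs/path/to/repo/tests"
+            get_tests_dir("fixtures")  # e.g. "/abs/path/to/repo/tests/fixtures"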
+ """ + # this function caller's __file__ + caller__file__ = inspect.stack()[1][1] + tests_dir = os.path.abspath(os.path.dirname(caller__file__)) + + while not tests_dir.endswith("tests"): + tests_dir = os.path.dirname(tests_dir) + + if append_path: + return Path(tests_dir, append_path).as_posix() + else: + return tests_dir + + +# Taken from the following PR: +# https://github.com/huggingface/accelerate/pull/1964 +def str_to_bool(value) -> int: + """ + Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`, + `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`; + """ + value = value.lower() + if value in ("y", "yes", "t", "true", "on", "1"): + return 1 + elif value in ("n", "no", "f", "false", "off", "0"): + return 0 + else: + raise ValueError(f"invalid truth value {value}") + + +def parse_flag_from_env(key, default=False): + try: + value = os.environ[key] + except KeyError: + # KEY isn't set, default to `default`. + _value = default + else: + # KEY is set, convert it to True or False. + try: + _value = str_to_bool(value) + except ValueError: + # More values are supported, but let's keep the message simple. + raise ValueError(f"If set, {key} must be yes or no.") + return _value + + +_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False) +_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False) +_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False) + + +def floats_tensor(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + total_dims = 1 + for dim in shape: + total_dims *= dim + + values = [] + for _ in range(total_dims): + values.append(rng.random() * scale) + + return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous() + + +def slow(test_case): + """ + Decorator marking a test as slow. + + Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case) + + +def nightly(test_case): + """ + Decorator marking a test that runs nightly in the diffusers CI. + + Slow tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case) + + +def is_torch_compile(test_case): + """ + Decorator marking a test that runs compile tests in the diffusers CI. + + Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them. + + """ + return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case) + + +def require_torch(test_case): + """ + Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed. + """ + return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) + + +def require_torch_2(test_case): + """ + Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed. 
+ """ + return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")( + test_case + ) + + +def require_torch_version_greater_equal(torch_version): + """Decorator marking a test that requires torch with a specific version or greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}" + )(test_case) + + return decorator + + +def require_torch_version_greater(torch_version): + """Decorator marking a test that requires torch with a specific version greater.""" + + def decorator(test_case): + correct_torch_version = is_torch_available() and is_torch_version(">", torch_version) + return unittest.skipUnless( + correct_torch_version, f"test requires torch with the version greater than {torch_version}" + )(test_case) + + return decorator + + +def require_torch_gpu(test_case): + """Decorator marking a test that requires CUDA and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")( + test_case + ) + + +def require_torch_cuda_compatibility(expected_compute_capability): + def decorator(test_case): + if torch.cuda.is_available(): + current_compute_capability = get_torch_cuda_device_capability() + return unittest.skipUnless( + float(current_compute_capability) == float(expected_compute_capability), + "Test not supported for this compute capability.", + ) + + return decorator + + +# These decorators are for accelerator-specific behaviours that are not GPU-specific +def require_torch_accelerator(test_case): + """Decorator marking a test that requires an accelerator backend and PyTorch.""" + return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")( + test_case + ) + + +def require_torch_multi_gpu(test_case): + """ + Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without + multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests + -k "multi_gpu" + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case) + + +def require_torch_multi_accelerator(test_case): + """ + Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine + without multiple hardware accelerators. 
+ """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + return unittest.skipUnless( + torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators" + )(test_case) + + +def require_torch_accelerator_with_fp16(test_case): + """Decorator marking a test that requires an accelerator with support for the FP16 data type.""" + return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")( + test_case + ) + + +def require_torch_accelerator_with_fp64(test_case): + """Decorator marking a test that requires an accelerator with support for the FP64 data type.""" + return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")( + test_case + ) + + +def require_big_gpu_with_torch_cuda(test_case): + """ + Decorator marking a test that requires a bigger GPU (24GB) for execution. Some example pipelines: Flux, SD3, Cog, + etc. + """ + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not torch.cuda.is_available(): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + device_properties = torch.cuda.get_device_properties(0) + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory" + )(test_case) + + +def require_big_accelerator(test_case): + """ + Decorator marking a test that requires a bigger hardware accelerator (24GB) for execution. Some example pipelines: + Flux, SD3, Cog, etc. + """ + import pytest + + test_case = pytest.mark.big_accelerator(test_case) + + if not is_torch_available(): + return unittest.skip("test requires PyTorch")(test_case) + + import torch + + if not (torch.cuda.is_available() or torch.xpu.is_available()): + return unittest.skip("test requires PyTorch CUDA")(test_case) + + if torch.xpu.is_available(): + device_properties = torch.xpu.get_device_properties(0) + else: + device_properties = torch.cuda.get_device_properties(0) + + total_memory = device_properties.total_memory / (1024**3) + return unittest.skipUnless( + total_memory >= BIG_GPU_MEMORY, + f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory", + )(test_case) + + +def require_torch_accelerator_with_training(test_case): + """Decorator marking a test that requires an accelerator with support for training.""" + return unittest.skipUnless( + is_torch_available() and backend_supports_training(torch_device), + "test requires accelerator with training support", + )(test_case) + + +def skip_mps(test_case): + """Decorator marking a test to skip if torch_device is 'mps'""" + return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case) + + +def require_flax(test_case): + """ + Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed + """ + return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case) + + +def require_compel(test_case): + """ + Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when + the library is not installed. + """ + return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case) + + +def require_onnxruntime(test_case): + """ + Decorator marking a test that requires onnxruntime. 
These tests are skipped when onnxruntime isn't installed. + """ + return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case) + + +def require_note_seq(test_case): + """ + Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed. + """ + return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case) + + +def require_accelerator(test_case): + """ + Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no + hardware accelerator available. + """ + return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case) + + +def require_torchsde(test_case): + """ + Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed. + """ + return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case) + + +def require_peft_backend(test_case): + """ + Decorator marking a test that requires PEFT backend, this would require some specific versions of PEFT and + transformers. + """ + return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case) + + +def require_timm(test_case): + """ + Decorator marking a test that requires timm. These tests are skipped when timm isn't installed. + """ + return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case) + + +def require_bitsandbytes(test_case): + """ + Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed. + """ + return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case) + + +def require_quanto(test_case): + """ + Decorator marking a test that requires quanto. These tests are skipped when quanto isn't installed. + """ + return unittest.skipUnless(is_optimum_quanto_available(), "test requires quanto")(test_case) + + +def require_accelerate(test_case): + """ + Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed. + """ + return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case) + + +def require_peft_version_greater(peft_version): + """ + Decorator marking a test that requires PEFT backend with a specific version, this would require some specific + versions of PEFT and transformers. + """ + + def decorator(test_case): + correct_peft_version = is_peft_available() and version.parse( + version.parse(importlib.metadata.version("peft")).base_version + ) > version.parse(peft_version) + return unittest.skipUnless( + correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}" + )(test_case) + + return decorator + + +def require_transformers_version_greater(transformers_version): + """ + Decorator marking a test that requires transformers with a specific version, this would require some specific + versions of PEFT and transformers. 
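+    Example (illustrative; the version and test name are placeholders):
+
+        @require_transformers_version_greater("4.45.2")
+        def test_new_text_encoder_kwarg(self):
+            ...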
+ """ + + def decorator(test_case): + correct_transformers_version = is_transformers_available() and version.parse( + version.parse(importlib.metadata.version("transformers")).base_version + ) > version.parse(transformers_version) + return unittest.skipUnless( + correct_transformers_version, + f"test requires transformers with the version greater than {transformers_version}", + )(test_case) + + return decorator + + +def require_accelerate_version_greater(accelerate_version): + def decorator(test_case): + correct_accelerate_version = is_accelerate_available() and version.parse( + version.parse(importlib.metadata.version("accelerate")).base_version + ) > version.parse(accelerate_version) + return unittest.skipUnless( + correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}." + )(test_case) + + return decorator + + +def require_bitsandbytes_version_greater(bnb_version): + def decorator(test_case): + correct_bnb_version = is_bitsandbytes_available() and version.parse( + version.parse(importlib.metadata.version("bitsandbytes")).base_version + ) > version.parse(bnb_version) + return unittest.skipUnless( + correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}." + )(test_case) + + return decorator + + +def require_hf_hub_version_greater(hf_hub_version): + def decorator(test_case): + correct_hf_hub_version = version.parse( + version.parse(importlib.metadata.version("huggingface_hub")).base_version + ) > version.parse(hf_hub_version) + return unittest.skipUnless( + correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}." + )(test_case) + + return decorator + + +def require_gguf_version_greater_or_equal(gguf_version): + def decorator(test_case): + correct_gguf_version = is_gguf_available() and version.parse( + version.parse(importlib.metadata.version("gguf")).base_version + ) >= version.parse(gguf_version) + return unittest.skipUnless( + correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}." + )(test_case) + + return decorator + + +def require_torchao_version_greater_or_equal(torchao_version): + def decorator(test_case): + correct_torchao_version = is_torchao_available() and version.parse( + version.parse(importlib.metadata.version("torchao")).base_version + ) >= version.parse(torchao_version) + return unittest.skipUnless( + correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}." + )(test_case) + + return decorator + + +def require_kernels_version_greater_or_equal(kernels_version): + def decorator(test_case): + correct_kernels_version = is_kernels_available() and version.parse( + version.parse(importlib.metadata.version("kernels")).base_version + ) >= version.parse(kernels_version) + return unittest.skipUnless( + correct_kernels_version, f"Test requires kernels with version greater than {kernels_version}." 
+ )(test_case) + + return decorator + + +def deprecate_after_peft_backend(test_case): + """ + Decorator marking a test that will be skipped after PEFT backend + """ + return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case) + + +def get_python_version(): + sys_info = sys.version_info + major, minor = sys_info.major, sys_info.minor + return major, minor + + +def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray: + if isinstance(arry, str): + if local_path is not None: + # local_path can be passed to correct images of tests + return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix() + elif arry.startswith("http://") or arry.startswith("https://"): + response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = np.load(BytesIO(response.content)) + elif os.path.isfile(arry): + arry = np.load(arry) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path" + ) + elif isinstance(arry, np.ndarray): + pass + else: + raise ValueError( + "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a" + " ndarray." + ) + + return arry + + +def load_pt(url: str, map_location: Optional[str] = None, weights_only: Optional[bool] = True): + response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT) + response.raise_for_status() + arry = torch.load(BytesIO(response.content), map_location=map_location, weights_only=weights_only) + return arry + + +def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image: + """ + Loads `image` to a PIL Image. + + Args: + image (`str` or `PIL.Image.Image`): + The image to convert to the PIL Image format. + Returns: + `PIL.Image.Image`: + A PIL Image. + """ + if isinstance(image, str): + if image.startswith("http://") or image.startswith("https://"): + image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw) + elif os.path.isfile(image): + image = PIL.Image.open(image) + else: + raise ValueError( + f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path" + ) + elif isinstance(image, PIL.Image.Image): + image = image + else: + raise ValueError( + "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image." + ) + image = PIL.ImageOps.exif_transpose(image) + image = image.convert("RGB") + return image + + +def preprocess_image(image: PIL.Image, batch_size: int): + w, h = image.size + w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8 + image = image.resize((w, h), resample=PIL.Image.LANCZOS) + image = np.array(image).astype(np.float32) / 255.0 + image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size) + image = torch.from_numpy(image) + return 2.0 * image - 1.0 + + +def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str: + if output_gif_path is None: + output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name + + image[0].save( + output_gif_path, + save_all=True, + append_images=image[1:], + optimize=False, + duration=100, + loop=0, + ) + return output_gif_path + + +@contextmanager +def buffered_writer(raw_f): + f = io.BufferedWriter(raw_f) + yield f + f.flush() + + +def export_to_ply(mesh, output_ply_path: str = None): + """ + Write a PLY file for a mesh. 
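+    The `mesh` object is expected to expose `verts`, `faces` and per-vertex `vertex_channels["R"]`,
+    `["G"]`, `["B"]` tensors (for example, Shap-E style mesh outputs). If `output_ply_path` is omitted,
+    a temporary `.ply` file is created.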
+ """ + if output_ply_path is None: + output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name + + coords = mesh.verts.detach().cpu().numpy() + faces = mesh.faces.cpu().numpy() + rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1) + + with buffered_writer(open(output_ply_path, "wb")) as f: + f.write(b"ply\n") + f.write(b"format binary_little_endian 1.0\n") + f.write(bytes(f"element vertex {len(coords)}\n", "ascii")) + f.write(b"property float x\n") + f.write(b"property float y\n") + f.write(b"property float z\n") + if rgb is not None: + f.write(b"property uchar red\n") + f.write(b"property uchar green\n") + f.write(b"property uchar blue\n") + if faces is not None: + f.write(bytes(f"element face {len(faces)}\n", "ascii")) + f.write(b"property list uchar int vertex_index\n") + f.write(b"end_header\n") + + if rgb is not None: + rgb = (rgb * 255.499).round().astype(int) + vertices = [ + (*coord, *rgb) + for coord, rgb in zip( + coords.tolist(), + rgb.tolist(), + ) + ] + format = struct.Struct("<3f3B") + for item in vertices: + f.write(format.pack(*item)) + else: + format = struct.Struct("<3f") + for vertex in coords.tolist(): + f.write(format.pack(*vertex)) + + if faces is not None: + format = struct.Struct(" str: + if is_opencv_available(): + import cv2 + else: + raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video")) + if output_video_path is None: + output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name + + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + h, w, c = video_frames[0].shape + video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h)) + for i in range(len(video_frames)): + img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR) + video_writer.write(img) + return output_video_path + + +def load_hf_numpy(path) -> np.ndarray: + base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main" + + if not path.startswith("http://") and not path.startswith("https://"): + path = os.path.join(base_url, urllib.parse.quote(path)) + + return load_numpy(path) + + +# --- pytest conf functions --- # + +# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once +pytest_opt_registered = {} + + +def pytest_addoption_shared(parser): + """ + This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. + + It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` + option. + + """ + option = "--make-reports" + if option not in pytest_opt_registered: + parser.addoption( + option, + action="store", + default=False, + help="generate report files. The value of this option is used as a prefix to report names", + ) + pytest_opt_registered[option] = 1 + + +def pytest_terminal_summary_main(tr, id): + """ + Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current + directory. The report files are prefixed with the test suite name. + + This function emulates --duration and -rA pytest arguments. + + This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined + there. + + Args: + - tr: `terminalreporter` passed from `conftest.py` + - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is + needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. 
+ + NB: this functions taps into a private _pytest API and while unlikely, it could break should + pytest do internal changes - also it calls default internal methods of terminalreporter which + can be hijacked by various `pytest-` plugins and interfere. + + """ + from _pytest.config import create_terminal_writer + + if not len(id): + id = "tests" + + config = tr.config + orig_writer = config.get_terminal_writer() + orig_tbstyle = config.option.tbstyle + orig_reportchars = tr.reportchars + + dir = "reports" + Path(dir).mkdir(parents=True, exist_ok=True) + report_files = { + k: f"{dir}/{id}_{k}.txt" + for k in [ + "durations", + "errors", + "failures_long", + "failures_short", + "failures_line", + "passes", + "stats", + "summary_short", + "warnings", + ] + } + + # custom durations report + # note: there is no need to call pytest --durations=XX to get this separate report + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66 + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if dlist: + dlist.sort(key=lambda x: x.duration, reverse=True) + with open(report_files["durations"], "w") as f: + durations_min = 0.05 # sec + f.write("slowest durations\n") + for i, rep in enumerate(dlist): + if rep.duration < durations_min: + f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted") + break + f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n") + + def summary_failures_short(tr): + # expecting that the reports were --tb=long (default) so we chop them off here to the last frame + reports = tr.getreports("failed") + if not reports: + return + tr.write_sep("=", "FAILURES SHORT STACK") + for rep in reports: + msg = tr._getfailureheadline(rep) + tr.write_sep("_", msg, red=True, bold=True) + # chop off the optional leading extra frames, leaving only the last one + longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S) + tr._tw.line(longrepr) + # note: not printing out any rep.sections to keep the report short + + # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each + # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814 + # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g. 
+ # pytest-instafail does that) + + # report failures with line/short/long styles + config.option.tbstyle = "auto" # full tb + with open(report_files["failures_long"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + # config.option.tbstyle = "short" # short tb + with open(report_files["failures_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + summary_failures_short(tr) + + config.option.tbstyle = "line" # one line per error + with open(report_files["failures_line"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_failures() + + with open(report_files["errors"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_errors() + + with open(report_files["warnings"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_warnings() # normal warnings + tr.summary_warnings() # final warnings + + tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary()) + with open(report_files["passes"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_passes() + + with open(report_files["summary_short"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.short_test_summary() + + with open(report_files["stats"], "w") as f: + tr._tw = create_terminal_writer(config, f) + tr.summary_stats() + + # restore: + tr._tw = orig_writer + tr.reportchars = orig_reportchars + config.option.tbstyle = orig_tbstyle + + +# Adapted from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers..testing_utils.py#L1905 +def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None): + """ + To decorate flaky tests (methods or entire classes). They will be retried on failures. + + Args: + max_attempts (`int`, *optional*, defaults to 5): + The maximum number of attempts to retry the flaky test. + wait_before_retry (`float`, *optional*): + If provided, will wait that number of seconds before retrying the test. + description (`str`, *optional*): + A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors, + etc.) + """ + + def decorator(obj): + # If decorating a class, wrap each test method on it + if inspect.isclass(obj): + for attr_name, attr_value in list(obj.__dict__.items()): + if callable(attr_value) and attr_name.startswith("test"): + # recursively decorate the method + setattr(obj, attr_name, decorator(attr_value)) + return obj + + # Otherwise we're decorating a single test function / method + @functools.wraps(obj) + def wrapper(*args, **kwargs): + retry_count = 1 + while retry_count < max_attempts: + try: + return obj(*args, **kwargs) + except Exception as err: + msg = ( + f"[FLAKY] {description or obj.__name__!r} " + f"failed on attempt {retry_count}/{max_attempts}: {err}" + ) + print(msg, file=sys.stderr) + if wait_before_retry is not None: + time.sleep(wait_before_retry) + retry_count += 1 + + return obj(*args, **kwargs) + + return wrapper + + return decorator + + +# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers..testing_utils.py#L1787 +def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None): + """ + To run a test in a subprocess. In particular, this can avoid (GPU) memory issue. + + Args: + test_case (`unittest.TestCase`): + The test that will run `target_func`. 
+ target_func (`Callable`): + The function implementing the actual testing logic. + inputs (`dict`, *optional*, defaults to `None`): + The inputs that will be passed to `target_func` through an (input) queue. + timeout (`int`, *optional*, defaults to `None`): + The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env. + variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`. + """ + if timeout is None: + timeout = int(os.environ.get("PYTEST_TIMEOUT", 600)) + + start_methohd = "spawn" + ctx = multiprocessing.get_context(start_methohd) + + input_queue = ctx.Queue(1) + output_queue = ctx.JoinableQueue(1) + + # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle. + input_queue.put(inputs, timeout=timeout) + + process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout)) + process.start() + # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents + # the test to exit properly. + try: + results = output_queue.get(timeout=timeout) + output_queue.task_done() + except Exception as e: + process.terminate() + test_case.fail(e) + process.join(timeout=timeout) + + if results["error"] is not None: + test_case.fail(f"{results['error']}") + + +class CaptureLogger: + """ + Args: + Context manager to capture `logging` streams + logger: 'logging` logger object + Returns: + The captured output is available via `self.out` + Example: + ```python + >>> from diffusers import logging + >>> from diffusers..testing_utils import CaptureLogger + + >>> msg = "Testing 1, 2, 3" + >>> logging.set_verbosity_info() + >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py") + >>> with CaptureLogger(logger) as cl: + ... logger.info(msg) + >>> assert cl.out, msg + "\n" + ``` + """ + + def __init__(self, logger): + self.logger = logger + self.io = StringIO() + self.sh = logging.StreamHandler(self.io) + self.out = "" + + def __enter__(self): + self.logger.addHandler(self.sh) + return self + + def __exit__(self, *exc): + self.logger.removeHandler(self.sh) + self.out = self.io.getvalue() + + def __repr__(self): + return f"captured: {self.out}\n" + + +def enable_full_determinism(): + """ + Helper function for reproducible behavior during distributed training. See + - https://pytorch.org/docs/stable/notes/randomness.html for pytorch + """ + # Enable PyTorch deterministic mode. 
This potentially requires either the environment + # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set, + # depending on the CUDA version, so we set them both here + os.environ["CUDA_LAUNCH_BLOCKING"] = "1" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8" + torch.use_deterministic_algorithms(True) + + # Enable CUDNN deterministic mode + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + torch.backends.cuda.matmul.allow_tf32 = False + + +def disable_full_determinism(): + os.environ["CUDA_LAUNCH_BLOCKING"] = "0" + os.environ["CUBLAS_WORKSPACE_CONFIG"] = "" + torch.use_deterministic_algorithms(False) + + +# Utils for custom and alternative accelerator devices +def _is_torch_fp16_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float16).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +def _is_torch_fp64_available(device): + if not is_torch_available(): + return False + + import torch + + device = torch.device(device) + + try: + x = torch.zeros((2, 2), dtype=torch.float64).to(device) + _ = torch.mul(x, x) + return True + + except Exception as e: + if device.type == "cuda": + raise ValueError( + f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}" + ) + + return False + + +# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch +if is_torch_available(): + # Behaviour flags + BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True} + + # Function definitions + BACKEND_EMPTY_CACHE = { + "cuda": torch.cuda.empty_cache, + "xpu": torch.xpu.empty_cache, + "cpu": None, + "mps": torch.mps.empty_cache, + "default": None, + } + BACKEND_DEVICE_COUNT = { + "cuda": torch.cuda.device_count, + "xpu": torch.xpu.device_count, + "cpu": lambda: 0, + "mps": lambda: 0, + "default": 0, + } + BACKEND_MANUAL_SEED = { + "cuda": torch.cuda.manual_seed, + "xpu": torch.xpu.manual_seed, + "cpu": torch.manual_seed, + "mps": torch.mps.manual_seed, + "default": torch.manual_seed, + } + BACKEND_RESET_PEAK_MEMORY_STATS = { + "cuda": torch.cuda.reset_peak_memory_stats, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_RESET_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.reset_max_memory_allocated, + "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None), + "cpu": None, + "mps": None, + "default": None, + } + BACKEND_MAX_MEMORY_ALLOCATED = { + "cuda": torch.cuda.max_memory_allocated, + "xpu": getattr(torch.xpu, "max_memory_allocated", None), + "cpu": 0, + "mps": 0, + "default": 0, + } + BACKEND_SYNCHRONIZE = { + "cuda": torch.cuda.synchronize, + "xpu": getattr(torch.xpu, "synchronize", None), + "cpu": None, + "mps": None, + "default": None, + } + + +# This dispatches a defined function according to the accelerator from the function definitions. 
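+# For example (illustrative): with torch_device == "cuda", backend_empty_cache("cuda") resolves to
+# torch.cuda.empty_cache() and calls it; on "cpu" the table entry is None and is returned as-is
+# rather than called, and devices missing from a table fall back to its "default" entry.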
+def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs): + if device not in dispatch_table: + return dispatch_table["default"](*args, **kwargs) + + fn = dispatch_table[device] + + # Some device agnostic functions return values. Need to guard against 'None' instead at + # user level + if not callable(fn): + return fn + + return fn(*args, **kwargs) + + +# These are callables which automatically dispatch the function specific to the accelerator +def backend_manual_seed(device: str, seed: int): + return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed) + + +def backend_synchronize(device: str): + return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE) + + +def backend_empty_cache(device: str): + return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE) + + +def backend_device_count(device: str): + return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT) + + +def backend_reset_peak_memory_stats(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS) + + +def backend_reset_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED) + + +def backend_max_memory_allocated(device: str): + return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED) + + +# These are callables which return boolean behaviour flags and can be used to specify some +# device agnostic alternative where the feature is unsupported. +def backend_supports_training(device: str): + if not is_torch_available(): + return False + + if device not in BACKEND_SUPPORTS_TRAINING: + device = "default" + + return BACKEND_SUPPORTS_TRAINING[device] + + +# Guard for when Torch is not available +if is_torch_available(): + # Update device function dict mapping + def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str): + try: + # Try to import the function directly + spec_fn = getattr(device_spec_module, attribute_name) + device_fn_dict[torch_device] = spec_fn + except AttributeError as e: + # If the function doesn't exist, and there is no default, throw an error + if "default" not in device_fn_dict: + raise AttributeError( + f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found." + ) from e + + if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ: + device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"] + if not Path(device_spec_path).is_file(): + raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}") + + try: + import_name = device_spec_path[: device_spec_path.index(".py")] + except ValueError as e: + raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e + + device_spec_module = importlib.import_module(import_name) + + try: + device_name = device_spec_module.DEVICE_NAME + except AttributeError: + raise AttributeError("Device spec file did not contain `DEVICE_NAME`") + + if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name: + msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n" + msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name." + raise ValueError(msg) + + torch_device = device_name + + # Add one entry here for each `BACKEND_*` dictionary. 
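+    # A minimal device-spec module (illustrative sketch; the `torch_npu` import and the "npu"
+    # device are hypothetical) could look like:
+    #
+    #     import torch_npu
+    #
+    #     DEVICE_NAME = "npu"
+    #     MANUAL_SEED_FN = torch_npu.npu.manual_seed
+    #     EMPTY_CACHE_FN = torch_npu.npu.empty_cache
+    #     DEVICE_COUNT_FN = torch_npu.npu.device_count
+    #     SUPPORTS_TRAINING = True
+    #
+    # and would be selected by pointing DIFFUSERS_TEST_DEVICE_SPEC at that file; attributes that
+    # are omitted keep falling back to the "default" entry of the corresponding BACKEND_* mapping.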
+ update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN") + update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN") + update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN") + update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING") + update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN") + update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN") + update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN") + + +# Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers..testing_utils.py#L3090 + +# Type definition of key used in `Expectations` class. +DeviceProperties = Tuple[Union[str, None], Union[int, None]] + + +@functools.lru_cache +def get_device_properties() -> DeviceProperties: + """ + Get environment device properties. + """ + if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM: + import torch + + major, _ = torch.cuda.get_device_capability() + if IS_ROCM_SYSTEM: + return ("rocm", major) + else: + return ("cuda", major) + elif IS_XPU_SYSTEM: + import torch + + # To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def + arch = torch.xpu.get_device_capability()["architecture"] + gen_mask = 0x000000FF00000000 + gen = (arch & gen_mask) >> 32 + return ("xpu", gen) + else: + return (torch_device, None) + + +if TYPE_CHECKING: + DevicePropertiesUserDict = UserDict[DeviceProperties, Any] +else: + DevicePropertiesUserDict = UserDict + +if is_torch_available(): + from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS + from diffusers.hooks.group_offloading import ( + _GROUP_ID_LAZY_LEAF, + _compute_group_hash, + _find_parent_module_in_module_dict, + _gather_buffers_with_no_group_offloading_parent, + _gather_parameters_with_no_group_offloading_parent, + ) + + def _get_expected_safetensors_files( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> Set[str]: + expected_files = set() + + def get_hashed_filename(group_id: str) -> str: + short_hash = _compute_group_hash(group_id) + return os.path.join(offload_to_disk_path, f"group_{short_hash}.safetensors") + + if offload_type == "block_level": + if num_blocks_per_group is None: + raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.") + + # Handle groups of ModuleList and Sequential blocks + unmatched_modules = [] + for name, submodule in module.named_children(): + if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)): + unmatched_modules.append(module) + continue + + for i in range(0, len(submodule), num_blocks_per_group): + current_modules = submodule[i : i + num_blocks_per_group] + if not current_modules: + continue + group_id = f"{name}_{i}_{i + len(current_modules) - 1}" + expected_files.add(get_hashed_filename(group_id)) + + # Handle the group for unmatched top-level modules and parameters + for module in unmatched_modules: + expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group")) + + elif offload_type == "leaf_level": + # Handle leaf-level module groups + for name, submodule in module.named_modules(): + if isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS): + # These groups will always have parameters, so a file is expected + 
expected_files.add(get_hashed_filename(name)) + + # Handle groups for non-leaf parameters/buffers + modules_with_group_offloading = { + name for name, sm in module.named_modules() if isinstance(sm, _GO_LC_SUPPORTED_PYTORCH_LAYERS) + } + parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading) + buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading) + + all_orphans = parameters + buffers + if all_orphans: + parent_to_tensors = {} + module_dict = dict(module.named_modules()) + for tensor_name, _ in all_orphans: + parent_name = _find_parent_module_in_module_dict(tensor_name, module_dict) + if parent_name not in parent_to_tensors: + parent_to_tensors[parent_name] = [] + parent_to_tensors[parent_name].append(tensor_name) + + for parent_name in parent_to_tensors: + # A file is expected for each parent that gathers orphaned tensors + expected_files.add(get_hashed_filename(parent_name)) + expected_files.add(get_hashed_filename(_GROUP_ID_LAZY_LEAF)) + + else: + raise ValueError(f"Unsupported offload_type: {offload_type}") + + return expected_files + + def _check_safetensors_serialization( + module: torch.nn.Module, + offload_to_disk_path: str, + offload_type: str, + num_blocks_per_group: Optional[int] = None, + ) -> bool: + if not os.path.isdir(offload_to_disk_path): + return False, None, None + + expected_files = _get_expected_safetensors_files( + module, offload_to_disk_path, offload_type, num_blocks_per_group + ) + actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors"))) + missing_files = expected_files - actual_files + extra_files = actual_files - expected_files + + is_correct = not missing_files and not extra_files + return is_correct, extra_files, missing_files + + +class Expectations(DevicePropertiesUserDict): + def get_expectation(self) -> Any: + """ + Find best matching expectation based on environment device properties. + """ + return self.find_expectation(get_device_properties()) + + @staticmethod + def is_default(key: DeviceProperties) -> bool: + return all(p is None for p in key) + + @staticmethod + def score(key: DeviceProperties, other: DeviceProperties) -> int: + """ + Returns score indicating how similar two instances of the `Properties` tuple are. Points are calculated using + bits, but documented as int. Rules are as follows: + * Matching `type` gives 8 points. + * Semi-matching `type`, for example cuda and rocm, gives 4 points. + * Matching `major` (compute capability major version) gives 2 points. + * Default expectation (if present) gives 1 points. + """ + (device_type, major) = key + (other_device_type, other_major) = other + + score = 0b0 + if device_type == other_device_type: + score |= 0b1000 + elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]: + score |= 0b100 + + if major == other_major and other_major is not None: + score |= 0b10 + + if Expectations.is_default(other): + score |= 0b1 + + return int(score) + + def find_expectation(self, key: DeviceProperties = (None, None)) -> Any: + """ + Find best matching expectation based on provided device properties. 
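+        Example (illustrative values):
+
+            expectations = Expectations({("cuda", 8): 0.17, ("rocm", None): 0.19, (None, None): 0.20})
+            expectations.find_expectation(("cuda", 8))    # -> 0.17 (exact type and major match)
+            expectations.find_expectation(("rocm", 9))    # -> 0.19 (device type match beats the default)
+            expectations.find_expectation(("xpu", None))  # -> 0.20 (falls back to the default entry)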
+ """ + (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0])) + + if Expectations.score(key, result_key) == 0: + raise ValueError(f"No matching expectation found for {key}") + + return result + + def __repr__(self): + return f"{self.data}" diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_config_docstrings.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_config_docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..d39fe6a618d4d7e0476f32578a488aa3402abde6 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_config_docstrings.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import inspect +import os +import re + + +# All paths are set with the intent you should run this script from the root of the repo with the command +# python utils/check_config_docstrings.py +PATH_TO_TRANSFORMERS = "src/transformers" + + +# This is to make sure the transformers module imported is the one in the repo. +spec = importlib.util.spec_from_file_location( + "transformers", + os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), + submodule_search_locations=[PATH_TO_TRANSFORMERS], +) +transformers = spec.loader.load_module() + +CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING + +# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. +# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` +_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)") + + +CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = { + "CLIPConfigMixin", + "DecisionTransformerConfigMixin", + "EncoderDecoderConfigMixin", + "RagConfigMixin", + "SpeechEncoderDecoderConfigMixin", + "VisionEncoderDecoderConfigMixin", + "VisionTextDualEncoderConfigMixin", +} + + +def check_config_docstrings_have_checkpoints(): + configs_without_checkpoint = [] + + for config_class in list(CONFIG_MAPPING.values()): + checkpoint_found = False + + # source code of `config_class` + config_source = inspect.getsource(config_class) + checkpoints = _re_checkpoint.findall(config_source) + + for checkpoint in checkpoints: + # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
+ # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` + ckpt_name, ckpt_link = checkpoint + + # verify the checkpoint name corresponds to the checkpoint link + ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}" + if ckpt_link == ckpt_link_from_name: + checkpoint_found = True + break + + name = config_class.__name__ + if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: + configs_without_checkpoint.append(name) + + if len(configs_without_checkpoint) > 0: + message = "\n".join(sorted(configs_without_checkpoint)) + raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}") + + +if __name__ == "__main__": + check_config_docstrings_have_checkpoints() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_copies.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_copies.py new file mode 100644 index 0000000000000000000000000000000000000000..001366c1905f5fda8a8e22f38b532013657e8ee3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_copies.py @@ -0,0 +1,222 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import glob +import os +import re +import subprocess + + +# All paths are set with the intent you should run this script from the root of the repo with the command +# python utils/check_copies.py +DIFFUSERS_PATH = "src/diffusers" +REPO_PATH = "." + + +def _should_continue(line, indent): + return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None + + +def find_code_in_diffusers(object_name): + """Find and return the code source code of `object_name`.""" + parts = object_name.split(".") + i = 0 + + # First let's find the module where our object lives. + module = parts[i] + while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")): + i += 1 + if i < len(parts): + module = os.path.join(module, parts[i]) + if i >= len(parts): + raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.") + + with open( + os.path.join(DIFFUSERS_PATH, f"{module}.py"), + "r", + encoding="utf-8", + newline="\n", + ) as f: + lines = f.readlines() + + # Now let's find the class / func in the code! + indent = "" + line_index = 0 + for name in parts[i + 1 :]: + while ( + line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None + ): + line_index += 1 + indent += " " + line_index += 1 + + if line_index >= len(lines): + raise ValueError(f" {object_name} does not match any function or class in {module}.") + + # We found the beginning of the class / func, now let's find the end (when the indent diminishes). + start_index = line_index + while line_index < len(lines) and _should_continue(lines[line_index], indent): + line_index += 1 + # Clean up empty lines at the end (if any). 
+ while len(lines[line_index - 1]) <= 1: + line_index -= 1 + + code_lines = lines[start_index:line_index] + return "".join(code_lines) + + +_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") +_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)") +_re_fill_pattern = re.compile(r"]*>") + + +def get_indent(code): + lines = code.split("\n") + idx = 0 + while idx < len(lines) and len(lines[idx]) == 0: + idx += 1 + if idx < len(lines): + return re.search(r"^(\s*)\S", lines[idx]).groups()[0] + return "" + + +def run_ruff(code): + command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"] + process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + stdout, _ = process.communicate(input=code.encode()) + return stdout.decode() + + +def stylify(code: str) -> str: + """ + Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. + As `ruff` does not provide a python api this cannot be done on the fly. + + Args: + code (`str`): The code to format. + + Returns: + `str`: The formatted code. + """ + has_indent = len(get_indent(code)) > 0 + if has_indent: + code = f"class Bla:\n{code}" + formatted_code = run_ruff(code) + return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code + + +def is_copy_consistent(filename, overwrite=False): + """ + Check if the code commented as a copy in `filename` matches the original. + Return the differences or overwrites the content depending on `overwrite`. + """ + with open(filename, "r", encoding="utf-8", newline="\n") as f: + lines = f.readlines() + diffs = [] + line_index = 0 + # Not a for loop cause `lines` is going to change (if `overwrite=True`). + while line_index < len(lines): + search = _re_copy_warning.search(lines[line_index]) + if search is None: + line_index += 1 + continue + + # There is some copied code here, let's retrieve the original. + indent, object_name, replace_pattern = search.groups() + theoretical_code = find_code_in_diffusers(object_name) + theoretical_indent = get_indent(theoretical_code) + + start_index = line_index + 1 if indent == theoretical_indent else line_index + 2 + indent = theoretical_indent + line_index = start_index + + # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. + should_continue = True + while line_index < len(lines) and should_continue: + line_index += 1 + if line_index >= len(lines): + break + line = lines[line_index] + should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None + # Clean up empty lines at the end (if any). + while len(lines[line_index - 1]) <= 1: + line_index -= 1 + + observed_code_lines = lines[start_index:line_index] + observed_code = "".join(observed_code_lines) + + # Remove any nested `Copied from` comments to avoid circular copies + theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None] + theoretical_code = "\n".join(theoretical_code) + + # Before comparing, use the `replace_pattern` on the original code. 
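+ # A `Copied from` comment of the form `# Copied from diffusers.models.foo.Bar with Bar->Baz all-casing`
+ # (illustrative names) yields `replace_pattern = "with Bar->Baz all-casing"`; each `obj1->obj2` pair below is
+ # substituted into the theoretical code, and `all-casing` additionally rewrites the lower/upper-case variants.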
+ if len(replace_pattern) > 0: + patterns = replace_pattern.replace("with", "").split(",") + patterns = [_re_replace_pattern.search(p) for p in patterns] + for pattern in patterns: + if pattern is None: + continue + obj1, obj2, option = pattern.groups() + theoretical_code = re.sub(obj1, obj2, theoretical_code) + if option.strip() == "all-casing": + theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code) + theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code) + + # stylify after replacement. To be able to do that, we need the header (class or function definition) + # from the previous line + theoretical_code = stylify(lines[start_index - 1] + theoretical_code) + theoretical_code = theoretical_code[len(lines[start_index - 1]) :] + + # Test for a diff and act accordingly. + if observed_code != theoretical_code: + diffs.append([object_name, start_index]) + if overwrite: + lines = lines[:start_index] + [theoretical_code] + lines[line_index:] + line_index = start_index + 1 + + if overwrite and len(diffs) > 0: + # Warn the user a file has been modified. + print(f"Detected changes, rewriting {filename}.") + with open(filename, "w", encoding="utf-8", newline="\n") as f: + f.writelines(lines) + return diffs + + +def check_copies(overwrite: bool = False): + all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True) + diffs = [] + for filename in all_files: + new_diffs = is_copy_consistent(filename, overwrite) + diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs] + if not overwrite and len(diffs) > 0: + diff = "\n".join(diffs) + raise Exception( + "Found the following copy inconsistencies:\n" + + diff + + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--fix_and_overwrite", + action="store_true", + help="Whether to fix inconsistencies.", + ) + args = parser.parse_args() + + check_copies(args.fix_and_overwrite) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_doc_toc.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_doc_toc.py new file mode 100644 index 0000000000000000000000000000000000000000..0dd02cde86c1286c7fb2cc00d469d31714ce3a09 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_doc_toc.py @@ -0,0 +1,210 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +from collections import defaultdict + +import yaml + + +PATH_TO_TOC = "docs/source/en/_toctree.yml" + + +def clean_doc_toc(doc_list): + """ + Cleans the table of content of the model documentation by removing duplicates and sorting models alphabetically. 
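+ As a hypothetical example, ``[{"local": "overview", "title": "Overview"}, {"local": "b", "title": "B"},
+ {"local": "a", "title": "A"}]`` becomes ``[{"local": "overview", "title": "Overview"}, {"local": "a", "title": "A"},
+ {"local": "b", "title": "B"}]``: the overview entry is kept first and the remaining entries are sorted by title.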
+ """ + counts = defaultdict(int) + overview_doc = [] + new_doc_list = [] + for doc in doc_list: + if "local" in doc: + counts[doc["local"]] += 1 + + if doc["title"].lower() == "overview": + overview_doc.append({"local": doc["local"], "title": doc["title"]}) + else: + new_doc_list.append(doc) + + doc_list = new_doc_list + duplicates = [key for key, value in counts.items() if value > 1] + + new_doc = [] + for duplicate_key in duplicates: + titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key}) + if len(titles) > 1: + raise ValueError( + f"{duplicate_key} is present several times in the documentation table of content at " + "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the " + "others." + ) + # Only add this once + new_doc.append({"local": duplicate_key, "title": titles[0]}) + + # Add none duplicate-keys + new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1]) + new_doc = sorted(new_doc, key=lambda s: s["title"].lower()) + + # "overview" gets special treatment and is always first + if len(overview_doc) > 1: + raise ValueError("{doc_list} has two 'overview' docs which is not allowed.") + + overview_doc.extend(new_doc) + + # Sort + return overview_doc + + +def check_scheduler_doc(overwrite=False): + with open(PATH_TO_TOC, encoding="utf-8") as f: + content = yaml.safe_load(f.read()) + + # Get to the API doc + api_idx = 0 + while content[api_idx]["title"] != "API": + api_idx += 1 + api_doc = content[api_idx]["sections"] + + # Then to the model doc + scheduler_idx = 0 + while api_doc[scheduler_idx]["title"] != "Schedulers": + scheduler_idx += 1 + + scheduler_doc = api_doc[scheduler_idx]["sections"] + new_scheduler_doc = clean_doc_toc(scheduler_doc) + + diff = False + if new_scheduler_doc != scheduler_doc: + diff = True + if overwrite: + api_doc[scheduler_idx]["sections"] = new_scheduler_doc + + if diff: + if overwrite: + content[api_idx]["sections"] = api_doc + with open(PATH_TO_TOC, "w", encoding="utf-8") as f: + f.write(yaml.dump(content, allow_unicode=True)) + else: + raise ValueError( + "The model doc part of the table of content is not properly sorted, run `make style` to fix this." 
+ ) + + +def check_pipeline_doc(overwrite=False): + with open(PATH_TO_TOC, encoding="utf-8") as f: + content = yaml.safe_load(f.read()) + + # Get to the API doc + api_idx = 0 + while content[api_idx]["title"] != "API": + api_idx += 1 + api_doc = content[api_idx]["sections"] + + # Then to the model doc + pipeline_idx = 0 + while api_doc[pipeline_idx]["title"] != "Pipelines": + pipeline_idx += 1 + + diff = False + pipeline_docs = api_doc[pipeline_idx]["sections"] + new_pipeline_docs = [] + + # sort sub pipeline docs + for pipeline_doc in pipeline_docs: + if "sections" in pipeline_doc: + sub_pipeline_doc = pipeline_doc["sections"] + new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc) + if new_sub_pipeline_doc != sub_pipeline_doc: + diff = True + if overwrite: + pipeline_doc["sections"] = new_sub_pipeline_doc + new_pipeline_docs.append(pipeline_doc) + + # sort overall pipeline doc + new_pipeline_docs = clean_doc_toc(new_pipeline_docs) + + if new_pipeline_docs != pipeline_docs: + diff = True + if overwrite: + api_doc[pipeline_idx]["sections"] = new_pipeline_docs + + if diff: + if overwrite: + content[api_idx]["sections"] = api_doc + with open(PATH_TO_TOC, "w", encoding="utf-8") as f: + f.write(yaml.dump(content, allow_unicode=True)) + else: + raise ValueError( + "The model doc part of the table of content is not properly sorted, run `make style` to fix this." + ) + + +def check_model_doc(overwrite=False): + with open(PATH_TO_TOC, encoding="utf-8") as f: + content = yaml.safe_load(f.read()) + + # Get to the API doc + api_idx = 0 + while content[api_idx]["title"] != "API": + api_idx += 1 + api_doc = content[api_idx]["sections"] + + # Then to the model doc + model_idx = 0 + while api_doc[model_idx]["title"] != "Models": + model_idx += 1 + + diff = False + model_docs = api_doc[model_idx]["sections"] + new_model_docs = [] + + # sort sub model docs + for model_doc in model_docs: + if "sections" in model_doc: + sub_model_doc = model_doc["sections"] + new_sub_model_doc = clean_doc_toc(sub_model_doc) + if new_sub_model_doc != sub_model_doc: + diff = True + if overwrite: + model_doc["sections"] = new_sub_model_doc + new_model_docs.append(model_doc) + + # sort overall model doc + new_model_docs = clean_doc_toc(new_model_docs) + + if new_model_docs != model_docs: + diff = True + if overwrite: + api_doc[model_idx]["sections"] = new_model_docs + + if diff: + if overwrite: + content[api_idx]["sections"] = api_doc + with open(PATH_TO_TOC, "w", encoding="utf-8") as f: + f.write(yaml.dump(content, allow_unicode=True)) + else: + raise ValueError( + "The model doc part of the table of content is not properly sorted, run `make style` to fix this." + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") + args = parser.parse_args() + + check_scheduler_doc(args.fix_and_overwrite) + check_pipeline_doc(args.fix_and_overwrite) + check_model_doc(args.fix_and_overwrite) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_dummies.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_dummies.py new file mode 100644 index 0000000000000000000000000000000000000000..04a670c2f5d91c76b32b2c6d200b342675df6bb0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_dummies.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import re + + +# All paths are set with the intent you should run this script from the root of the repo with the command +# python utils/check_dummies.py +PATH_TO_DIFFUSERS = "src/diffusers" + +# Matches is_xxx_available() +_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)") +# Matches from xxx import bla +_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") + + +DUMMY_CONSTANT = """ +{0} = None +""" + +DUMMY_CLASS = """ +class {0}(metaclass=DummyObject): + _backends = {1} + + def __init__(self, *args, **kwargs): + requires_backends(self, {1}) + + @classmethod + def from_config(cls, *args, **kwargs): + requires_backends(cls, {1}) + + @classmethod + def from_pretrained(cls, *args, **kwargs): + requires_backends(cls, {1}) +""" + + +DUMMY_FUNCTION = """ +def {0}(*args, **kwargs): + requires_backends({0}, {1}) +""" + + +def find_backend(line): + """Find one (or multiple) backend in a code line of the init.""" + backends = _re_backend.findall(line) + if len(backends) == 0: + return None + + return "_and_".join(backends) + + +def read_init(): + """Read the init and extracts PyTorch, TensorFlow, SentencePiece and Tokenizers objects.""" + with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f: + lines = f.readlines() + + # Get to the point we do the actual imports for type checking + line_index = 0 + while not lines[line_index].startswith("if TYPE_CHECKING"): + line_index += 1 + + backend_specific_objects = {} + # Go through the end of the file + while line_index < len(lines): + # If the line contains is_backend_available, we grab all objects associated with the `else` block + backend = find_backend(lines[line_index]) + if backend is not None: + while not lines[line_index].startswith(" else:"): + line_index += 1 + line_index += 1 + objects = [] + # Until we unindent, add backend objects to the list + while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): + line = lines[line_index] + single_line_import_search = _re_single_line_import.search(line) + if single_line_import_search is not None: + objects.extend(single_line_import_search.groups()[0].split(", ")) + elif line.startswith(" " * 12): + objects.append(line[12:-2]) + line_index += 1 + + if len(objects) > 0: + backend_specific_objects[backend] = objects + else: + line_index += 1 + + return backend_specific_objects + + +def create_dummy_object(name, backend_name): + """Create the code for the dummy object corresponding to `name`.""" + if name.isupper(): + return DUMMY_CONSTANT.format(name) + elif name.islower(): + return DUMMY_FUNCTION.format(name, backend_name) + else: + return DUMMY_CLASS.format(name, backend_name) + + +def create_dummy_files(backend_specific_objects=None): + """Create the content of the dummy files.""" + if backend_specific_objects is None: + backend_specific_objects = read_init() + # For special correspondence backend to module name as used in the function 
requires_modulename + dummy_files = {} + + for backend, objects in backend_specific_objects.items(): + backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]" + dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" + dummy_file += "from ..utils import DummyObject, requires_backends\n\n" + dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects]) + dummy_files[backend] = dummy_file + + return dummy_files + + +def check_dummies(overwrite=False): + """Check if the dummy files are up to date and maybe `overwrite` with the right content.""" + dummy_files = create_dummy_files() + # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py + short_names = {"torch": "pt"} + + # Locate actual dummy modules and read their content. + path = os.path.join(PATH_TO_DIFFUSERS, "utils") + dummy_file_paths = { + backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py") + for backend in dummy_files.keys() + } + + actual_dummies = {} + for backend, file_path in dummy_file_paths.items(): + if os.path.isfile(file_path): + with open(file_path, "r", encoding="utf-8", newline="\n") as f: + actual_dummies[backend] = f.read() + else: + actual_dummies[backend] = "" + + for backend in dummy_files.keys(): + if dummy_files[backend] != actual_dummies[backend]: + if overwrite: + print( + f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main " + "__init__ has new objects." + ) + with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f: + f.write(dummy_files[backend]) + else: + raise ValueError( + "The main __init__ has objects that are not present in " + f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` " + "to fix this." + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") + args = parser.parse_args() + + check_dummies(args.fix_and_overwrite) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_inits.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_inits.py new file mode 100644 index 0000000000000000000000000000000000000000..8208fa634186536767054e8fb5f0d6f81bee7ba0 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_inits.py @@ -0,0 +1,299 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
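+
+# This script verifies that every `__init__.py` built around the lazy `_import_structure` pattern exposes the
+# same objects in its `_import_structure` dictionary and in its `if TYPE_CHECKING:` block, and that all
+# submodules are registered in the main init (see `check_all_inits` and `check_submodules` below).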
+ +import collections +import importlib.util +import os +import re +from pathlib import Path + + +PATH_TO_TRANSFORMERS = "src/transformers" + + +# Matches is_xxx_available() +_re_backend = re.compile(r"is\_([a-z_]*)_available()") +# Catches a one-line _import_struct = {xxx} +_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}") +# Catches a line with a key-values pattern: "bla": ["foo", "bar"] +_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]') +# Catches a line if not is_foo_available +_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)") +# Catches a line _import_struct["bla"].append("foo") +_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') +# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] +_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]") +# Catches a line with an object between quotes and a comma: "MyModel", +_re_quote_object = re.compile(r'^\s+"([^"]+)",') +# Catches a line with objects between brackets only: ["foo", "bar"], +_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]") +# Catches a line with from foo import bar, bla, boo +_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") +# Catches a line with try: +_re_try = re.compile(r"^\s*try:") +# Catches a line with else: +_re_else = re.compile(r"^\s*else:") + + +def find_backend(line): + """Find one (or multiple) backend in a code line of the init.""" + if _re_test_backend.search(line) is None: + return None + backends = [b[0] for b in _re_backend.findall(line)] + backends.sort() + return "_and_".join(backends) + + +def parse_init(init_file): + """ + Read an init_file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects + defined + """ + with open(init_file, "r", encoding="utf-8", newline="\n") as f: + lines = f.readlines() + + line_index = 0 + while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"): + line_index += 1 + + # If this is a traditional init, just return. + if line_index >= len(lines): + return None + + # First grab the objects without a specific backend in _import_structure + objects = [] + while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None: + line = lines[line_index] + # If we have everything on a single line, let's deal with it. + if _re_one_line_import_struct.search(line): + content = _re_one_line_import_struct.search(line).groups()[0] + imports = re.findall(r"\[([^\]]+)\]", content) + for imp in imports: + objects.extend([obj[1:-1] for obj in imp.split(", ")]) + line_index += 1 + continue + single_line_import_search = _re_import_struct_key_value.search(line) + if single_line_import_search is not None: + imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0] + objects.extend(imports) + elif line.startswith(" " * 8 + '"'): + objects.append(line[9:-3]) + line_index += 1 + + import_dict_objects = {"none": objects} + # Let's continue with backend-specific objects in _import_structure + while not lines[line_index].startswith("if TYPE_CHECKING"): + # If the line is an if not is_backend_available, we grab all objects associated. 
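+ # The block being parsed typically looks like (sketch with illustrative names):
+ #     try:
+ #         if not is_torch_available():
+ #             raise OptionalDependencyNotAvailable()
+ #     except OptionalDependencyNotAvailable:
+ #         ...  # dummy objects are registered instead
+ #     else:
+ #         _import_structure["models"].extend(["SomeModel"])
+ # Only the objects declared in the `else:` branch are collected here.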
+ backend = find_backend(lines[line_index]) + # Check if the backend declaration is inside a try block: + if _re_try.search(lines[line_index - 1]) is None: + backend = None + + if backend is not None: + line_index += 1 + + # Scroll until we hit the else block of try-except-else + while _re_else.search(lines[line_index]) is None: + line_index += 1 + + line_index += 1 + + objects = [] + # Until we unindent, add backend objects to the list + while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4): + line = lines[line_index] + if _re_import_struct_add_one.search(line) is not None: + objects.append(_re_import_struct_add_one.search(line).groups()[0]) + elif _re_import_struct_add_many.search(line) is not None: + imports = _re_import_struct_add_many.search(line).groups()[0].split(", ") + imports = [obj[1:-1] for obj in imports if len(obj) > 0] + objects.extend(imports) + elif _re_between_brackets.search(line) is not None: + imports = _re_between_brackets.search(line).groups()[0].split(", ") + imports = [obj[1:-1] for obj in imports if len(obj) > 0] + objects.extend(imports) + elif _re_quote_object.search(line) is not None: + objects.append(_re_quote_object.search(line).groups()[0]) + elif line.startswith(" " * 8 + '"'): + objects.append(line[9:-3]) + elif line.startswith(" " * 12 + '"'): + objects.append(line[13:-3]) + line_index += 1 + + import_dict_objects[backend] = objects + else: + line_index += 1 + + # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend + objects = [] + while ( + line_index < len(lines) + and find_backend(lines[line_index]) is None + and not lines[line_index].startswith("else") + ): + line = lines[line_index] + single_line_import_search = _re_import.search(line) + if single_line_import_search is not None: + objects.extend(single_line_import_search.groups()[0].split(", ")) + elif line.startswith(" " * 8): + objects.append(line[8:-2]) + line_index += 1 + + type_hint_objects = {"none": objects} + # Let's continue with backend-specific objects + while line_index < len(lines): + # If the line is an if is_backend_available, we grab all objects associated. + backend = find_backend(lines[line_index]) + # Check if the backend declaration is inside a try block: + if _re_try.search(lines[line_index - 1]) is None: + backend = None + + if backend is not None: + line_index += 1 + + # Scroll until we hit the else block of try-except-else + while _re_else.search(lines[line_index]) is None: + line_index += 1 + + line_index += 1 + + objects = [] + # Until we unindent, add backend objects to the list + while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): + line = lines[line_index] + single_line_import_search = _re_import.search(line) + if single_line_import_search is not None: + objects.extend(single_line_import_search.groups()[0].split(", ")) + elif line.startswith(" " * 12): + objects.append(line[12:-2]) + line_index += 1 + + type_hint_objects[backend] = objects + else: + line_index += 1 + + return import_dict_objects, type_hint_objects + + +def analyze_results(import_dict_objects, type_hint_objects): + """ + Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init. 
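+ Returns a list of human-readable error strings; it is empty when the two halves agree.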
+ """ + + def find_duplicates(seq): + return [k for k, v in collections.Counter(seq).items() if v > 1] + + if list(import_dict_objects.keys()) != list(type_hint_objects.keys()): + return ["Both sides of the init do not have the same backends!"] + + errors = [] + for key in import_dict_objects.keys(): + duplicate_imports = find_duplicates(import_dict_objects[key]) + if duplicate_imports: + errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}") + duplicate_type_hints = find_duplicates(type_hint_objects[key]) + if duplicate_type_hints: + errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}") + + if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])): + name = "base imports" if key == "none" else f"{key} backend" + errors.append(f"Differences for {name}:") + for a in type_hint_objects[key]: + if a not in import_dict_objects[key]: + errors.append(f" {a} in TYPE_HINT but not in _import_structure.") + for a in import_dict_objects[key]: + if a not in type_hint_objects[key]: + errors.append(f" {a} in _import_structure but not in TYPE_HINT.") + return errors + + +def check_all_inits(): + """ + Check all inits in the transformers repo and raise an error if at least one does not define the same objects in + both halves. + """ + failures = [] + for root, _, files in os.walk(PATH_TO_TRANSFORMERS): + if "__init__.py" in files: + fname = os.path.join(root, "__init__.py") + objects = parse_init(fname) + if objects is not None: + errors = analyze_results(*objects) + if len(errors) > 0: + errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" + failures.append("\n".join(errors)) + if len(failures) > 0: + raise ValueError("\n\n".join(failures)) + + +def get_transformers_submodules(): + """ + Returns the list of Transformers submodules. + """ + submodules = [] + for path, directories, files in os.walk(PATH_TO_TRANSFORMERS): + for folder in directories: + # Ignore private modules + if folder.startswith("_"): + directories.remove(folder) + continue + # Ignore leftovers from branches (empty folders apart from pycache) + if len(list((Path(path) / folder).glob("*.py"))) == 0: + continue + short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS)) + submodule = short_path.replace(os.path.sep, ".") + submodules.append(submodule) + for fname in files: + if fname == "__init__.py": + continue + short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS)) + submodule = short_path.replace(".py", "").replace(os.path.sep, ".") + if len(submodule.split(".")) == 1: + submodules.append(submodule) + return submodules + + +IGNORE_SUBMODULES = [ + "convert_pytorch_checkpoint_to_tf2", + "modeling_flax_pytorch_utils", +] + + +def check_submodules(): + # This is to make sure the transformers module imported is the one in the repo. 
+ spec = importlib.util.spec_from_file_location( + "transformers", + os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), + submodule_search_locations=[PATH_TO_TRANSFORMERS], + ) + transformers = spec.loader.load_module() + + module_not_registered = [ + module + for module in get_transformers_submodules() + if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() + ] + if len(module_not_registered) > 0: + list_of_modules = "\n".join(f"- {module}" for module in module_not_registered) + raise ValueError( + "The following submodules are not properly registered in the main init of Transformers:\n" + f"{list_of_modules}\n" + "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." + ) + + +if __name__ == "__main__": + check_all_inits() + check_submodules() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_repo.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_repo.py new file mode 100644 index 0000000000000000000000000000000000000000..14bdbe60adf06e4b6a3f79378f68393cc083b6a3 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_repo.py @@ -0,0 +1,755 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import importlib +import inspect +import os +import re +import warnings +from collections import OrderedDict +from difflib import get_close_matches +from pathlib import Path + +from diffusers.models.auto import get_values +from diffusers.utils import ENV_VARS_TRUE_VALUES, is_flax_available, is_torch_available + + +# All paths are set with the intent you should run this script from the root of the repo with the command +# python utils/check_repo.py +PATH_TO_DIFFUSERS = "src/diffusers" +PATH_TO_TESTS = "tests" +PATH_TO_DOC = "docs/source/en" + +# Update this list with models that are supposed to be private. +PRIVATE_MODELS = [ + "DPRSpanPredictor", + "RealmBertModel", + "T5Stack", + "TFDPRSpanPredictor", +] + +# Update this list for models that are not tested with a comment explaining the reason it should not be. +# Being in this list is an exception and should **not** be the rule. +IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [ + # models to ignore for not tested + "OPTDecoder", # Building part of bigger (tested) model. + "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. + "SegformerDecodeHead", # Building part of bigger (tested) model. + "PLBartEncoder", # Building part of bigger (tested) model. + "PLBartDecoder", # Building part of bigger (tested) model. + "PLBartDecoderWrapper", # Building part of bigger (tested) model. + "BigBirdPegasusEncoder", # Building part of bigger (tested) model. + "BigBirdPegasusDecoder", # Building part of bigger (tested) model. + "BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model. + "DetrEncoder", # Building part of bigger (tested) model. + "DetrDecoder", # Building part of bigger (tested) model. + "DetrDecoderWrapper", # Building part of bigger (tested) model. 
+ "M2M100Encoder", # Building part of bigger (tested) model. + "M2M100Decoder", # Building part of bigger (tested) model. + "Speech2TextEncoder", # Building part of bigger (tested) model. + "Speech2TextDecoder", # Building part of bigger (tested) model. + "LEDEncoder", # Building part of bigger (tested) model. + "LEDDecoder", # Building part of bigger (tested) model. + "BartDecoderWrapper", # Building part of bigger (tested) model. + "BartEncoder", # Building part of bigger (tested) model. + "BertLMHeadModel", # Needs to be setup as decoder. + "BlenderbotSmallEncoder", # Building part of bigger (tested) model. + "BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model. + "BlenderbotEncoder", # Building part of bigger (tested) model. + "BlenderbotDecoderWrapper", # Building part of bigger (tested) model. + "MBartEncoder", # Building part of bigger (tested) model. + "MBartDecoderWrapper", # Building part of bigger (tested) model. + "MegatronBertLMHeadModel", # Building part of bigger (tested) model. + "MegatronBertEncoder", # Building part of bigger (tested) model. + "MegatronBertDecoder", # Building part of bigger (tested) model. + "MegatronBertDecoderWrapper", # Building part of bigger (tested) model. + "PegasusEncoder", # Building part of bigger (tested) model. + "PegasusDecoderWrapper", # Building part of bigger (tested) model. + "DPREncoder", # Building part of bigger (tested) model. + "ProphetNetDecoderWrapper", # Building part of bigger (tested) model. + "RealmBertModel", # Building part of bigger (tested) model. + "RealmReader", # Not regular model. + "RealmScorer", # Not regular model. + "RealmForOpenQA", # Not regular model. + "ReformerForMaskedLM", # Needs to be setup as decoder. + "Speech2Text2DecoderWrapper", # Building part of bigger (tested) model. + "TFDPREncoder", # Building part of bigger (tested) model. + "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFModelMixin ?) + "TFRobertaForMultipleChoice", # TODO: fix + "TrOCRDecoderWrapper", # Building part of bigger (tested) model. + "SeparableConv1D", # Building part of bigger (tested) model. + "FlaxBartForCausalLM", # Building part of bigger (tested) model. + "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. + "OPTDecoderWrapper", +] + +# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't +# trigger the common tests. +TEST_FILES_WITH_NO_COMMON_TESTS = [ + "models/decision_transformer/test_modeling_decision_transformer.py", + "models/camembert/test_modeling_camembert.py", + "models/mt5/test_modeling_flax_mt5.py", + "models/mbart/test_modeling_mbart.py", + "models/mt5/test_modeling_mt5.py", + "models/pegasus/test_modeling_pegasus.py", + "models/camembert/test_modeling_tf_camembert.py", + "models/mt5/test_modeling_tf_mt5.py", + "models/xlm_roberta/test_modeling_tf_xlm_roberta.py", + "models/xlm_roberta/test_modeling_flax_xlm_roberta.py", + "models/xlm_prophetnet/test_modeling_xlm_prophetnet.py", + "models/xlm_roberta/test_modeling_xlm_roberta.py", + "models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py", + "models/vision_text_dual_encoder/test_modeling_flax_vision_text_dual_encoder.py", + "models/decision_transformer/test_modeling_decision_transformer.py", +] + +# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and +# should **not** be the rule. 
+IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ + # models to ignore for model xxx mapping + "DPTForDepthEstimation", + "DecisionTransformerGPT2Model", + "GLPNForDepthEstimation", + "ViltForQuestionAnswering", + "ViltForImagesAndTextClassification", + "ViltForImageAndTextRetrieval", + "ViltForMaskedLM", + "XGLMEncoder", + "XGLMDecoder", + "XGLMDecoderWrapper", + "PerceiverForMultimodalAutoencoding", + "PerceiverForOpticalFlow", + "SegformerDecodeHead", + "FlaxBeitForMaskedImageModeling", + "PLBartEncoder", + "PLBartDecoder", + "PLBartDecoderWrapper", + "BeitForMaskedImageModeling", + "CLIPTextModel", + "CLIPVisionModel", + "TFCLIPTextModel", + "TFCLIPVisionModel", + "FlaxCLIPTextModel", + "FlaxCLIPVisionModel", + "FlaxWav2Vec2ForCTC", + "DetrForSegmentation", + "DPRReader", + "FlaubertForQuestionAnswering", + "FlavaImageCodebook", + "FlavaTextModel", + "FlavaImageModel", + "FlavaMultimodalModel", + "GPT2DoubleHeadsModel", + "LukeForMaskedLM", + "LukeForEntityClassification", + "LukeForEntityPairClassification", + "LukeForEntitySpanClassification", + "OpenAIGPTDoubleHeadsModel", + "RagModel", + "RagSequenceForGeneration", + "RagTokenForGeneration", + "RealmEmbedder", + "RealmForOpenQA", + "RealmScorer", + "RealmReader", + "TFDPRReader", + "TFGPT2DoubleHeadsModel", + "TFOpenAIGPTDoubleHeadsModel", + "TFRagModel", + "TFRagSequenceForGeneration", + "TFRagTokenForGeneration", + "Wav2Vec2ForCTC", + "HubertForCTC", + "SEWForCTC", + "SEWDForCTC", + "XLMForQuestionAnswering", + "XLNetForQuestionAnswering", + "SeparableConv1D", + "VisualBertForRegionToPhraseAlignment", + "VisualBertForVisualReasoning", + "VisualBertForQuestionAnswering", + "VisualBertForMultipleChoice", + "TFWav2Vec2ForCTC", + "TFHubertForCTC", + "MaskFormerForInstanceSegmentation", +] + +# Update this list for models that have multiple model types for the same +# model doc +MODEL_TYPE_TO_DOC_MAPPING = OrderedDict( + [ + ("data2vec-text", "data2vec"), + ("data2vec-audio", "data2vec"), + ("data2vec-vision", "data2vec"), + ] +) + + +# This is to make sure the transformers module imported is the one in the repo. +spec = importlib.util.spec_from_file_location( + "diffusers", + os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), + submodule_search_locations=[PATH_TO_DIFFUSERS], +) +diffusers = spec.loader.load_module() + + +def check_model_list(): + """Check the model list inside the transformers library.""" + # Get the models from the directory structure of `src/diffusers/models/` + models_dir = os.path.join(PATH_TO_DIFFUSERS, "models") + _models = [] + for model in os.listdir(models_dir): + model_dir = os.path.join(models_dir, model) + if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir): + _models.append(model) + + # Get the models from the directory structure of `src/transformers/models/` + models = [model for model in dir(diffusers.models) if not model.startswith("__")] + + missing_models = sorted(set(_models).difference(models)) + if missing_models: + raise Exception( + f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}." + ) + + +# If some modeling modules should be ignored for all checks, they should be added in the nested list +# _ignore_modules of this function. 
+def get_model_modules(): + """Get the model modules inside the transformers library.""" + _ignore_modules = [ + "modeling_auto", + "modeling_encoder_decoder", + "modeling_marian", + "modeling_mmbt", + "modeling_outputs", + "modeling_retribert", + "modeling_utils", + "modeling_flax_auto", + "modeling_flax_encoder_decoder", + "modeling_flax_utils", + "modeling_speech_encoder_decoder", + "modeling_flax_speech_encoder_decoder", + "modeling_flax_vision_encoder_decoder", + "modeling_transfo_xl_utilities", + "modeling_tf_auto", + "modeling_tf_encoder_decoder", + "modeling_tf_outputs", + "modeling_tf_pytorch_utils", + "modeling_tf_utils", + "modeling_tf_transfo_xl_utilities", + "modeling_tf_vision_encoder_decoder", + "modeling_vision_encoder_decoder", + ] + modules = [] + for model in dir(diffusers.models): + # There are some magic dunder attributes in the dir, we ignore them + if not model.startswith("__"): + model_module = getattr(diffusers.models, model) + for submodule in dir(model_module): + if submodule.startswith("modeling") and submodule not in _ignore_modules: + modeling_module = getattr(model_module, submodule) + if inspect.ismodule(modeling_module): + modules.append(modeling_module) + return modules + + +def get_models(module, include_pretrained=False): + """Get the objects in module that are models.""" + models = [] + model_classes = (diffusers.ModelMixin, diffusers.TFModelMixin, diffusers.FlaxModelMixin) + for attr_name in dir(module): + if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name): + continue + attr = getattr(module, attr_name) + if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__: + models.append((attr_name, attr)) + return models + + +def is_a_private_model(model): + """Returns True if the model should not be in the main init.""" + if model in PRIVATE_MODELS: + return True + + # Wrapper, Encoder and Decoder are all privates + if model.endswith("Wrapper"): + return True + if model.endswith("Encoder"): + return True + if model.endswith("Decoder"): + return True + return False + + +def check_models_are_in_init(): + """Checks all models defined in the library are in the main init.""" + models_not_in_init = [] + dir_transformers = dir(diffusers) + for module in get_model_modules(): + models_not_in_init += [ + model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers + ] + + # Remove private models + models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)] + if len(models_not_in_init) > 0: + raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.") + + +# If some test_modeling files should be ignored when checking models are all tested, they should be added in the +# nested list _ignore_files of this function. +def get_model_test_files(): + """Get the model test files. + + The returned files should NOT contain the `tests` (i.e. `PATH_TO_TESTS` defined in this script). They will be + considered as paths relative to `tests`. A caller has to use `os.path.join(PATH_TO_TESTS, ...)` to access the files. 
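+ For example, a returned entry would look like ``models/bert/test_modeling_bert.py`` (hypothetical name) rather
+ than ``tests/models/bert/test_modeling_bert.py``.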
+ """ + + _ignore_files = [ + "test_modeling_common", + "test_modeling_encoder_decoder", + "test_modeling_flax_encoder_decoder", + "test_modeling_flax_speech_encoder_decoder", + "test_modeling_marian", + "test_modeling_tf_common", + "test_modeling_tf_encoder_decoder", + ] + test_files = [] + # Check both `PATH_TO_TESTS` and `PATH_TO_TESTS/models` + model_test_root = os.path.join(PATH_TO_TESTS, "models") + model_test_dirs = [] + for x in os.listdir(model_test_root): + x = os.path.join(model_test_root, x) + if os.path.isdir(x): + model_test_dirs.append(x) + + for target_dir in [PATH_TO_TESTS] + model_test_dirs: + for file_or_dir in os.listdir(target_dir): + path = os.path.join(target_dir, file_or_dir) + if os.path.isfile(path): + filename = os.path.split(path)[-1] + if "test_modeling" in filename and os.path.splitext(filename)[0] not in _ignore_files: + file = os.path.join(*path.split(os.sep)[1:]) + test_files.append(file) + + return test_files + + +# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class +# for the all_model_classes variable. +def find_tested_models(test_file): + """Parse the content of test_file to detect what's in all_model_classes""" + # This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class + with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f: + content = f.read() + all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content) + # Check with one less parenthesis as well + all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content) + if len(all_models) > 0: + model_tested = [] + for entry in all_models: + for line in entry.split(","): + name = line.strip() + if len(name) > 0: + model_tested.append(name) + return model_tested + + +def check_models_are_tested(module, test_file): + """Check models defined in module are tested in test_file.""" + # XxxModelMixin are not tested + defined_models = get_models(module) + tested_models = find_tested_models(test_file) + if tested_models is None: + if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS: + return + return [ + f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. " + + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file " + + "`utils/check_repo.py`." + ] + failures = [] + for model_name, _ in defined_models: + if model_name not in tested_models and model_name not in IGNORE_NON_TESTED: + failures.append( + f"{model_name} is defined in {module.__name__} but is not tested in " + + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file." + + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`" + + "in the file `utils/check_repo.py`." 
+ ) + return failures + + +def check_all_models_are_tested(): + """Check all models are properly tested.""" + modules = get_model_modules() + test_files = get_model_test_files() + failures = [] + for module in modules: + test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file] + if len(test_file) == 0: + failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.") + elif len(test_file) > 1: + failures.append(f"{module.__name__} has several test files: {test_file}.") + else: + test_file = test_file[0] + new_failures = check_models_are_tested(module, test_file) + if new_failures is not None: + failures += new_failures + if len(failures) > 0: + raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) + + +def get_all_auto_configured_models(): + """Return the list of all models in at least one auto class.""" + result = set() # To avoid duplicates we concatenate all model classes in a set. + if is_torch_available(): + for attr_name in dir(diffusers.models.auto.modeling_auto): + if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"): + result = result | set(get_values(getattr(diffusers.models.auto.modeling_auto, attr_name))) + if is_flax_available(): + for attr_name in dir(diffusers.models.auto.modeling_flax_auto): + if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"): + result = result | set(get_values(getattr(diffusers.models.auto.modeling_flax_auto, attr_name))) + return list(result) + + +def ignore_unautoclassed(model_name): + """Rules to determine if `name` should be in an auto class.""" + # Special white list + if model_name in IGNORE_NON_AUTO_CONFIGURED: + return True + # Encoder and Decoder should be ignored + if "Encoder" in model_name or "Decoder" in model_name: + return True + return False + + +def check_models_are_auto_configured(module, all_auto_models): + """Check models defined in module are each in an auto class.""" + defined_models = get_models(module) + failures = [] + for model_name, _ in defined_models: + if model_name not in all_auto_models and not ignore_unautoclassed(model_name): + failures.append( + f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. " + "If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file " + "`utils/check_repo.py`." + ) + return failures + + +def check_all_models_are_auto_configured(): + """Check all models are each in an auto class.""" + missing_backends = [] + if not is_torch_available(): + missing_backends.append("PyTorch") + if not is_flax_available(): + missing_backends.append("Flax") + if len(missing_backends) > 0: + missing = ", ".join(missing_backends) + if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: + raise Exception( + "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " + f"Transformers repo, the following are missing: {missing}." + ) + else: + warnings.warn( + "Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the " + f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you " + "didn't make any change in one of those backends modeling files, you should probably execute the " + "command above to be on the safe side." 
+ ) + modules = get_model_modules() + all_auto_models = get_all_auto_configured_models() + failures = [] + for module in modules: + new_failures = check_models_are_auto_configured(module, all_auto_models) + if new_failures is not None: + failures += new_failures + if len(failures) > 0: + raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures)) + + +_re_decorator = re.compile(r"^\s*@(\S+)\s+$") + + +def check_decorator_order(filename): + """Check that in the test file `filename` the slow decorator is always last.""" + with open(filename, "r", encoding="utf-8", newline="\n") as f: + lines = f.readlines() + decorator_before = None + errors = [] + for i, line in enumerate(lines): + search = _re_decorator.search(line) + if search is not None: + decorator_name = search.groups()[0] + if decorator_before is not None and decorator_name.startswith("parameterized"): + errors.append(i) + decorator_before = decorator_name + elif decorator_before is not None: + decorator_before = None + return errors + + +def check_all_decorator_order(): + """Check that in all test files, the slow decorator is always last.""" + errors = [] + for fname in os.listdir(PATH_TO_TESTS): + if fname.endswith(".py"): + filename = os.path.join(PATH_TO_TESTS, fname) + new_errors = check_decorator_order(filename) + errors += [f"- {filename}, line {i}" for i in new_errors] + if len(errors) > 0: + msg = "\n".join(errors) + raise ValueError( + "The parameterized decorator (and its variants) should always be first, but this is not the case in the" + f" following files:\n{msg}" + ) + + +def find_all_documented_objects(): + """Parse the content of all doc files to detect which classes and functions it documents""" + documented_obj = [] + for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"): + with open(doc_file, "r", encoding="utf-8", newline="\n") as f: + content = f.read() + raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content) + documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] + for doc_file in Path(PATH_TO_DOC).glob("**/*.md"): + with open(doc_file, "r", encoding="utf-8", newline="\n") as f: + content = f.read() + raw_doc_objs = re.findall(r"\[\[autodoc\]\]\s+(\S+)\s+", content) + documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs] + return documented_obj + + +# One good reason for not being documented is to be deprecated. Put in this list deprecated objects. +DEPRECATED_OBJECTS = [ + "AutoModelWithLMHead", + "BartPretrainedModel", + "DataCollator", + "DataCollatorForSOP", + "GlueDataset", + "GlueDataTrainingArguments", + "LineByLineTextDataset", + "LineByLineWithRefDataset", + "LineByLineWithSOPTextDataset", + "PretrainedBartModel", + "PretrainedFSMTModel", + "SingleSentenceClassificationProcessor", + "SquadDataTrainingArguments", + "SquadDataset", + "SquadExample", + "SquadFeatures", + "SquadV1Processor", + "SquadV2Processor", + "TFAutoModelWithLMHead", + "TFBartPretrainedModel", + "TextDataset", + "TextDatasetForNextSentencePrediction", + "Wav2Vec2ForMaskedLM", + "Wav2Vec2Tokenizer", + "glue_compute_metrics", + "glue_convert_examples_to_features", + "glue_output_modes", + "glue_processors", + "glue_tasks_num_labels", + "squad_convert_examples_to_features", + "xnli_compute_metrics", + "xnli_output_modes", + "xnli_processors", + "xnli_tasks_num_labels", + "TFTrainer", + "TFTrainingArguments", +] + +# Exceptionally, some objects should not be documented after all rules passed. +# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT! 
+UNDOCUMENTED_OBJECTS = [ + "AddedToken", # This is a tokenizers class. + "BasicTokenizer", # Internal, should never have been in the main init. + "CharacterTokenizer", # Internal, should never have been in the main init. + "DPRPretrainedReader", # Like an Encoder. + "DummyObject", # Just picked by mistake sometimes. + "MecabTokenizer", # Internal, should never have been in the main init. + "ModelCard", # Internal type. + "SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer) + "TFDPRPretrainedReader", # Like an Encoder. + "TransfoXLCorpus", # Internal type. + "WordpieceTokenizer", # Internal, should never have been in the main init. + "absl", # External module + "add_end_docstrings", # Internal, should never have been in the main init. + "add_start_docstrings", # Internal, should never have been in the main init. + "cached_path", # Internal used for downloading models. + "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights + "logger", # Internal logger + "logging", # External module + "requires_backends", # Internal function +] + +# This list should be empty. Objects in it should get their own doc page. +SHOULD_HAVE_THEIR_OWN_PAGE = [ + # Benchmarks + "PyTorchBenchmark", + "PyTorchBenchmarkArguments", + "TensorFlowBenchmark", + "TensorFlowBenchmarkArguments", +] + + +def ignore_undocumented(name): + """Rules to determine if `name` should be undocumented.""" + # NOT DOCUMENTED ON PURPOSE. + # Constants uppercase are not documented. + if name.isupper(): + return True + # ModelMixins / Encoders / Decoders / Layers / Embeddings / Attention are not documented. + if ( + name.endswith("ModelMixin") + or name.endswith("Decoder") + or name.endswith("Encoder") + or name.endswith("Layer") + or name.endswith("Embeddings") + or name.endswith("Attention") + ): + return True + # Submodules are not documented. + if os.path.isdir(os.path.join(PATH_TO_DIFFUSERS, name)) or os.path.isfile( + os.path.join(PATH_TO_DIFFUSERS, f"{name}.py") + ): + return True + # All load functions are not documented. + if name.startswith("load_tf") or name.startswith("load_pytorch"): + return True + # is_xxx_available functions are not documented. + if name.startswith("is_") and name.endswith("_available"): + return True + # Deprecated objects are not documented. + if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS: + return True + # MMBT model does not really work. 
+ if name.startswith("MMBT"): + return True + if name in SHOULD_HAVE_THEIR_OWN_PAGE: + return True + return False + + +def check_all_objects_are_documented(): + """Check all models are properly documented.""" + documented_objs = find_all_documented_objects() + modules = diffusers._modules + objects = [c for c in dir(diffusers) if c not in modules and not c.startswith("_")] + undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)] + if len(undocumented_objs) > 0: + raise Exception( + "The following objects are in the public init so should be documented:\n - " + + "\n - ".join(undocumented_objs) + ) + check_docstrings_are_in_md() + check_model_type_doc_match() + + +def check_model_type_doc_match(): + """Check all doc pages have a corresponding model type.""" + model_doc_folder = Path(PATH_TO_DOC) / "model_doc" + model_docs = [m.stem for m in model_doc_folder.glob("*.md")] + + model_types = list(diffusers.models.auto.configuration_auto.MODEL_NAMES_MAPPING.keys()) + model_types = [MODEL_TYPE_TO_DOC_MAPPING[m] if m in MODEL_TYPE_TO_DOC_MAPPING else m for m in model_types] + + errors = [] + for m in model_docs: + if m not in model_types and m != "auto": + close_matches = get_close_matches(m, model_types) + error_message = f"{m} is not a proper model identifier." + if len(close_matches) > 0: + close_matches = "/".join(close_matches) + error_message += f" Did you mean {close_matches}?" + errors.append(error_message) + + if len(errors) > 0: + raise ValueError( + "Some model doc pages do not match any existing model type:\n" + + "\n".join(errors) + + "\nYou can add any missing model type to the `MODEL_NAMES_MAPPING` constant in " + "models/auto/configuration_auto.py." + ) + + +# Re pattern to catch :obj:`xx`, :class:`xx`, :func:`xx` or :meth:`xx`. +_re_rst_special_words = re.compile(r":(?:obj|func|class|meth):`([^`]+)`") +# Re pattern to catch things between double backquotes. +_re_double_backquotes = re.compile(r"(^|[^`])``([^`]+)``([^`]|$)") +# Re pattern to catch example introduction. +_re_rst_example = re.compile(r"^\s*Example.*::\s*$", flags=re.MULTILINE) + + +def is_rst_docstring(docstring): + """ + Returns `True` if `docstring` is written in rst. 
+ """ + if _re_rst_special_words.search(docstring) is not None: + return True + if _re_double_backquotes.search(docstring) is not None: + return True + if _re_rst_example.search(docstring) is not None: + return True + return False + + +def check_docstrings_are_in_md(): + """Check all docstrings are in md""" + files_with_rst = [] + for file in Path(PATH_TO_DIFFUSERS).glob("**/*.py"): + with open(file, "r") as f: + code = f.read() + docstrings = code.split('"""') + + for idx, docstring in enumerate(docstrings): + if idx % 2 == 0 or not is_rst_docstring(docstring): + continue + files_with_rst.append(file) + break + + if len(files_with_rst) > 0: + raise ValueError( + "The following files have docstrings written in rst:\n" + + "\n".join([f"- {f}" for f in files_with_rst]) + + "\nTo fix this run `doc-builder convert path_to_py_file` after installing `doc-builder`\n" + "(`pip install git+https://github.com/huggingface/doc-builder`)" + ) + + +def check_repo_quality(): + """Check all models are properly tested and documented.""" + print("Checking all models are included.") + check_model_list() + print("Checking all models are public.") + check_models_are_in_init() + print("Checking all models are properly tested.") + check_all_decorator_order() + check_all_models_are_tested() + print("Checking all objects are properly documented.") + check_all_objects_are_documented() + print("Checking all models are in at least one auto class.") + check_all_models_are_auto_configured() + + +if __name__ == "__main__": + check_repo_quality() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_support_list.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_support_list.py new file mode 100644 index 0000000000000000000000000000000000000000..ade9df3b64faa2e9daa227e36c036df4e69116bc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_support_list.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +""" +Utility that checks that modules like attention processors are listed in the documentation file. + +```bash +python utils/check_support_list.py +``` + +It has no auto-fix mode. +""" + +import os +import re + + +# All paths are set with the intent that you run this script from the root of the repo +REPO_PATH = "." + + +def read_documented_classes(doc_path, autodoc_regex=r"\[\[autodoc\]\]\s([^\n]+)"): + """ + Reads documented classes from a doc file using a regex to find lines like [[autodoc]] my.module.Class. + Returns a list of documented class names (just the class name portion). + """ + with open(os.path.join(REPO_PATH, doc_path), "r") as f: + doctext = f.read() + matches = re.findall(autodoc_regex, doctext) + return [match.split(".")[-1] for match in matches] + + +def read_source_classes(src_path, class_regex, exclude_conditions=None): + """ + Reads class names from a source file using a regex that captures class definitions. + Optionally exclude classes based on a list of conditions (functions that take class name and return bool). 
+ """ + if exclude_conditions is None: + exclude_conditions = [] + with open(os.path.join(REPO_PATH, src_path), "r") as f: + doctext = f.read() + classes = re.findall(class_regex, doctext) + # Filter out classes that meet any of the exclude conditions + filtered_classes = [c for c in classes if not any(cond(c) for cond in exclude_conditions)] + return filtered_classes + + +def check_documentation(doc_path, src_path, doc_regex, src_regex, exclude_conditions=None): + """ + Generic function to check if all classes defined in `src_path` are documented in `doc_path`. + Returns a set of undocumented class names. + """ + documented = set(read_documented_classes(doc_path, doc_regex)) + source_classes = set(read_source_classes(src_path, src_regex, exclude_conditions=exclude_conditions)) + + # Find which classes in source are not documented in a deterministic way. + undocumented = sorted(source_classes - documented) + return undocumented + + +if __name__ == "__main__": + # Define the checks we need to perform + checks = { + "Attention Processors": { + "doc_path": "docs/source/en/api/attnprocessor.md", + "src_path": "src/diffusers/models/attention_processor.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", + "exclude_conditions": [lambda c: "LoRA" in c, lambda c: c == "Attention"], + }, + "Image Processors": { + "doc_path": "docs/source/en/api/image_processor.md", + "src_path": "src/diffusers/image_processor.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+Processor(?:\d*_?\d*))[:(]", + }, + "Activations": { + "doc_path": "docs/source/en/api/activations.md", + "src_path": "src/diffusers/models/activations.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", + }, + "Normalizations": { + "doc_path": "docs/source/en/api/normalization.md", + "src_path": "src/diffusers/models/normalization.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+)\s*\(.*?nn\.Module.*?\):", + "exclude_conditions": [ + # Exclude LayerNorm as it's an intentional exception + lambda c: c == "LayerNorm" + ], + }, + "LoRA Mixins": { + "doc_path": "docs/source/en/api/loaders/lora.md", + "src_path": "src/diffusers/loaders/lora_pipeline.py", + "doc_regex": r"\[\[autodoc\]\]\s([^\n]+)", + "src_regex": r"class\s+(\w+LoraLoaderMixin(?:\d*_?\d*))[:(]", + }, + } + + missing_items = {} + for category, params in checks.items(): + undocumented = check_documentation( + doc_path=params["doc_path"], + src_path=params["src_path"], + doc_regex=params["doc_regex"], + src_regex=params["src_regex"], + exclude_conditions=params.get("exclude_conditions"), + ) + if undocumented: + missing_items[category] = undocumented + + # If we have any missing items, raise a single combined error + if missing_items: + error_msg = ["Some classes are not documented properly:\n"] + for category, classes in missing_items.items(): + error_msg.append(f"- {category}: {', '.join(sorted(classes))}") + raise ValueError("\n".join(error_msg)) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/check_table.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_table.py new file mode 100644 index 0000000000000000000000000000000000000000..83c29aa74eca042df81386a73c12ffaf128fc4c2 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/check_table.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import collections +import importlib.util +import os +import re + + +# All paths are set with the intent you should run this script from the root of the repo with the command +# python utils/check_table.py +TRANSFORMERS_PATH = "src/diffusers" +PATH_TO_DOCS = "docs/source/en" +REPO_PATH = "." + + +def _find_text_in_file(filename, start_prompt, end_prompt): + """ + Find the text in `filename` between a line beginning with `start_prompt` and before `end_prompt`, removing empty + lines. + """ + with open(filename, "r", encoding="utf-8", newline="\n") as f: + lines = f.readlines() + # Find the start prompt. + start_index = 0 + while not lines[start_index].startswith(start_prompt): + start_index += 1 + start_index += 1 + + end_index = start_index + while not lines[end_index].startswith(end_prompt): + end_index += 1 + end_index -= 1 + + while len(lines[start_index]) <= 1: + start_index += 1 + while len(lines[end_index]) <= 1: + end_index -= 1 + end_index += 1 + return "".join(lines[start_index:end_index]), start_index, end_index, lines + + +# Add here suffixes that are used to identify models, separated by | +ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration" +# Regexes that match TF/Flax/PT model names. +_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") +_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") +# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. +_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)") + + +# This is to make sure the diffusers module imported is the one in the repo. +spec = importlib.util.spec_from_file_location( + "diffusers", + os.path.join(TRANSFORMERS_PATH, "__init__.py"), + submodule_search_locations=[TRANSFORMERS_PATH], +) +diffusers_module = spec.loader.load_module() + + +# Thanks to https://stackoverflow.com/questions/29916065/how-to-do-camelcase-split-in-python +def camel_case_split(identifier): + """Split a camelcased `identifier` into words.""" + matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier) + return [m.group(0) for m in matches] + + +def _center_text(text, width): + text_length = 2 if text == "✅" or text == "❌" else len(text) + left_indent = (width - text_length) // 2 + right_indent = width - text_length - left_indent + return " " * left_indent + text + " " * right_indent + + +def get_model_table_from_auto_modules(): + """Generates an up-to-date model table from the content of the auto modules.""" + # Dictionary model names to config. 
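`camel_case_split` above is what the lookup loop below uses to strip one trailing word at a time from a class name; a quick standalone check of its behaviour, using an arbitrary example identifier:

```python
import re


def camel_case_split(identifier):
    # Same regex as above: split before lowercase-to-uppercase and acronym boundaries.
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


print(camel_case_split("StableDiffusionXLPipeline"))  # ['Stable', 'Diffusion', 'XL', 'Pipeline']
```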
+ config_mapping_names = diffusers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES + model_name_to_config = { + name: config_mapping_names[code] + for code, name in diffusers_module.MODEL_NAMES_MAPPING.items() + if code in config_mapping_names + } + model_name_to_prefix = {name: config.replace("ConfigMixin", "") for name, config in model_name_to_config.items()} + + # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax. + slow_tokenizers = collections.defaultdict(bool) + fast_tokenizers = collections.defaultdict(bool) + pt_models = collections.defaultdict(bool) + tf_models = collections.defaultdict(bool) + flax_models = collections.defaultdict(bool) + + # Let's lookup through all diffusers object (once). + for attr_name in dir(diffusers_module): + lookup_dict = None + if attr_name.endswith("Tokenizer"): + lookup_dict = slow_tokenizers + attr_name = attr_name[:-9] + elif attr_name.endswith("TokenizerFast"): + lookup_dict = fast_tokenizers + attr_name = attr_name[:-13] + elif _re_tf_models.match(attr_name) is not None: + lookup_dict = tf_models + attr_name = _re_tf_models.match(attr_name).groups()[0] + elif _re_flax_models.match(attr_name) is not None: + lookup_dict = flax_models + attr_name = _re_flax_models.match(attr_name).groups()[0] + elif _re_pt_models.match(attr_name) is not None: + lookup_dict = pt_models + attr_name = _re_pt_models.match(attr_name).groups()[0] + + if lookup_dict is not None: + while len(attr_name) > 0: + if attr_name in model_name_to_prefix.values(): + lookup_dict[attr_name] = True + break + # Try again after removing the last word in the name + attr_name = "".join(camel_case_split(attr_name)[:-1]) + + # Let's build that table! + model_names = list(model_name_to_config.keys()) + model_names.sort(key=str.lower) + columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"] + # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side). + widths = [len(c) + 2 for c in columns] + widths[0] = max([len(name) for name in model_names]) + 2 + + # Build the table per se + table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n" + # Use ":-----:" format to center-aligned table cell texts + table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n" + + check = {True: "✅", False: "❌"} + for name in model_names: + prefix = model_name_to_prefix[name] + line = [ + name, + check[slow_tokenizers[prefix]], + check[fast_tokenizers[prefix]], + check[pt_models[prefix]], + check[tf_models[prefix]], + check[flax_models[prefix]], + ] + table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n" + return table + + +def check_model_table(overwrite=False): + """Check the model table in the index.rst is consistent with the state of the lib and maybe `overwrite`.""" + current_table, start_index, end_index, lines = _find_text_in_file( + filename=os.path.join(PATH_TO_DOCS, "index.md"), + start_prompt="", + ) + new_table = get_model_table_from_auto_modules() + + if current_table != new_table: + if overwrite: + with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f: + f.writelines(lines[:start_index] + [new_table] + lines[end_index:]) + else: + raise ValueError( + "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." 
+ ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") + args = parser.parse_args() + + check_model_table(args.fix_and_overwrite) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/consolidated_test_report.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/consolidated_test_report.py new file mode 100644 index 0000000000000000000000000000000000000000..134fecf721e44ab50c16d8ed3469c94c74ffd408 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/consolidated_test_report.py @@ -0,0 +1,789 @@ +#!/usr/bin/env python +import argparse +import glob +import os +import re +from datetime import date, datetime + +from slack_sdk import WebClient +from tabulate import tabulate + + +MAX_LEN_MESSAGE = 3001 # slack endpoint has a limit of 3001 characters + +parser = argparse.ArgumentParser() +parser.add_argument("--slack_channel_name", default="diffusers-ci-nightly") +parser.add_argument( + "--reports_dir", + default="reports", + help="Directory containing test reports (will search recursively in all subdirectories)", +) +parser.add_argument("--output_file", default=None, help="Path to save the consolidated report (markdown format)") + + +def parse_stats_file(file_path): + """Parse a stats file to extract test statistics.""" + try: + with open(file_path, "r") as f: + content = f.read() + + # Extract the numbers using regex + tests_pattern = r"collected (\d+) items" + passed_pattern = r"(\d+) passed" + failed_pattern = r"(\d+) failed" + skipped_pattern = r"(\d+) skipped" + xpassed_pattern = r"(\d+) xpassed" + + tests_match = re.search(tests_pattern, content) + passed_match = re.search(passed_pattern, content) + failed_match = re.search(failed_pattern, content) + skipped_match = re.search(skipped_pattern, content) + xpassed_match = re.search(xpassed_pattern, content) + + passed = int(passed_match.group(1)) if passed_match else 0 + failed = int(failed_match.group(1)) if failed_match else 0 + skipped = int(skipped_match.group(1)) if skipped_match else 0 + xpassed = int(xpassed_match.group(1)) if xpassed_match else 0 + + # If tests_match exists, use it, otherwise calculate from passed/failed/skipped + if tests_match: + tests = int(tests_match.group(1)) + else: + tests = passed + failed + skipped + xpassed + + # Extract timing information if available + timing_pattern = r"slowest \d+ test durations[\s\S]*?\n([\s\S]*?)={70}" + timing_match = re.search(timing_pattern, content, re.MULTILINE) + slowest_tests = [] + + if timing_match: + timing_text = timing_match.group(1).strip() + test_timing_lines = timing_text.split("\n") + for line in test_timing_lines: + if line.strip(): + # Format is typically: 10.37s call tests/path/to/test.py::TestClass::test_method + parts = line.strip().split() + if len(parts) >= 3: + time_str = parts[0] + test_path = " ".join(parts[2:]) + + # Skip entries with "< 0.05 secs were omitted" or similar + if "secs were omitted" in test_path: + continue + + try: + time_seconds = float(time_str.rstrip("s")) + slowest_tests.append({"test": test_path, "duration": time_seconds}) + except ValueError: + pass + + return { + "tests": tests, + "passed": passed, + "failed": failed, + "skipped": skipped, + "slowest_tests": slowest_tests, + } + except Exception as e: + print(f"Error parsing {file_path}: {e}") + return {"tests": 0, "passed": 0, "failed": 0, "skipped": 0, "slowest_tests": []} + + +def parse_durations_file(file_path): + """Parse a durations file to extract 
test timing information.""" + slowest_tests = [] + try: + durations_file = file_path.replace("_stats.txt", "_durations.txt") + if os.path.exists(durations_file): + with open(durations_file, "r") as f: + content = f.read() + + # Skip the header line + for line in content.split("\n")[1:]: + if line.strip(): + # Format is typically: 10.37s call tests/path/to/test.py::TestClass::test_method + parts = line.strip().split() + if len(parts) >= 3: + time_str = parts[0] + test_path = " ".join(parts[2:]) + + # Skip entries with "< 0.05 secs were omitted" or similar + if "secs were omitted" in test_path: + continue + + try: + time_seconds = float(time_str.rstrip("s")) + slowest_tests.append({"test": test_path, "duration": time_seconds}) + except ValueError: + # If time_str is not a valid float, it might be a different format + # For example, some pytest formats show "< 0.05s" or similar + if test_path.startswith("<") and "secs were omitted" in test_path: + # Extract the time value from test_path if it's in the format "< 0.05 secs were omitted" + try: + # This handles entries where the time is in the test_path itself + dur_match = re.search(r"(\d+(?:\.\d+)?)", test_path) + if dur_match: + time_seconds = float(dur_match.group(1)) + slowest_tests.append({"test": test_path, "duration": time_seconds}) + except ValueError: + pass + except Exception as e: + print(f"Error parsing durations file {file_path.replace('_stats.txt', '_durations.txt')}: {e}") + + return slowest_tests + + +def parse_failures_file(file_path): + """Parse a failures file to extract failed test details.""" + failures = [] + try: + with open(file_path, "r") as f: + content = f.read() + + # We don't need the base file name anymore as we're getting test paths from summary + + # Check if it's a short stack format + if "============================= FAILURES SHORT STACK =============================" in content: + # First, look for pytest-style failure headers with underscores and clean them up + test_headers = re.findall(r"_{5,}\s+([^_\n]+?)\s+_{5,}", content) + + for test_name in test_headers: + test_name = test_name.strip() + # Make sure it's a valid test name (contains a dot and doesn't look like a number) + if "." 
in test_name and not test_name.replace(".", "").isdigit(): + # For test names missing the full path, check if we can reconstruct it from failures_line.txt + # This is a best effort - we won't always have the line file available + if not test_name.endswith(".py") and "::" not in test_name and "/" not in test_name: + # Try to look for a corresponding line file + line_file = file_path.replace("_failures_short.txt", "_failures_line.txt") + if os.path.exists(line_file): + try: + with open(line_file, "r") as lf: + line_content = lf.read() + # Look for test name in line file which might have the full path + path_match = re.search( + r"(tests/[\w/]+\.py::[^:]+::" + test_name.split(".")[-1] + ")", + line_content, + ) + if path_match: + test_name = path_match.group(1) + except Exception: + pass # If we can't read the line file, just use what we have + + failures.append( + { + "test": test_name, + "error": "Error occurred", + "original_test_name": test_name, # Keep original for reference + } + ) + + # If we didn't find any pytest-style headers, try other formats + if not failures: + # Look for test names at the beginning of the file (in first few lines) + first_lines = content.split("\n")[:20] # Look at first 20 lines + for line in first_lines: + # Look for test names in various formats + # Format: tests/file.py::TestClass::test_method + path_match = re.search(r"(tests/[\w/]+\.py::[\w\.]+::\w+)", line) + # Format: TestClass.test_method + class_match = re.search(r"([A-Za-z][A-Za-z0-9_]+\.[A-Za-z][A-Za-z0-9_]+)", line) + + if path_match: + test_name = path_match.group(1) + failures.append( + {"test": test_name, "error": "Error occurred", "original_test_name": test_name} + ) + break # Found a full path, stop looking + elif class_match and "test" in line.lower(): + test_name = class_match.group(1) + # Make sure it's likely a test name (contains test in method name) + if "test" in test_name.lower(): + failures.append( + {"test": test_name, "error": "Error occurred", "original_test_name": test_name} + ) + else: + # Standard format - try to extract from standard pytest output + failure_blocks = re.split(r"={70}", content) + + for block in failure_blocks: + if not block.strip(): + continue + + # Look for test paths in the format: path/to/test.py::TestClass::test_method + path_matches = re.findall(r"([\w/]+\.py::[\w\.]+::\w+)", block) + if path_matches: + for test_name in path_matches: + failures.append( + {"test": test_name, "error": "Error occurred", "original_test_name": test_name} + ) + else: + # Try alternative format: TestClass.test_method + class_matches = re.findall(r"([A-Za-z][A-Za-z0-9_]+\.[A-Za-z][A-Za-z0-9_]+)", block) + for test_name in class_matches: + # Filter out things that don't look like test names + if ( + not test_name.startswith(("e.g", "i.e", "etc.")) + and not test_name.isdigit() + and "test" in test_name.lower() + ): + failures.append( + {"test": test_name, "error": "Error occurred", "original_test_name": test_name} + ) + + except Exception as e: + print(f"Error parsing failures in {file_path}: {e}") + + return failures + + +def consolidate_reports(reports_dir): + """Consolidate test reports from multiple test runs, including from subdirectories.""" + # Get all stats files, including those in subdirectories + stats_files = glob.glob(f"{reports_dir}/**/*_stats.txt", recursive=True) + + results = {} + total_stats = {"tests": 0, "passed": 0, "failed": 0, "skipped": 0} + + # Collect all slow tests across all test suites + all_slow_tests = [] + + # Process each stats file and its 
corresponding failures file + for stats_file in stats_files: + # Extract test suite name from filename (e.g., tests_pipeline_allegro_cuda_stats.txt -> pipeline_allegro_cuda) + base_name = os.path.basename(stats_file).replace("_stats.txt", "") + + # Include parent directory in suite name if it's in a subdirectory + rel_path = os.path.relpath(os.path.dirname(stats_file), reports_dir) + if rel_path and rel_path != ".": + # Remove 'test_reports' suffix from directory name if present + dir_name = os.path.basename(rel_path) + if dir_name.endswith("_test_reports"): + dir_name = dir_name[:-13] # Remove '_test_reports' suffix + base_name = f"{dir_name}/{base_name}" + + # Parse stats + stats = parse_stats_file(stats_file) + + # If no slowest tests found in stats file, try the durations file directly + if not stats.get("slowest_tests"): + stats["slowest_tests"] = parse_durations_file(stats_file) + + # Update total stats + for key in ["tests", "passed", "failed", "skipped"]: + total_stats[key] += stats[key] + + # Collect slowest tests with their suite name + for slow_test in stats.get("slowest_tests", []): + all_slow_tests.append({"test": slow_test["test"], "duration": slow_test["duration"], "suite": base_name}) + + # Parse failures if there are any + failures = [] + if stats["failed"] > 0: + # First try to get test paths from summary_short.txt which has the best format + summary_file = stats_file.replace("_stats.txt", "_summary_short.txt") + if os.path.exists(summary_file): + try: + with open(summary_file, "r") as f: + content = f.read() + # Look for full lines with test path and error message: "FAILED test_path - error_msg" + failed_test_lines = re.findall( + r"FAILED\s+(tests/[\w/]+\.py::[A-Za-z0-9_\.]+::[A-Za-z0-9_]+)(?:\s+-\s+(.+))?", content + ) + + if failed_test_lines: + for match in failed_test_lines: + test_path = match[0] + error_msg = match[1] if len(match) > 1 and match[1] else "No error message" + + failures.append({"test": test_path, "error": error_msg}) + except Exception as e: + print(f"Error parsing summary file: {e}") + + # If no failures found in summary, try other failure files + if not failures: + failure_patterns = ["_failures_short.txt", "_failures.txt", "_failures_line.txt", "_failures_long.txt"] + + for pattern in failure_patterns: + failures_file = stats_file.replace("_stats.txt", pattern) + if os.path.exists(failures_file): + failures = parse_failures_file(failures_file) + if failures: + break + + # No debug output needed + + # Store results for this test suite + results[base_name] = {"stats": stats, "failures": failures} + + # Filter out entries with "secs were omitted" + filtered_slow_tests = [test for test in all_slow_tests if "secs were omitted" not in test["test"]] + + # Sort all slow tests by duration (descending) + filtered_slow_tests.sort(key=lambda x: x["duration"], reverse=True) + + # Get the number of slowest tests to show from environment variable or default to 10 + num_slowest_tests = int(os.environ.get("SHOW_SLOWEST_TESTS", "10")) + top_slowest_tests = filtered_slow_tests[:num_slowest_tests] if filtered_slow_tests else [] + + # Calculate additional duration statistics + total_duration = sum(test["duration"] for test in all_slow_tests) + + # Calculate duration per suite + suite_durations = {} + for test in all_slow_tests: + suite_name = test["suite"] + if suite_name not in suite_durations: + suite_durations[suite_name] = 0 + suite_durations[suite_name] += test["duration"] + + # Removed duration categories + + return { + "total_stats": total_stats, + 
"test_suites": results, + "slowest_tests": top_slowest_tests, + "duration_stats": {"total_duration": total_duration, "suite_durations": suite_durations}, + } + + +def generate_report(consolidated_data): + """Generate a comprehensive markdown report from consolidated data.""" + report = [] + + # Add report header + report.append("# Diffusers Nightly Test Report") + report.append(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n") + + # Removed comparison section + + # Add summary section + total = consolidated_data["total_stats"] + report.append("## Summary") + + # Get duration stats if available + duration_stats = consolidated_data.get("duration_stats", {}) + total_duration = duration_stats.get("total_duration", 0) + + summary_table = [ + ["Total Tests", total["tests"]], + ["Passed", total["passed"]], + ["Failed", total["failed"]], + ["Skipped", total["skipped"]], + ["Success Rate", f"{(total['passed'] / total['tests'] * 100):.2f}%" if total["tests"] > 0 else "N/A"], + ["Total Duration", f"{total_duration:.2f}s" if total_duration else "N/A"], + ] + + report.append(tabulate(summary_table, tablefmt="pipe")) + report.append("") + + # Removed duration distribution section + + # Add test suites summary + report.append("## Test Suites") + + # Include duration in test suites table if available + suite_durations = consolidated_data.get("duration_stats", {}).get("suite_durations", {}) + + if suite_durations: + suites_table = [["Test Suite", "Tests", "Passed", "Failed", "Skipped", "Success Rate", "Duration (s)"]] + else: + suites_table = [["Test Suite", "Tests", "Passed", "Failed", "Skipped", "Success Rate"]] + + # Sort test suites by success rate (ascending - least successful first) + sorted_suites = sorted( + consolidated_data["test_suites"].items(), + key=lambda x: (x[1]["stats"]["passed"] / x[1]["stats"]["tests"] * 100) if x[1]["stats"]["tests"] > 0 else 0, + reverse=False, + ) + + for suite_name, suite_data in sorted_suites: + stats = suite_data["stats"] + success_rate = f"{(stats['passed'] / stats['tests'] * 100):.2f}%" if stats["tests"] > 0 else "N/A" + + if suite_durations: + duration = suite_durations.get(suite_name, 0) + suites_table.append( + [ + suite_name, + stats["tests"], + stats["passed"], + stats["failed"], + stats["skipped"], + success_rate, + f"{duration:.2f}", + ] + ) + else: + suites_table.append( + [suite_name, stats["tests"], stats["passed"], stats["failed"], stats["skipped"], success_rate] + ) + + report.append(tabulate(suites_table, headers="firstrow", tablefmt="pipe")) + report.append("") + + # Add slowest tests section + slowest_tests = consolidated_data.get("slowest_tests", []) + if slowest_tests: + report.append("## Slowest Tests") + + slowest_table = [["Rank", "Test", "Duration (s)", "Test Suite"]] + for i, test in enumerate(slowest_tests, 1): + # Skip entries that don't contain actual test names + if "< 0.05 secs were omitted" in test["test"]: + continue + slowest_table.append([i, test["test"], f"{test['duration']:.2f}", test["suite"]]) + + report.append(tabulate(slowest_table, headers="firstrow", tablefmt="pipe")) + report.append("") + + # Add failures section if there are any + failed_suites = [s for s in sorted_suites if s[1]["stats"]["failed"] > 0] + + if failed_suites: + report.append("## Failures") + + # Group failures by module for cleaner organization + failures_by_module = {} + + for suite_name, suite_data in failed_suites: + # Extract failures data for this suite + for failure in suite_data.get("failures", []): + test_name = failure["test"] + + 
# If test name doesn't look like a full path, try to reconstruct it + if not ("/" in test_name or "::" in test_name) and "." in test_name: + # For simple 'TestClass.test_method' format, try to get full path from suite name + # Form: tests__cuda -> tests//test_.py::TestClass::test_method + if suite_name.startswith("tests_") and "_cuda" in suite_name: + # Extract component name from suite + component = suite_name.replace("tests_", "").replace("_cuda", "") + if "." in test_name: + class_name, method_name = test_name.split(".", 1) + possible_path = f"tests/{component}/test_{component}.py::{class_name}::{method_name}" + # Use this constructed path if it seems reasonable + if "test_" in method_name: + test_name = possible_path + + # Extract module name from test name + if "::" in test_name: + # For path/file.py::TestClass::test_method format + parts = test_name.split("::") + module_name = parts[-2] if len(parts) >= 2 else "Other" # TestClass + elif "." in test_name: + # For TestClass.test_method format + parts = test_name.split(".") + module_name = parts[0] # TestClass + else: + module_name = "Other" + + # Skip module names that don't look like class/module names + if ( + module_name.startswith(("e.g", "i.e", "etc")) + or module_name.replace(".", "").isdigit() + or len(module_name) < 3 + ): + module_name = "Other" + + # Add to the module group + if module_name not in failures_by_module: + failures_by_module[module_name] = [] + + # Prepend the suite name if the test name doesn't already have a full path + if "/" not in test_name and suite_name not in test_name: + full_test_name = f"{suite_name}::{test_name}" + else: + full_test_name = test_name + + # Add this failure to the module group + failures_by_module[module_name].append( + {"test": full_test_name, "original_test": test_name, "error": failure["error"]} + ) + + # Create a list of failing tests for each module + if failures_by_module: + for module_name, failures in sorted(failures_by_module.items()): + report.append(f"### {module_name}") + + # Put all failed tests in a single code block + report.append("```") + for failure in failures: + # Show test path and error message if available + if failure.get("error") and failure["error"] != "No error message": + report.append(f"{failure['test']} - {failure['error']}") + else: + report.append(failure["test"]) + report.append("```") + + report.append("") # Add space between modules + else: + report.append("*No detailed failure information available*") + report.append("") + + return "\n".join(report) + + +def create_test_groups_table(test_groups, total_tests, total_success_rate): + """Create a table-like format for test groups showing total tests and success rate.""" + if not test_groups: + return None + + # Sort by total test count (descending) + sorted_groups = sorted(test_groups.items(), key=lambda x: x[1]["total"], reverse=True) + + # Create table lines + table_lines = ["```"] + table_lines.append("Test Results Summary") + table_lines.append("-------------------") + table_lines.append(f"Total Tests: {total_tests:,}") + table_lines.append(f"Success Rate: {total_success_rate}") + table_lines.append("") + table_lines.append("Category | Total Tests | Failed | Success Rate") + table_lines.append("------------------- | ----------- | ------ | ------------") + + # Add rows + for category, stats in sorted_groups: + # Pad category name to fixed width (19 chars) + padded_cat = category[:19].ljust(19) # Truncate if too long + # Right-align counts + padded_total = str(stats["total"]).rjust(11) + padded_failed = 
str(stats["failed"]).rjust(6) + # Calculate and format success rate + if stats["total"] > 0: + cat_success_rate = f"{((stats['total'] - stats['failed']) / stats['total'] * 100):.1f}%" + else: + cat_success_rate = "N/A" + padded_rate = cat_success_rate.rjust(12) + table_lines.append(f"{padded_cat} | {padded_total} | {padded_failed} | {padded_rate}") + + table_lines.append("```") + + total_failures = sum(stats["failed"] for stats in test_groups.values()) + return ( + f"*Test Groups Summary ({total_failures} {'failure' if total_failures == 1 else 'failures'}):*\n" + + "\n".join(table_lines) + ) + + +def create_slack_payload(consolidated_data): + """Create a concise Slack message payload from consolidated data.""" + total = consolidated_data["total_stats"] + success_rate = f"{(total['passed'] / total['tests'] * 100):.2f}%" if total["tests"] > 0 else "N/A" + + # Determine emoji based on success rate + if total["failed"] == 0: + emoji = "✅" + elif total["failed"] / total["tests"] < 0.1: + emoji = "⚠️" + else: + emoji = "❌" + + # Create a more compact summary section + summary = f"{emoji} *Diffusers Nightly Tests:* {success_rate} success ({total['passed']}/{total['tests']} tests" + if total["skipped"] > 0: + summary += f", {total['skipped']} skipped" + summary += ")" + + # Create the test suites table in markdown format + # Build the markdown table with proper alignment + table_lines = [] + table_lines.append("```") + + # Sort test suites by success rate (ascending - least successful first) + sorted_suites = sorted( + consolidated_data["test_suites"].items(), + key=lambda x: (x[1]["stats"]["passed"] / x[1]["stats"]["tests"] * 100) if x[1]["stats"]["tests"] > 0 else 0, + reverse=False, + ) + + # Calculate max widths for proper alignment + max_suite_name_len = max(len(suite_name) for suite_name, _ in sorted_suites) if sorted_suites else 10 + max_suite_name_len = max(max_suite_name_len, len("Test Suite")) # Ensure header fits + + # Create header with proper spacing (only Tests, Failed, Success Rate) + header = f"| {'Test Suite'.ljust(max_suite_name_len)} | {'Tests'.rjust(6)} | {'Failed'.rjust(6)} | {'Success Rate'.ljust(12)} |" + separator = f"|:{'-' * max_suite_name_len}|{'-' * 7}:|{'-' * 7}:|:{'-' * 11}|" + + table_lines.append(header) + table_lines.append(separator) + + # Add data rows with proper alignment + for suite_name, suite_data in sorted_suites: + stats = suite_data["stats"] + suite_success_rate = f"{(stats['passed'] / stats['tests'] * 100):.2f}%" if stats["tests"] > 0 else "N/A" + + row = f"| {suite_name.ljust(max_suite_name_len)} | {str(stats['tests']).rjust(6)} | {str(stats['failed']).rjust(6)} | {suite_success_rate.ljust(12)} |" + + table_lines.append(row) + + table_lines.append("```") + + # Create the Slack payload with character limit enforcement + payload = [ + {"type": "section", "text": {"type": "mrkdwn", "text": summary}}, + {"type": "section", "text": {"type": "mrkdwn", "text": "\n".join(table_lines)}}, + ] + + # Add action button + if os.environ.get("GITHUB_RUN_ID"): + run_id = os.environ["GITHUB_RUN_ID"] + payload.append( + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": f"**", + }, + } + ) + + # Add date in more compact form + payload.append( + { + "type": "context", + "elements": [ + { + "type": "plain_text", + "text": f"Results for {date.today()}", + }, + ], + } + ) + + # Enforce 3001 character limit + payload_text = str(payload) + if len(payload_text) > MAX_LEN_MESSAGE: + # Truncate table if payload is too long + # Remove rows from the bottom until under 
limit + original_table_lines = table_lines[:] + while len(str(payload)) > MAX_LEN_MESSAGE and len(table_lines) > 3: # Keep at least header and separator + # Remove the last data row (but keep ``` at the end) + table_lines.pop(-2) # Remove second to last (last is the closing ```) + + # Recreate payload with truncated table + payload[1] = {"type": "section", "text": {"type": "mrkdwn", "text": "\n".join(table_lines)}} + + # Add note if we had to truncate + if len(table_lines) < len(original_table_lines): + truncated_count = len(original_table_lines) - len(table_lines) + table_lines.insert(-1, f"... {truncated_count} more test suites (truncated due to message limit)") + payload[1] = {"type": "section", "text": {"type": "mrkdwn", "text": "\n".join(table_lines)}} + + return payload + + +def create_failed_tests_by_suite_ordered(consolidated_data): + """Group failed tests by test suite, ordered by success rate (ascending).""" + # Sort test suites by success rate (ascending - least successful first) + sorted_suites = sorted( + consolidated_data["test_suites"].items(), + key=lambda x: (x[1]["stats"]["passed"] / x[1]["stats"]["tests"] * 100) if x[1]["stats"]["tests"] > 0 else 0, + reverse=False, + ) + + failed_suite_tests = [] + + # Process suites in order of success rate + for suite_name, suite_data in sorted_suites: + if suite_data["stats"]["failed"] > 0: + suite_failures = [] + + for failure in suite_data.get("failures", []): + test_name = failure["test"] + + # Try to reconstruct full path if partial + if "::" in test_name and "/" in test_name: + full_test_name = test_name + elif "::" in test_name or "." in test_name: + if "/" not in test_name and suite_name not in test_name: + full_test_name = f"{suite_name}::{test_name}" + else: + full_test_name = test_name + else: + full_test_name = f"{suite_name}::{test_name}" + + suite_failures.append(full_test_name) + + # Sort and deduplicate tests within the suite + suite_failures = sorted(set(suite_failures)) + + if suite_failures: + failed_suite_tests.append( + { + "suite_name": suite_name, + "tests": suite_failures, + "success_rate": (suite_data["stats"]["passed"] / suite_data["stats"]["tests"] * 100) + if suite_data["stats"]["tests"] > 0 + else 0, + } + ) + + return failed_suite_tests + + +def main(args): + # Make sure reports directory exists + if not os.path.isdir(args.reports_dir): + print(f"Error: Reports directory '{args.reports_dir}' does not exist.") + return + + # Consolidate reports + consolidated_data = consolidate_reports(args.reports_dir) + + # Check if we found any test results + if consolidated_data["total_stats"]["tests"] == 0: + print(f"Warning: No test results found in '{args.reports_dir}' or its subdirectories.") + + # Generate markdown report + report = generate_report(consolidated_data) + + # Save report to file if specified + if args.output_file: + # Create parent directories if they don't exist + output_dir = os.path.dirname(args.output_file) + if output_dir and not os.path.exists(output_dir): + os.makedirs(output_dir) + + with open(args.output_file, "w") as f: + f.write(report) + + # Only print the report when saving to file + print(report) + + # Send to Slack if token is available (optional, can be disabled) + slack_token = os.environ.get("SLACK_API_TOKEN") + if slack_token and args.slack_channel_name: + payload = create_slack_payload(consolidated_data) + + try: + client = WebClient(token=slack_token) + # Send main message + response = client.chat_postMessage(channel=f"#{args.slack_channel_name}", blocks=payload) + print(f"Report 
sent to Slack channel: {args.slack_channel_name}") + + # Send failed tests as separate threaded replies grouped by test suite (ordered by success rate) + total = consolidated_data["total_stats"] + if total["failed"] > 0: + failed_suites = create_failed_tests_by_suite_ordered(consolidated_data) + for suite_info in failed_suites: + suite_name = suite_info["suite_name"] + suite_tests = suite_info["tests"] + success_rate = suite_info["success_rate"] + message_text = ( + f"**{suite_name}** (Success Rate: {success_rate:.2f}%)\n```\n" + + "\n".join(suite_tests) + + "\n```" + ) + client.chat_postMessage( + channel=f"#{args.slack_channel_name}", + thread_ts=response["ts"], # Reply in thread + text=message_text, # Use text instead of blocks for markdown + ) + print(f"Failed tests details sent as {len(failed_suites)} thread replies") + except Exception as e: + print(f"Error sending report to Slack: {e}") + + +if __name__ == "__main__": + args = parser.parse_args() + main(args) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/custom_init_isort.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/custom_init_isort.py new file mode 100644 index 0000000000000000000000000000000000000000..cc3bccb9bd63a0fbd28105f5d946024c1637b303 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/custom_init_isort.py @@ -0,0 +1,330 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utility that sorts the imports in the custom inits of Diffusers. Diffusers uses init files that delay the +import of an object to when it's actually needed. This is to avoid the main init importing all models, which would +make the line `import transformers` very slow when the user has all optional dependencies installed. The inits with +delayed imports have two halves: one defining a dictionary `_import_structure` which maps modules to the name of the +objects in each module, and one in `TYPE_CHECKING` which looks like a normal init for type-checkers. `isort` or `ruff` +properly sort the second half which looks like traditionl imports, the goal of this script is to sort the first half. + +Use from the root of the repo with: + +```bash +python utils/custom_init_isort.py +``` + +which will auto-sort the imports (used in `make style`). + +For a check only (as used in `make quality`) run: + +```bash +python utils/custom_init_isort.py --check_only +``` +""" + +import argparse +import os +import re +from typing import Any, Callable, List, Optional + + +# Path is defined with the intent you should run this script from the root of the repo. +PATH_TO_TRANSFORMERS = "src/diffusers" + +# Pattern that looks at the indentation in a line. +_re_indent = re.compile(r"^(\s*)\S") +# Pattern that matches `"key":" and puts `key` in group 0. +_re_direct_key = re.compile(r'^\s*"([^"]+)":') +# Pattern that matches `_import_structure["key"]` and puts `key` in group 0. 
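For context, the delayed-import inits this script sorts contain entries in the two shapes matched by `_re_direct_key` and `_re_indirect_key`; a trimmed-down sketch with invented module and object names:

```python
# Invented example of the two entry shapes the sorter has to handle.
_import_structure = {
    "models.example_model": ["ExampleModel"],  # '"key": [...]' form, matched by _re_direct_key
}
_import_structure["pipelines.example"] = ["ExamplePipeline"]  # matched by _re_indirect_key
_import_structure["pipelines.example"].extend(["AnotherExamplePipeline"])
```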
+_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]') +# Pattern that matches `"key",` and puts `key` in group 0. +_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$') +# Pattern that matches any `[stuff]` and puts `stuff` in group 0. +_re_bracket_content = re.compile(r"\[([^\]]+)\]") + + +def get_indent(line: str) -> str: + """Returns the indent in given line (as string).""" + search = _re_indent.search(line) + return "" if search is None else search.groups()[0] + + +def split_code_in_indented_blocks( + code: str, indent_level: str = "", start_prompt: Optional[str] = None, end_prompt: Optional[str] = None +) -> List[str]: + """ + Split some code into its indented blocks, starting at a given level. + + Args: + code (`str`): The code to split. + indent_level (`str`): The indent level (as string) to use for identifying the blocks to split. + start_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is. + end_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is. + + Warning: + The text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code` + can thus be retrieved by joining the result. + + Returns: + `List[str]`: The list of blocks. + """ + # Let's split the code into lines and move to start_index. + index = 0 + lines = code.split("\n") + if start_prompt is not None: + while not lines[index].startswith(start_prompt): + index += 1 + blocks = ["\n".join(lines[:index])] + else: + blocks = [] + + # This variable contains the block treated at a given time. + current_block = [lines[index]] + index += 1 + # We split into blocks until we get to the `end_prompt` (or the end of the file). + while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)): + # We have a non-empty line with the proper indent -> start of a new block + if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level: + # Store the current block in the result and rest. There are two cases: the line is part of the block (like + # a closing parenthesis) or not. + if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "): + # Line is part of the current block + current_block.append(lines[index]) + blocks.append("\n".join(current_block)) + if index < len(lines) - 1: + current_block = [lines[index + 1]] + index += 1 + else: + current_block = [] + else: + # Line is not part of the current block + blocks.append("\n".join(current_block)) + current_block = [lines[index]] + else: + # Just add the line to the current block + current_block.append(lines[index]) + index += 1 + + # Adds current block if it's nonempty. + if len(current_block) > 0: + blocks.append("\n".join(current_block)) + + # Add final block after end_prompt if provided. + if end_prompt is not None and index < len(lines): + blocks.append("\n".join(lines[index:])) + + return blocks + + +def ignore_underscore_and_lowercase(key: Callable[[Any], str]) -> Callable[[Any], str]: + """ + Wraps a key function (as used in a sort) to lowercase and ignore underscores. + """ + + def _inner(x): + return key(x).lower().replace("_", "") + + return _inner + + +def sort_objects(objects: List[Any], key: Optional[Callable[[Any], str]] = None) -> List[Any]: + """ + Sort a list of objects following the rules of isort (all uppercased first, camel-cased second and lower-cased + last). + + Args: + objects (`List[Any]`): + The list of objects to sort. 
+ key (`Callable[[Any], str]`, *optional*): + A function taking an object as input and returning a string, used to sort them by alphabetical order. + If not provided, will default to noop (so a `key` must be provided if the `objects` are not of type string). + + Returns: + `List[Any]`: The sorted list with the same elements as in the inputs + """ + + # If no key is provided, we use a noop. + def noop(x): + return x + + if key is None: + key = noop + # Constants are all uppercase, they go first. + constants = [obj for obj in objects if key(obj).isupper()] + # Classes are not all uppercase but start with a capital, they go second. + classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()] + # Functions begin with a lowercase, they go last. + functions = [obj for obj in objects if not key(obj)[0].isupper()] + + # Then we sort each group. + key1 = ignore_underscore_and_lowercase(key) + return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1) + + +def sort_objects_in_import(import_statement: str) -> str: + """ + Sorts the imports in a single import statement. + + Args: + import_statement (`str`): The import statement in which to sort the imports. + + Returns: + `str`: The same as the input, but with objects properly sorted. + """ + + # This inner function sort imports between [ ]. + def _replace(match): + imports = match.groups()[0] + # If there is one import only, nothing to do. + if "," not in imports: + return f"[{imports}]" + keys = [part.strip().replace('"', "") for part in imports.split(",")] + # We will have a final empty element if the line finished with a comma. + if len(keys[-1]) == 0: + keys = keys[:-1] + return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]" + + lines = import_statement.split("\n") + if len(lines) > 3: + # Here we have to sort internal imports that are on several lines (one per name): + # key: [ + # "object1", + # "object2", + # ... + # ] + + # We may have to ignore one or two lines on each side. + idx = 2 if lines[1].strip() == "[" else 1 + keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])] + sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1]) + sorted_lines = [lines[x[0] + idx] for x in sorted_indices] + return "\n".join(lines[:idx] + sorted_lines + lines[-idx:]) + elif len(lines) == 3: + # Here we have to sort internal imports that are on one separate line: + # key: [ + # "object1", "object2", ... + # ] + if _re_bracket_content.search(lines[1]) is not None: + lines[1] = _re_bracket_content.sub(_replace, lines[1]) + else: + keys = [part.strip().replace('"', "") for part in lines[1].split(",")] + # We will have a final empty element if the line finished with a comma. + if len(keys[-1]) == 0: + keys = keys[:-1] + lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + return "\n".join(lines) + else: + # Finally we have to deal with imports fitting on one line + import_statement = _re_bracket_content.sub(_replace, import_statement) + return import_statement + + +def sort_imports(file: str, check_only: bool = True): + """ + Sort the imports defined in the `_import_structure` of a given init. + + Args: + file (`str`): The path to the init to check/fix. + check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init. + """ + with open(file, encoding="utf-8") as f: + code = f.read() + + # If the file is not a custom init, there is nothing to do. 
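Concretely, `sort_objects` applies a three-bucket ordering (uppercase constants, then CamelCase classes, then lowercase functions, each bucket sorted case- and underscore-insensitively); an illustration with invented names:

```python
# Invented names illustrating the ordering rule implemented by sort_objects().
names = ["load_model", "SOME_CONSTANT", "MyClass", "another_function", "OTHER_CONSTANT", "Zebra"]


def norm(name):
    return name.lower().replace("_", "")


constants = sorted([n for n in names if n.isupper()], key=norm)
classes = sorted([n for n in names if n[0].isupper() and not n.isupper()], key=norm)
functions = sorted([n for n in names if not n[0].isupper()], key=norm)
print(constants + classes + functions)
# ['OTHER_CONSTANT', 'SOME_CONSTANT', 'MyClass', 'Zebra', 'another_function', 'load_model']
```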
+ if "_import_structure" not in code: + return + + # Blocks of indent level 0 + main_blocks = split_code_in_indented_blocks( + code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" + ) + + # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). + for block_idx in range(1, len(main_blocks) - 1): + # Check if the block contains some `_import_structure`s thingy to sort. + block = main_blocks[block_idx] + block_lines = block.split("\n") + + # Get to the start of the imports. + line_idx = 0 + while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]: + # Skip dummy import blocks + if "import dummy" in block_lines[line_idx]: + line_idx = len(block_lines) + else: + line_idx += 1 + if line_idx >= len(block_lines): + continue + + # Ignore beginning and last line: they don't contain anything. + internal_block_code = "\n".join(block_lines[line_idx:-1]) + indent = get_indent(block_lines[1]) + # Slit the internal block into blocks of indent level 1. + internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent) + # We have two categories of import key: list or _import_structure[key].append/extend + pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key + # Grab the keys, but there is a trap: some lines are empty or just comments. + keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks] + # We only sort the lines with a key. + keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None] + sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])] + + # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. + count = 0 + reordered_blocks = [] + for i in range(len(internal_blocks)): + if keys[i] is None: + reordered_blocks.append(internal_blocks[i]) + else: + block = sort_objects_in_import(internal_blocks[sorted_indices[count]]) + reordered_blocks.append(block) + count += 1 + + # And we put our main block back together with its first and last line. + main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]]) + + if code != "\n".join(main_blocks): + if check_only: + return True + else: + print(f"Overwriting {file}.") + with open(file, "w", encoding="utf-8") as f: + f.write("\n".join(main_blocks)) + + +def sort_imports_in_all_inits(check_only=True): + """ + Sort the imports defined in the `_import_structure` of all inits in the repo. + + Args: + check_only (`bool`, *optional*, defaults to `True`): Whether or not to just check (and not auto-fix) the init. 
+ """ + failures = [] + for root, _, files in os.walk(PATH_TO_TRANSFORMERS): + if "__init__.py" in files: + result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only) + if result: + failures = [os.path.join(root, "__init__.py")] + if len(failures) > 0: + raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") + args = parser.parse_args() + + sort_imports_in_all_inits(check_only=args.check_only) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/extract_tests_from_mixin.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/extract_tests_from_mixin.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b65b96ee16071042d93b66071367acad658d9c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/extract_tests_from_mixin.py @@ -0,0 +1,61 @@ +import argparse +import inspect +import sys +from pathlib import Path +from typing import List, Type + + +root_dir = Path(__file__).parent.parent.absolute() +sys.path.insert(0, str(root_dir)) + +parser = argparse.ArgumentParser() +parser.add_argument("--type", type=str, default=None) +args = parser.parse_args() + + +def get_test_methods_from_class(cls: Type) -> List[str]: + """ + Get all test method names from a given class. + Only returns methods that start with 'test_'. + """ + test_methods = [] + for name, obj in inspect.getmembers(cls): + if name.startswith("test_") and inspect.isfunction(obj): + test_methods.append(name) + return sorted(test_methods) + + +def generate_pytest_pattern(test_methods: List[str]) -> str: + """Generate pytest pattern string for the -k flag.""" + return " or ".join(test_methods) + + +def generate_pattern_for_mixin(mixin_class: Type) -> str: + """ + Generate pytest pattern for a specific mixin class. + """ + if mixin_cls is None: + return "" + test_methods = get_test_methods_from_class(mixin_class) + return generate_pytest_pattern(test_methods) + + +if __name__ == "__main__": + mixin_cls = None + if args.type == "pipeline": + from tests.pipelines.test_pipelines_common import PipelineTesterMixin + + mixin_cls = PipelineTesterMixin + + elif args.type == "models": + from tests.models.test_modeling_common import ModelTesterMixin + + mixin_cls = ModelTesterMixin + + elif args.type == "lora": + from tests.lora.utils import PeftLoraLoaderMixinTests + + mixin_cls = PeftLoraLoaderMixinTests + + pattern = generate_pattern_for_mixin(mixin_cls) + print(pattern) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/fetch_latest_release_branch.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/fetch_latest_release_branch.py new file mode 100644 index 0000000000000000000000000000000000000000..5b0be6253e1b96af3883da916fc06c5522321dcf --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/fetch_latest_release_branch.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import requests +from packaging.version import parse + + +# GitHub repository details +USER = "huggingface" +REPO = "diffusers" + + +def fetch_all_branches(user, repo): + branches = [] # List to store all branches + page = 1 # Start from first page + while True: + # Make a request to the GitHub API for the branches + response = requests.get( + f"https://api.github.com/repos/{user}/{repo}/branches", + params={"page": page}, + timeout=60, + ) + + # Check if the request was successful + if response.status_code == 200: + # Add the branches from the current page to the list + branches.extend([branch["name"] for branch in response.json()]) + + # Check if there is a 'next' link for pagination + if "next" in response.links: + page += 1 # Move to the next page + else: + break # Exit loop if there is no next page + else: + print("Failed to retrieve branches:", response.status_code) + break + + return branches + + +def main(): + # Fetch all branches + branches = fetch_all_branches(USER, REPO) + + # Filter branches. + # print(f"Total branches: {len(branches)}") + filtered_branches = [] + for branch in branches: + if branch.startswith("v") and ("-release" in branch or "-patch" in branch): + filtered_branches.append(branch) + # print(f"Filtered: {branch}") + + sorted_branches = sorted(filtered_branches, key=lambda x: parse(x.split("-")[0][1:]), reverse=True) + latest_branch = sorted_branches[0] + # print(f"Latest branch: {latest_branch}") + return latest_branch + + +if __name__ == "__main__": + print(main()) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/fetch_torch_cuda_pipeline_test_matrix.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/fetch_torch_cuda_pipeline_test_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..196f35628ac19922671724b85f80d75fc19fb436 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/fetch_torch_cuda_pipeline_test_matrix.py @@ -0,0 +1,99 @@ +import json +import logging +import os +from collections import defaultdict +from pathlib import Path + +from huggingface_hub import HfApi + +import diffusers + + +PATH_TO_REPO = Path(__file__).parent.parent.resolve() +ALWAYS_TEST_PIPELINE_MODULES = [ + "controlnet", + "controlnet_flux", + "controlnet_sd3", + "stable_diffusion", + "stable_diffusion_2", + "stable_diffusion_3", + "stable_diffusion_xl", + "ip_adapters", + "flux", +] +PIPELINE_USAGE_CUTOFF = int(os.getenv("PIPELINE_USAGE_CUTOFF", 50000)) + +logger = logging.getLogger(__name__) +api = HfApi() + + +def filter_pipelines(usage_dict, usage_cutoff=10000): + output = [] + for diffusers_object, usage in usage_dict.items(): + if usage < usage_cutoff: + continue + + is_diffusers_pipeline = hasattr(diffusers.pipelines, diffusers_object) + if not is_diffusers_pipeline: + continue + + output.append(diffusers_object) + + return output + + +def fetch_pipeline_objects(): + models = api.list_models(library="diffusers") + downloads = defaultdict(int) + + for model in models: + is_counted = False + for tag in model.tags: + if tag.startswith("diffusers:"): + is_counted = True + downloads[tag[len("diffusers:") :]] += model.downloads + + if not is_counted: + downloads["other"] += model.downloads + + # Remove 0 downloads + downloads = {k: v for k, v in downloads.items() if v > 0} + pipeline_objects = filter_pipelines(downloads, PIPELINE_USAGE_CUTOFF) + + return pipeline_objects + + +def fetch_pipeline_modules_to_test(): + try: + 
pipeline_objects = fetch_pipeline_objects() + except Exception as e: + logger.error(e) + raise RuntimeError("Unable to fetch model list from HuggingFace Hub.") + + test_modules = [] + for pipeline_name in pipeline_objects: + module = getattr(diffusers, pipeline_name) + + test_module = module.__module__.split(".")[-2].strip() + test_modules.append(test_module) + + return test_modules + + +def main(): + test_modules = fetch_pipeline_modules_to_test() + test_modules.extend(ALWAYS_TEST_PIPELINE_MODULES) + + # Get unique modules + test_modules = sorted(set(test_modules)) + print(json.dumps(test_modules)) + + save_path = f"{PATH_TO_REPO}/reports" + os.makedirs(save_path, exist_ok=True) + + with open(f"{save_path}/test-pipelines.json", "w") as f: + json.dump({"pipeline_test_modules": test_modules}, f) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/get_modified_files.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/get_modified_files.py new file mode 100644 index 0000000000000000000000000000000000000000..e392e50c12d3af270df748987be33ee6c3937901 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/get_modified_files.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: +# python ./utils/get_modified_files.py utils src tests examples +# +# it uses git to find the forking point and which files were modified - i.e. 
files not under git won't be considered +# since the output of this script is fed into Makefile commands it doesn't print a newline after the results + +import re +import subprocess +import sys + + +fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8") +modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split() + +joined_dirs = "|".join(sys.argv[1:]) +regex = re.compile(rf"^({joined_dirs}).*?\.py$") + +relevant_modified_files = [x for x in modified_files if regex.match(x)] +print(" ".join(relevant_modified_files), end="") diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/log_reports.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/log_reports.py new file mode 100644 index 0000000000000000000000000000000000000000..5575c9ba8415a73358fe48e3d0ac41b2188691d5 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/log_reports.py @@ -0,0 +1,139 @@ +import argparse +import json +import os +from datetime import date +from pathlib import Path + +from slack_sdk import WebClient +from tabulate import tabulate + + +MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters + +parser = argparse.ArgumentParser() +parser.add_argument("--slack_channel_name", default="diffusers-ci-nightly") + + +def main(slack_channel_name=None): + failed = [] + passed = [] + + group_info = [] + + total_num_failed = 0 + empty_file = False or len(list(Path().glob("*.log"))) == 0 + + total_empty_files = [] + + for log in Path().glob("*.log"): + section_num_failed = 0 + i = 0 + with open(log) as f: + for line in f: + line = json.loads(line) + i += 1 + if line.get("nodeid", "") != "": + test = line["nodeid"] + if line.get("duration", None) is not None: + duration = f"{line['duration']:.4f}" + if line.get("outcome", "") == "failed": + section_num_failed += 1 + failed.append([test, duration, log.name.split("_")[0]]) + total_num_failed += 1 + else: + passed.append([test, duration, log.name.split("_")[0]]) + empty_file = i == 0 + group_info.append([str(log), section_num_failed, failed]) + total_empty_files.append(empty_file) + os.remove(log) + failed = [] + text = ( + "🌞 There were no failures!" + if not any(total_empty_files) + else "Something went wrong there is at least one empty file - please check GH action results." + ) + no_error_payload = { + "type": "section", + "text": { + "type": "plain_text", + "text": text, + "emoji": True, + }, + } + + message = "" + payload = [ + { + "type": "header", + "text": { + "type": "plain_text", + "text": "🤗 Results of the Diffusers scheduled nightly tests.", + }, + }, + ] + if total_num_failed > 0: + for i, (name, num_failed, failed_tests) in enumerate(group_info): + if num_failed > 0: + if num_failed == 1: + message += f"*{name}: {num_failed} failed test*\n" + else: + message += f"*{name}: {num_failed} failed tests*\n" + failed_table = [] + for test in failed_tests: + failed_table.append(test[0].split("::")) + failed_table = tabulate( + failed_table, + headers=["Test Location", "Test Case", "Test Name"], + showindex="always", + tablefmt="grid", + maxcolwidths=[12, 12, 12], + ) + message += "\n```\n" + failed_table + "\n```" + + if total_empty_files[i]: + message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n" + print(f"### {message}") + else: + payload.append(no_error_payload) + + if len(message) > MAX_LEN_MESSAGE: + print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}") + message = message[:MAX_LEN_MESSAGE] + "..." 
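+    # Illustrative note (editor's sketch, not part of the original script): the `payload` assembled below is a
+    # list of Slack Block Kit blocks, roughly of the shape
+    #   [{"type": "header", ...}, {"type": "section", "text": {"type": "mrkdwn", "text": message}}, ...]
+    # and it is posted with WebClient.chat_postMessage(channel=..., text=message, blocks=payload).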
+ + if len(message) != 0: + md_report = { + "type": "section", + "text": {"type": "mrkdwn", "text": message}, + } + payload.append(md_report) + action_button = { + "type": "section", + "text": {"type": "mrkdwn", "text": "*For more details:*"}, + "accessory": { + "type": "button", + "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, + "url": f"https://github.com/huggingface/diffusers/actions/runs/{os.environ['GITHUB_RUN_ID']}", + }, + } + payload.append(action_button) + + date_report = { + "type": "context", + "elements": [ + { + "type": "plain_text", + "text": f"Nightly test results for {date.today()}", + }, + ], + } + payload.append(date_report) + + print(payload) + + client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) + client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload) + + +if __name__ == "__main__": + args = parser.parse_args() + main(args.slack_channel_name) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_benchmarking_status.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_benchmarking_status.py new file mode 100644 index 0000000000000000000000000000000000000000..8a426a15b5edfaa0aeb252d19def51a63c9f189c --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_benchmarking_status.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +import requests + + +# Configuration +GITHUB_REPO = "huggingface/diffusers" +GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID") +SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL") + + +def main(args): + action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}" + if args.status == "success": + hub_path = "https://huggingface.co/datasets/diffusers/benchmarks/blob/main/collated_results.csv" + message = ( + "✅ New benchmark workflow successfully run.\n" + f"🕸️ GitHub Action URL: {action_url}.\n" + f"🤗 Check out the benchmarks here: {hub_path}." + ) + else: + message = ( + "❌ Something wrong happened in the benchmarking workflow.\n" + f"Check out the GitHub Action to know more: {action_url}." 
+ ) + + payload = {"text": message} + response = requests.post(SLACK_WEBHOOK_URL, json=payload) + + if response.status_code == 200: + print("Notification sent to Slack successfully.") + else: + print("Failed to send notification to Slack.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--status", type=str, default="success", choices=["success", "failure"]) + args = parser.parse_args() + main(args) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_community_pipelines_mirror.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_community_pipelines_mirror.py new file mode 100644 index 0000000000000000000000000000000000000000..2981f008501f1d437e2df165263c76ddb3939a86 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_community_pipelines_mirror.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os + +import requests + + +# Configuration +GITHUB_REPO = "huggingface/diffusers" +GITHUB_RUN_ID = os.getenv("GITHUB_RUN_ID") +SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL") +PATH_IN_REPO = os.getenv("PATH_IN_REPO") + + +def main(args): + action_url = f"https://github.com/{GITHUB_REPO}/actions/runs/{GITHUB_RUN_ID}" + if args.status == "success": + hub_path = f"https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main/{PATH_IN_REPO}" + message = ( + "✅ Community pipelines successfully mirrored.\n" + f"🕸️ GitHub Action URL: {action_url}.\n" + f"🤗 Hub location: {hub_path}." + ) + else: + message = f"❌ Something wrong happened. Check out the GitHub Action to know more: {action_url}." + + payload = {"text": message} + response = requests.post(SLACK_WEBHOOK_URL, json=payload) + + if response.status_code == 200: + print("Notification sent to Slack successfully.") + else: + print("Failed to send notification to Slack.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--status", type=str, default="success", choices=["success", "failure"]) + args = parser.parse_args() + main(args) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_slack_about_release.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_slack_about_release.py new file mode 100644 index 0000000000000000000000000000000000000000..a68182f8174cfe286d27b7c5c64c4c1258290703 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/notify_slack_about_release.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +import requests + + +# Configuration +LIBRARY_NAME = "diffusers" +GITHUB_REPO = "huggingface/diffusers" +SLACK_WEBHOOK_URL = os.getenv("SLACK_WEBHOOK_URL") + + +def check_pypi_for_latest_release(library_name): + """Check PyPI for the latest release of the library.""" + response = requests.get(f"https://pypi.org/pypi/{library_name}/json", timeout=60) + if response.status_code == 200: + data = response.json() + return data["info"]["version"] + else: + print("Failed to fetch library details from PyPI.") + return None + + +def get_github_release_info(github_repo): + """Fetch the latest release info from GitHub.""" + url = f"https://api.github.com/repos/{github_repo}/releases/latest" + response = requests.get(url, timeout=60) + + if response.status_code == 200: + data = response.json() + return {"tag_name": data["tag_name"], "url": data["html_url"], "release_time": data["published_at"]} + + else: + print("Failed to fetch release info from GitHub.") + return None + + +def notify_slack(webhook_url, library_name, version, release_info): + """Send a notification to a Slack channel.""" + message = ( + f"🚀 New release for {library_name} available: version **{version}** 🎉\n" + f"📜 Release Notes: {release_info['url']}\n" + f"⏱️ Release time: {release_info['release_time']}" + ) + payload = {"text": message} + response = requests.post(webhook_url, json=payload) + + if response.status_code == 200: + print("Notification sent to Slack successfully.") + else: + print("Failed to send notification to Slack.") + + +def main(): + latest_version = check_pypi_for_latest_release(LIBRARY_NAME) + release_info = get_github_release_info(GITHUB_REPO) + parsed_version = release_info["tag_name"].replace("v", "") + + if latest_version and release_info and latest_version == parsed_version: + notify_slack(SLACK_WEBHOOK_URL, LIBRARY_NAME, latest_version, release_info) + else: + print(f"{latest_version=}, {release_info=}, {parsed_version=}") + raise ValueError("There were some problems.") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/overwrite_expected_slice.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/overwrite_expected_slice.py new file mode 100644 index 0000000000000000000000000000000000000000..723c1c98fc21321d9fa86b0d7c2024576eaff3cc --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/overwrite_expected_slice.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
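+# Rough sketch of the expected input (illustrative, hypothetical file and test names): every line of
+# --correct_filename should look like
+#   tests/models/test_foo.py::FooModelTest::test_output_slice::expected_slice = np.array([0.1, 0.2])
+# i.e. "file::class::test::replacement line". If --fail_filename is given, only the "file::class::test"
+# entries listed there are rewritten.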
+import argparse +from collections import defaultdict + + +def overwrite_file(file, class_name, test_name, correct_line, done_test): + _id = f"{file}_{class_name}_{test_name}" + done_test[_id] += 1 + + with open(file, "r") as f: + lines = f.readlines() + + class_regex = f"class {class_name}(" + test_regex = f"{4 * ' '}def {test_name}(" + line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}" + another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}" + in_class = False + in_func = False + in_line = False + insert_line = False + count = 0 + spaces = 0 + + new_lines = [] + for line in lines: + if line.startswith(class_regex): + in_class = True + elif in_class and line.startswith(test_regex): + in_func = True + elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)): + spaces = len(line.split(correct_line.split()[0])[0]) + count += 1 + + if count == done_test[_id]: + in_line = True + + if in_class and in_func and in_line: + if ")" not in line: + continue + else: + insert_line = True + + if in_class and in_func and in_line and insert_line: + new_lines.append(f"{spaces * ' '}{correct_line}") + in_class = in_func = in_line = insert_line = False + else: + new_lines.append(line) + + with open(file, "w") as f: + for line in new_lines: + f.write(line) + + +def main(correct, fail=None): + if fail is not None: + with open(fail, "r") as f: + test_failures = {l.strip() for l in f.readlines()} + else: + test_failures = None + + with open(correct, "r") as f: + correct_lines = f.readlines() + + done_tests = defaultdict(int) + for line in correct_lines: + file, class_name, test_name, correct_line = line.split("::") + if test_failures is None or "::".join([file, class_name, test_name]) in test_failures: + overwrite_file(file, class_name, test_name, correct_line, done_tests) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--correct_filename", help="filename of tests with expected result") + parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None) + args = parser.parse_args() + + main(args.correct_filename, args.fail_filename) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/print_env.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/print_env.py new file mode 100644 index 0000000000000000000000000000000000000000..2fe0777daf7dd266f59ef2c1aaa321d502dd13fe --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/print_env.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 + +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# this script dumps information about the environment + +import os +import platform +import sys + + +os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" + +print("Python version:", sys.version) + +print("OS platform:", platform.platform()) +print("OS architecture:", platform.machine()) +try: + import psutil + + vm = psutil.virtual_memory() + total_gb = vm.total / (1024**3) + available_gb = vm.available / (1024**3) + print(f"Total RAM: {total_gb:.2f} GB") + print(f"Available RAM: {available_gb:.2f} GB") +except ImportError: + pass + +try: + import torch + + print("Torch version:", torch.__version__) + print("Cuda available:", torch.cuda.is_available()) + if torch.cuda.is_available(): + print("Cuda version:", torch.version.cuda) + print("CuDNN version:", torch.backends.cudnn.version()) + print("Number of GPUs available:", torch.cuda.device_count()) + device_properties = torch.cuda.get_device_properties(0) + total_memory = device_properties.total_memory / (1024**3) + print(f"CUDA memory: {total_memory} GB") + + print("XPU available:", hasattr(torch, "xpu") and torch.xpu.is_available()) + if hasattr(torch, "xpu") and torch.xpu.is_available(): + print("XPU model:", torch.xpu.get_device_properties(0).name) + print("XPU compiler version:", torch.version.xpu) + print("Number of XPUs available:", torch.xpu.device_count()) + device_properties = torch.xpu.get_device_properties(0) + total_memory = device_properties.total_memory / (1024**3) + print(f"XPU memory: {total_memory} GB") + + +except ImportError: + print("Torch version:", None) + +try: + import transformers + + print("transformers version:", transformers.__version__) +except ImportError: + print("transformers version:", None) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/release.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/release.py new file mode 100644 index 0000000000000000000000000000000000000000..a0800b99fbebd9d2956b64fd92d93490e36f9a41 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/release.py @@ -0,0 +1,162 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
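+# Typical usage (sketch; the exact flags are defined by the argument parser at the bottom of this file):
+#   python utils/release.py                 # prepare the next minor release
+#   python utils/release.py --patch         # prepare a patch release
+#   python utils/release.py --post_release  # switch back to the next .dev0 version afterwards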
+ +import argparse +import os +import re + +import packaging.version + + +PATH_TO_EXAMPLES = "examples/" +REPLACE_PATTERNS = { + "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), + "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), + "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'), + "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), +} +REPLACE_FILES = { + "init": "src/diffusers/__init__.py", + "setup": "setup.py", +} +README_FILE = "README.md" + + +def update_version_in_file(fname, version, pattern): + """Update the version in one file using a specific pattern.""" + with open(fname, "r", encoding="utf-8", newline="\n") as f: + code = f.read() + re_pattern, replace = REPLACE_PATTERNS[pattern] + replace = replace.replace("VERSION", version) + code = re_pattern.sub(replace, code) + with open(fname, "w", encoding="utf-8", newline="\n") as f: + f.write(code) + + +def update_version_in_examples(version): + """Update the version in all examples files.""" + for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES): + # Removing some of the folders with non-actively maintained examples from the walk + if "research_projects" in directories: + directories.remove("research_projects") + if "legacy" in directories: + directories.remove("legacy") + for fname in fnames: + if fname.endswith(".py"): + update_version_in_file(os.path.join(folder, fname), version, pattern="examples") + + +def global_version_update(version, patch=False): + """Update the version in all needed files.""" + for pattern, fname in REPLACE_FILES.items(): + update_version_in_file(fname, version, pattern) + if not patch: + update_version_in_examples(version) + + +def clean_main_ref_in_model_list(): + """Replace the links from main doc tp stable doc in the model list of the README.""" + # If the introduction or the conclusion of the list change, the prompts may need to be updated. + _start_prompt = "🤗 Transformers currently provides the following architectures" + _end_prompt = "1. Want to contribute a new model?" + with open(README_FILE, "r", encoding="utf-8", newline="\n") as f: + lines = f.readlines() + + # Find the start of the list. + start_index = 0 + while not lines[start_index].startswith(_start_prompt): + start_index += 1 + start_index += 1 + + index = start_index + # Update the lines in the model list. + while not lines[index].startswith(_end_prompt): + if lines[index].startswith("1."): + lines[index] = lines[index].replace( + "https://huggingface.co/docs/diffusers/main/model_doc", + "https://huggingface.co/docs/diffusers/model_doc", + ) + index += 1 + + with open(README_FILE, "w", encoding="utf-8", newline="\n") as f: + f.writelines(lines) + + +def get_version(): + """Reads the current version in the __init__.""" + with open(REPLACE_FILES["init"], "r") as f: + code = f.read() + default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0] + return packaging.version.parse(default_version) + + +def pre_release_work(patch=False): + """Do all the necessary pre-release steps.""" + # First let's get the default version: base version if we are in dev, bump minor otherwise. 
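+    # Illustrative version bumps performed below:
+    #   0.31.0.dev0            -> proposes 0.31.0 (releasing from a dev version)
+    #   0.30.3 with --patch    -> proposes 0.30.4
+    #   0.30.3 without flags   -> proposes 0.31.0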
+ default_version = get_version() + if patch and default_version.is_devrelease: + raise ValueError("Can't create a patch version from the dev branch, checkout a released version!") + if default_version.is_devrelease: + default_version = default_version.base_version + elif patch: + default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" + else: + default_version = f"{default_version.major}.{default_version.minor + 1}.0" + + # Now let's ask nicely if that's the right one. + version = input(f"Which version are you releasing? [{default_version}]") + if len(version) == 0: + version = default_version + + print(f"Updating version to {version}.") + global_version_update(version, patch=patch) + + +# if not patch: +# print("Cleaning main README, don't forget to run `make fix-copies`.") +# clean_main_ref_in_model_list() + + +def post_release_work(): + """Do all the necessary post-release steps.""" + # First let's get the current version + current_version = get_version() + dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0" + current_version = current_version.base_version + + # Check with the user we got that right. + version = input(f"Which version are we developing now? [{dev_version}]") + if len(version) == 0: + version = dev_version + + print(f"Updating version to {version}.") + global_version_update(version) + + +# print("Cleaning main README, don't forget to run `make fix-copies`.") +# clean_main_ref_in_model_list() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") + parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") + args = parser.parse_args() + if not args.post_release: + pre_release_work(patch=args.patch) + elif args.patch: + print("Nothing to do after a patch :-)") + else: + post_release_work() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/stale.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/stale.py new file mode 100644 index 0000000000000000000000000000000000000000..b92fb83ceb4cd531f86edc590f4e22c674787503 --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/stale.py @@ -0,0 +1,69 @@ +# Copyright 2025 The HuggingFace Team, the AllenNLP library authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Script to close stale issue. Taken in part from the AllenNLP repository. +https://github.com/allenai/allennlp. 
+""" + +import os +from datetime import datetime as dt +from datetime import timezone + +from github import Github + + +LABELS_TO_EXEMPT = [ + "close-to-merge", + "good first issue", + "good second issue", + "good difficult issue", + "enhancement", + "new pipeline/model", + "new scheduler", + "wip", +] + + +def main(): + g = Github(os.environ["GITHUB_TOKEN"]) + repo = g.get_repo("huggingface/diffusers") + open_issues = repo.get_issues(state="open") + + for issue in open_issues: + labels = [label.name.lower() for label in issue.get_labels()] + if "stale" in labels: + comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True) + last_comment = comments[0] if len(comments) > 0 else None + if last_comment is not None and last_comment.user.login != "github-actions[bot]": + # Opens the issue if someone other than Stalebot commented. + issue.edit(state="open") + issue.remove_from_labels("stale") + elif ( + (dt.now(timezone.utc) - issue.updated_at).days > 23 + and (dt.now(timezone.utc) - issue.created_at).days >= 30 + and not any(label in LABELS_TO_EXEMPT for label in labels) + ): + # Post a Stalebot notification after 23 days of inactivity. + issue.create_comment( + "This issue has been automatically marked as stale because it has not had " + "recent activity. If you think this still needs to be addressed " + "please comment on this thread.\n\nPlease note that issues that do not follow the " + "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) " + "are likely to be ignored." + ) + issue.add_to_labels("stale") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/tests_fetcher.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/tests_fetcher.py new file mode 100644 index 0000000000000000000000000000000000000000..abdc9fd409dbe85e3cf2f72cc32de617b2434ade --- /dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/tests_fetcher.py @@ -0,0 +1,1128 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Welcome to tests_fetcher V2. + +This util is designed to fetch tests to run on a PR so that only the tests impacted by the modifications are run, and +when too many models are being impacted, only run the tests of a subset of core models. It works like this. + +Stage 1: Identify the modified files. For jobs that run on the main branch, it's just the diff with the last commit. +On a PR, this takes all the files from the branching point to the current commit (so all modifications in a PR, not +just the last commit) but excludes modifications that are on docstrings or comments only. + +Stage 2: Extract the tests to run. This is done by looking at the imports in each module and test file: if module A +imports module B, then changing module B impacts module A, so the tests using module A should be run. 
We thus get the +dependencies of each model and then recursively builds the 'reverse' map of dependencies to get all modules and tests +impacted by a given file. We then only keep the tests (and only the core models tests if there are too many modules). + +Caveats: + - This module only filters tests by files (not individual tests) so it's better to have tests for different things + in different files. + - This module assumes inits are just importing things, not really building objects, so it's better to structure + them this way and move objects building in separate submodules. + +Usage: + +Base use to fetch the tests in a pull request + +```bash +python utils/tests_fetcher.py +``` + +Base use to fetch the tests on a the main branch (with diff from the last commit): + +```bash +python utils/tests_fetcher.py --diff_with_last_commit +``` +""" + +import argparse +import collections +import json +import os +import re +from contextlib import contextmanager +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Union + +from git import Repo + + +PATH_TO_REPO = Path(__file__).parent.parent.resolve() +PATH_TO_EXAMPLES = PATH_TO_REPO / "examples" +PATH_TO_DIFFUSERS = PATH_TO_REPO / "src/diffusers" +PATH_TO_TESTS = PATH_TO_REPO / "tests" + +# Ignore fixtures in tests folder +# Ignore lora since they are always tested +MODULES_TO_IGNORE = ["fixtures", "lora"] + +IMPORTANT_PIPELINES = [ + "controlnet", + "stable_diffusion", + "stable_diffusion_2", + "stable_diffusion_xl", + "stable_video_diffusion", + "deepfloyd_if", + "kandinsky", + "kandinsky2_2", + "text_to_video_synthesis", + "wuerstchen", +] + + +@contextmanager +def checkout_commit(repo: Repo, commit_id: str): + """ + Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit. + + Args: + repo (`git.Repo`): A git repository (for instance the Transformers repo). + commit_id (`str`): The commit reference to checkout inside the context manager. + """ + current_head = repo.head.commit if repo.head.is_detached else repo.head.ref + + try: + repo.git.checkout(commit_id) + yield + + finally: + repo.git.checkout(current_head) + + +def clean_code(content: str) -> str: + """ + Remove docstrings, empty line or comments from some code (used to detect if a diff is real or only concern + comments or docstrings). + + Args: + content (`str`): The code to clean + + Returns: + `str`: The cleaned code. + """ + # We need to deactivate autoformatting here to write escaped triple quotes (we cannot use real triple quotes or + # this would mess up the result if this function applied to this particular file). + # fmt: off + # Remove docstrings by splitting on triple " then triple ': + splits = content.split('\"\"\"') + content = "".join(splits[::2]) + splits = content.split("\'\'\'") + # fmt: on + content = "".join(splits[::2]) + + # Remove empty lines and comments + lines_to_keep = [] + for line in content.split("\n"): + # remove anything that is after a # sign. + line = re.sub("#.*$", "", line) + # remove white lines + if len(line) != 0 and not line.isspace(): + lines_to_keep.append(line) + return "\n".join(lines_to_keep) + + +def keep_doc_examples_only(content: str) -> str: + """ + Remove everything from the code content except the doc examples (used to determined if a diff should trigger doc + tests or not). + + Args: + content (`str`): The code to clean + + Returns: + `str`: The cleaned code. 
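+
+    Illustrative note: only triple-backtick fenced blocks survive this transformation, so two versions of a file
+    that differ only in the surrounding prose (and not in any ```...``` example) will compare equal afterwards.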
+ """ + # Keep doc examples only by splitting on triple "`" + splits = content.split("```") + # Add leading and trailing "```" so the navigation is easier when compared to the original input `content` + content = "```" + "```".join(splits[1::2]) + "```" + + # Remove empty lines and comments + lines_to_keep = [] + for line in content.split("\n"): + # remove anything that is after a # sign. + line = re.sub("#.*$", "", line) + # remove white lines + if len(line) != 0 and not line.isspace(): + lines_to_keep.append(line) + return "\n".join(lines_to_keep) + + +def get_all_tests() -> List[str]: + """ + Walks the `tests` folder to return a list of files/subfolders. This is used to split the tests to run when using + parallelism. The split is: + + - folders under `tests`: (`tokenization`, `pipelines`, etc) except the subfolder `models` is excluded. + - folders under `tests/models`: `bert`, `gpt2`, etc. + - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc. + """ + + # test folders/files directly under `tests` folder + tests = os.listdir(PATH_TO_TESTS) + tests = [f"tests/{f}" for f in tests if "__pycache__" not in f] + tests = sorted([f for f in tests if (PATH_TO_REPO / f).is_dir() or f.startswith("tests/test_")]) + + return tests + + +def diff_is_docstring_only(repo: Repo, branching_point: str, filename: str) -> bool: + """ + Check if the diff is only in docstrings (or comments and whitespace) in a filename. + + Args: + repo (`git.Repo`): A git repository (for instance the Transformers repo). + branching_point (`str`): The commit reference of where to compare for the diff. + filename (`str`): The filename where we want to know if the diff isonly in docstrings/comments. + + Returns: + `bool`: Whether the diff is docstring/comments only or not. + """ + folder = Path(repo.working_dir) + with checkout_commit(repo, branching_point): + with open(folder / filename, "r", encoding="utf-8") as f: + old_content = f.read() + + with open(folder / filename, "r", encoding="utf-8") as f: + new_content = f.read() + + old_content_clean = clean_code(old_content) + new_content_clean = clean_code(new_content) + + return old_content_clean == new_content_clean + + +def diff_contains_doc_examples(repo: Repo, branching_point: str, filename: str) -> bool: + """ + Check if the diff is only in code examples of the doc in a filename. + + Args: + repo (`git.Repo`): A git repository (for instance the Transformers repo). + branching_point (`str`): The commit reference of where to compare for the diff. + filename (`str`): The filename where we want to know if the diff is only in codes examples. + + Returns: + `bool`: Whether the diff is only in code examples of the doc or not. + """ + folder = Path(repo.working_dir) + with checkout_commit(repo, branching_point): + with open(folder / filename, "r", encoding="utf-8") as f: + old_content = f.read() + + with open(folder / filename, "r", encoding="utf-8") as f: + new_content = f.read() + + old_content_clean = keep_doc_examples_only(old_content) + new_content_clean = keep_doc_examples_only(new_content) + + return old_content_clean != new_content_clean + + +def get_diff(repo: Repo, base_commit: str, commits: List[str]) -> List[str]: + """ + Get the diff between a base commit and one or several commits. + + Args: + repo (`git.Repo`): + A git repository (for instance the Transformers repo). + base_commit (`str`): + The commit reference of where to compare for the diff. This is the current commit, not the branching point! 
+ commits (`List[str]`): + The list of commits with which to compare the repo at `base_commit` (so the branching point). + + Returns: + `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files + modified are returned if the diff in the file is not only in docstrings or comments, see + `diff_is_docstring_only`). + """ + print("\n### DIFF ###\n") + code_diff = [] + for commit in commits: + for diff_obj in commit.diff(base_commit): + # We always add new python files + if diff_obj.change_type == "A" and diff_obj.b_path.endswith(".py"): + code_diff.append(diff_obj.b_path) + # We check that deleted python files won't break corresponding tests. + elif diff_obj.change_type == "D" and diff_obj.a_path.endswith(".py"): + code_diff.append(diff_obj.a_path) + # Now for modified files + elif diff_obj.change_type in ["M", "R"] and diff_obj.b_path.endswith(".py"): + # In case of renames, we'll look at the tests using both the old and new name. + if diff_obj.a_path != diff_obj.b_path: + code_diff.extend([diff_obj.a_path, diff_obj.b_path]) + else: + # Otherwise, we check modifications are in code and not docstrings. + if diff_is_docstring_only(repo, commit, diff_obj.b_path): + print(f"Ignoring diff in {diff_obj.b_path} as it only concerns docstrings or comments.") + else: + code_diff.append(diff_obj.a_path) + + return code_diff + + +def get_modified_python_files(diff_with_last_commit: bool = False) -> List[str]: + """ + Return a list of python files that have been modified between: + + - the current head and the main branch if `diff_with_last_commit=False` (default) + - the current head and its parent commit otherwise. + + Returns: + `List[str]`: The list of Python files with a diff (files added, renamed or deleted are always returned, files + modified are returned if the diff in the file is not only in docstrings or comments, see + `diff_is_docstring_only`). + """ + repo = Repo(PATH_TO_REPO) + + if not diff_with_last_commit: + # Need to fetch refs for main using remotes when running with github actions. + upstream_main = repo.remotes.origin.refs.main + + print(f"main is at {upstream_main.commit}") + print(f"Current head is at {repo.head.commit}") + + branching_commits = repo.merge_base(upstream_main, repo.head) + for commit in branching_commits: + print(f"Branching commit: {commit}") + return get_diff(repo, repo.head.commit, branching_commits) + else: + print(f"main is at {repo.head.commit}") + parent_commits = repo.head.commit.parents + for commit in parent_commits: + print(f"Parent commit: {commit}") + return get_diff(repo, repo.head.commit, parent_commits) + + +def get_diff_for_doctesting(repo: Repo, base_commit: str, commits: List[str]) -> List[str]: + """ + Get the diff in doc examples between a base commit and one or several commits. + + Args: + repo (`git.Repo`): + A git repository (for instance the Transformers repo). + base_commit (`str`): + The commit reference of where to compare for the diff. This is the current commit, not the branching point! + commits (`List[str]`): + The list of commits with which to compare the repo at `base_commit` (so the branching point). + + Returns: + `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files + modified are returned if the diff in the file is only in doctest examples). + """ + print("\n### DIFF ###\n") + code_diff = [] + for commit in commits: + for diff_obj in commit.diff(base_commit): + # We only consider Python files and doc files. 
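+            # Reminder: diff_obj.change_type is git's status letter, e.g. "A" = added, "D" = deleted,
+            # "M" = modified, "R" = renamed; renames expose both a_path (old name) and b_path (new name).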
+ if not diff_obj.b_path.endswith(".py") and not diff_obj.b_path.endswith(".md"): + continue + # We always add new python/md files + if diff_obj.change_type in ["A"]: + code_diff.append(diff_obj.b_path) + # Now for modified files + elif diff_obj.change_type in ["M", "R"]: + # In case of renames, we'll look at the tests using both the old and new name. + if diff_obj.a_path != diff_obj.b_path: + code_diff.extend([diff_obj.a_path, diff_obj.b_path]) + else: + # Otherwise, we check modifications contain some doc example(s). + if diff_contains_doc_examples(repo, commit, diff_obj.b_path): + code_diff.append(diff_obj.a_path) + else: + print(f"Ignoring diff in {diff_obj.b_path} as it doesn't contain any doc example.") + + return code_diff + + +def get_all_doctest_files() -> List[str]: + """ + Return the complete list of python and Markdown files on which we run doctest. + + At this moment, we restrict this to only take files from `src/` or `docs/source/en/` that are not in `utils/not_doctested.txt`. + + Returns: + `List[str]`: The complete list of Python and Markdown files on which we run doctest. + """ + py_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.py")] + md_files = [str(x.relative_to(PATH_TO_REPO)) for x in PATH_TO_REPO.glob("**/*.md")] + test_files_to_run = py_files + md_files + + # only include files in `src` or `docs/source/en/` + test_files_to_run = [x for x in test_files_to_run if x.startswith(("src/", "docs/source/en/"))] + # not include init files + test_files_to_run = [x for x in test_files_to_run if not x.endswith(("__init__.py",))] + + # These are files not doctested yet. + with open("utils/not_doctested.txt") as fp: + not_doctested = {x.split(" ")[0] for x in fp.read().strip().split("\n")} + + # So far we don't have 100% coverage for doctest. This line will be removed once we achieve 100%. + test_files_to_run = [x for x in test_files_to_run if x not in not_doctested] + + return sorted(test_files_to_run) + + +def get_new_doctest_files(repo, base_commit, branching_commit) -> List[str]: + """ + Get the list of files that were removed from "utils/not_doctested.txt", between `base_commit` and + `branching_commit`. + + Returns: + `List[str]`: List of files that were removed from "utils/not_doctested.txt". + """ + for diff_obj in branching_commit.diff(base_commit): + # Ignores all but the "utils/not_doctested.txt" file. + if diff_obj.a_path != "utils/not_doctested.txt": + continue + # Loads the two versions + folder = Path(repo.working_dir) + with checkout_commit(repo, branching_commit): + with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f: + old_content = f.read() + with open(folder / "utils/not_doctested.txt", "r", encoding="utf-8") as f: + new_content = f.read() + # Compute the removed lines and return them + removed_content = {x.split(" ")[0] for x in old_content.split("\n")} - { + x.split(" ")[0] for x in new_content.split("\n") + } + return sorted(removed_content) + return [] + + +def get_doctest_files(diff_with_last_commit: bool = False) -> List[str]: + """ + Return a list of python and Markdown files where doc example have been modified between: + + - the current head and the main branch if `diff_with_last_commit=False` (default) + - the current head and its parent commit otherwise. + + Returns: + `List[str]`: The list of Python and Markdown files with a diff (files added or renamed are always returned, files + modified are returned if the diff in the file is only in doctest examples). 
+ """ + repo = Repo(PATH_TO_REPO) + + test_files_to_run = [] # noqa + if not diff_with_last_commit: + upstream_main = repo.remotes.origin.refs.main + print(f"main is at {upstream_main.commit}") + print(f"Current head is at {repo.head.commit}") + + branching_commits = repo.merge_base(upstream_main, repo.head) + for commit in branching_commits: + print(f"Branching commit: {commit}") + test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, branching_commits) + else: + print(f"main is at {repo.head.commit}") + parent_commits = repo.head.commit.parents + for commit in parent_commits: + print(f"Parent commit: {commit}") + test_files_to_run = get_diff_for_doctesting(repo, repo.head.commit, parent_commits) + + all_test_files_to_run = get_all_doctest_files() + + # Add to the test files to run any removed entry from "utils/not_doctested.txt". + new_test_files = get_new_doctest_files(repo, repo.head.commit, upstream_main.commit) + test_files_to_run = list(set(test_files_to_run + new_test_files)) + + # Do not run slow doctest tests on CircleCI + with open("utils/slow_documentation_tests.txt") as fp: + slow_documentation_tests = set(fp.read().strip().split("\n")) + test_files_to_run = [ + x for x in test_files_to_run if x in all_test_files_to_run and x not in slow_documentation_tests + ] + + # Make sure we did not end up with a test file that was removed + test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()] + + return sorted(test_files_to_run) + + +# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line. +# \s*from\s+(\.+\S+)\s+import\s+([^\n]+) -> Line only contains from .xxx import yyy and we catch .xxx and yyy +# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every +# other import. +_re_single_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+([^\n]+)(?=\n)") +# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line. +# \s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\) -> Line continues with from .xxx import (yyy) and we catch .xxx and yyy +# yyy will take multiple lines otherwise there wouldn't be parenthesis. +_re_multi_line_relative_imports = re.compile(r"(?:^|\n)\s*from\s+(\.+\S+)\s+import\s+\(([^\)]+)\)") +# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line. +# \s*from\s+transformers(\S*)\s+import\s+([^\n]+) -> Line only contains from transformers.xxx import yyy and we catch +# .xxx and yyy +# (?=\n) -> Look-ahead to a new line. We can't just put \n here or using find_all on this re will only catch every +# other import. +_re_single_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+diffusers(\S*)\s+import\s+([^\n]+)(?=\n)") +# (:?^|\n) -> Non-catching group for the beginning of the doc or a new line. +# \s*from\s+transformers(\S*)\s+import\s+\(([^\)]+)\) -> Line continues with from transformers.xxx import (yyy) and we +# catch .xxx and yyy. yyy will take multiple lines otherwise there wouldn't be parenthesis. +_re_multi_line_direct_imports = re.compile(r"(?:^|\n)\s*from\s+diffusers(\S*)\s+import\s+\(([^\)]+)\)") + + +def extract_imports(module_fname: str, cache: Dict[str, List[str]] = None) -> List[str]: + """ + Get the imports a given module makes. + + Args: + module_fname (`str`): + The name of the file of the module where we want to look at the imports (given relative to the root of + the repo). 
+ cache (Dictionary `str` to `List[str]`, *optional*): + To speed up this function if it was previously called on `module_fname`, the cache of all previously + computed results. + + Returns: + `List[str]`: The list of module filenames imported in the input `module_fname` (a submodule we import from that + is a subfolder will give its init file). + """ + if cache is not None and module_fname in cache: + return cache[module_fname] + + with open(PATH_TO_REPO / module_fname, "r", encoding="utf-8") as f: + content = f.read() + + # Filter out all docstrings to not get imports in code examples. As before we need to deactivate formatting to + # keep this as escaped quotes and avoid this function failing on this file. + # fmt: off + splits = content.split('\"\"\"') + # fmt: on + content = "".join(splits[::2]) + + module_parts = str(module_fname).split(os.path.sep) + imported_modules = [] + + # Let's start with relative imports + relative_imports = _re_single_line_relative_imports.findall(content) + relative_imports = [ + (mod, imp) for mod, imp in relative_imports if "# tests_ignore" not in imp and imp.strip() != "(" + ] + multiline_relative_imports = _re_multi_line_relative_imports.findall(content) + relative_imports += [(mod, imp) for mod, imp in multiline_relative_imports if "# tests_ignore" not in imp] + + # We need to remove parts of the module name depending on the depth of the relative imports. + for module, imports in relative_imports: + level = 0 + while module.startswith("."): + module = module[1:] + level += 1 + + if len(module) > 0: + dep_parts = module_parts[: len(module_parts) - level] + module.split(".") + else: + dep_parts = module_parts[: len(module_parts) - level] + imported_module = os.path.sep.join(dep_parts) + imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")])) + + # Let's continue with direct imports + direct_imports = _re_single_line_direct_imports.findall(content) + direct_imports = [(mod, imp) for mod, imp in direct_imports if "# tests_ignore" not in imp and imp.strip() != "("] + multiline_direct_imports = _re_multi_line_direct_imports.findall(content) + direct_imports += [(mod, imp) for mod, imp in multiline_direct_imports if "# tests_ignore" not in imp] + + # We need to find the relative path of those imports. + for module, imports in direct_imports: + import_parts = module.split(".")[1:] # ignore the name of the repo since we add it below. + dep_parts = ["src", "diffusers"] + import_parts + imported_module = os.path.sep.join(dep_parts) + imported_modules.append((imported_module, [imp.strip() for imp in imports.split(",")])) + + result = [] + # Double check we get proper modules (either a python file or a folder with an init). 
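+    # Illustrative walk-through: in `src/diffusers/models/attention.py`, a line such as
+    #   from ..utils import logging
+    # is captured with module "..utils" -> level 2, so dep_parts becomes ["src", "diffusers", "utils"],
+    # which the loop below resolves to `src/diffusers/utils/__init__.py` (a folder with an init).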
+ for module_file, imports in imported_modules: + if (PATH_TO_REPO / f"{module_file}.py").is_file(): + module_file = f"{module_file}.py" + elif (PATH_TO_REPO / module_file).is_dir() and (PATH_TO_REPO / module_file / "__init__.py").is_file(): + module_file = os.path.sep.join([module_file, "__init__.py"]) + imports = [imp for imp in imports if len(imp) > 0 and re.match("^[A-Za-z0-9_]*$", imp)] + if len(imports) > 0: + result.append((module_file, imports)) + + if cache is not None: + cache[module_fname] = result + + return result + + +def get_module_dependencies(module_fname: str, cache: Dict[str, List[str]] = None) -> List[str]: + """ + Refines the result of `extract_imports` to remove subfolders and get a proper list of module filenames: if a file + as an import `from utils import Foo, Bar`, with `utils` being a subfolder containing many files, this will traverse + the `utils` init file to check where those dependencies come from: for instance the files utils/foo.py and utils/bar.py. + + Warning: This presupposes that all intermediate inits are properly built (with imports from the respective + submodules) and work better if objects are defined in submodules and not the intermediate init (otherwise the + intermediate init is added, and inits usually have a lot of dependencies). + + Args: + module_fname (`str`): + The name of the file of the module where we want to look at the imports (given relative to the root of + the repo). + cache (Dictionary `str` to `List[str]`, *optional*): + To speed up this function if it was previously called on `module_fname`, the cache of all previously + computed results. + + Returns: + `List[str]`: The list of module filenames imported in the input `module_fname` (with submodule imports refined). + """ + dependencies = [] + imported_modules = extract_imports(module_fname, cache=cache) + # The while loop is to recursively traverse all inits we may encounter: we will add things as we go. + while len(imported_modules) > 0: + new_modules = [] + for module, imports in imported_modules: + # If we end up in an __init__ we are often not actually importing from this init (except in the case where + # the object is fully defined in the __init__) + if module.endswith("__init__.py"): + # So we get the imports from that init then try to find where our objects come from. + new_imported_modules = extract_imports(module, cache=cache) + for new_module, new_imports in new_imported_modules: + if any(i in new_imports for i in imports): + if new_module not in dependencies: + new_modules.append((new_module, [i for i in new_imports if i in imports])) + imports = [i for i in imports if i not in new_imports] + if len(imports) > 0: + # If there are any objects lefts, they may be a submodule + path_to_module = PATH_TO_REPO / module.replace("__init__.py", "") + dependencies.extend( + [ + os.path.join(module.replace("__init__.py", ""), f"{i}.py") + for i in imports + if (path_to_module / f"{i}.py").is_file() + ] + ) + imports = [i for i in imports if not (path_to_module / f"{i}.py").is_file()] + if len(imports) > 0: + # Then if there are still objects left, they are fully defined in the init, so we keep it as a + # dependency. + dependencies.append(module) + else: + dependencies.append(module) + + imported_modules = new_modules + + return dependencies + + +def create_reverse_dependency_tree() -> List[Tuple[str, str]]: + """ + Create a list of all edges (a, b) which mean that modifying a impacts b with a going over all module and test files. 
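+    For instance (with hypothetical file names), an edge ("src/diffusers/models/foo.py",
+    "tests/models/test_modeling_foo.py") means that a change to the first file should trigger the second.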
+ """ + cache = {} + all_modules = list(PATH_TO_DIFFUSERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) + all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] + edges = [(dep, mod) for mod in all_modules for dep in get_module_dependencies(mod, cache=cache)] + + return list(set(edges)) + + +def get_tree_starting_at(module: str, edges: List[Tuple[str, str]]) -> List[Union[str, List[str]]]: + """ + Returns the tree starting at a given module following all edges. + + Args: + module (`str`): The module that will be the root of the subtree we want. + edges (`List[Tuple[str, str]]`): The list of all edges of the tree. + + Returns: + `List[Union[str, List[str]]]`: The tree to print in the following format: [module, [list of edges + starting at module], [list of edges starting at the preceding level], ...] + """ + vertices_seen = [module] + new_edges = [edge for edge in edges if edge[0] == module and edge[1] != module and "__init__.py" not in edge[1]] + tree = [module] + while len(new_edges) > 0: + tree.append(new_edges) + final_vertices = list({edge[1] for edge in new_edges}) + vertices_seen.extend(final_vertices) + new_edges = [ + edge + for edge in edges + if edge[0] in final_vertices and edge[1] not in vertices_seen and "__init__.py" not in edge[1] + ] + + return tree + + +def print_tree_deps_of(module, all_edges=None): + """ + Prints the tree of modules depending on a given module. + + Args: + module (`str`): The module that will be the root of the subtree we want. + all_edges (`List[Tuple[str, str]]`, *optional*): + The list of all edges of the tree. Will be set to `create_reverse_dependency_tree()` if not passed. + """ + if all_edges is None: + all_edges = create_reverse_dependency_tree() + tree = get_tree_starting_at(module, all_edges) + + # The list of lines is a list of tuples (line_to_be_printed, module) + # Keeping the modules lets us know where to insert each new lines in the list. + lines = [(tree[0], tree[0])] + for index in range(1, len(tree)): + edges = tree[index] + start_edges = {edge[0] for edge in edges} + + for start in start_edges: + end_edges = {edge[1] for edge in edges if edge[0] == start} + # We will insert all those edges just after the line showing start. + pos = 0 + while lines[pos][1] != start: + pos += 1 + lines = lines[: pos + 1] + [(" " * (2 * index) + end, end) for end in end_edges] + lines[pos + 1 :] + + for line in lines: + # We don't print the refs that where just here to help build lines. + print(line[0]) + + +def init_test_examples_dependencies() -> Tuple[Dict[str, List[str]], List[str]]: + """ + The test examples do not import from the examples (which are just scripts, not modules) so we need some extra + care initializing the dependency map, which is the goal of this function. It initializes the dependency map for + example files by linking each example to the example test file for the example framework. + + Returns: + `Tuple[Dict[str, List[str]], List[str]]`: A tuple with two elements: the initialized dependency map which is a + dict test example file to list of example files potentially tested by that test file, and the list of all + example files (to avoid recomputing it later). + """ + test_example_deps = {} + all_examples = [] + for framework in ["flax", "pytorch", "tensorflow"]: + test_files = list((PATH_TO_EXAMPLES / framework).glob("test_*.py")) + all_examples.extend(test_files) + # Remove the files at the root of examples/framework since they are not proper examples (they are either utils + # or example test files). 
+ examples = [ + f for f in (PATH_TO_EXAMPLES / framework).glob("**/*.py") if f.parent != PATH_TO_EXAMPLES / framework + ] + all_examples.extend(examples) + for test_file in test_files: + with open(test_file, "r", encoding="utf-8") as f: + content = f.read() + # Map all examples to the test files found in examples/framework. + test_example_deps[str(test_file.relative_to(PATH_TO_REPO))] = [ + str(e.relative_to(PATH_TO_REPO)) for e in examples if e.name in content + ] + # Also map the test files to themselves. + test_example_deps[str(test_file.relative_to(PATH_TO_REPO))].append( + str(test_file.relative_to(PATH_TO_REPO)) + ) + return test_example_deps, all_examples + + +def create_reverse_dependency_map() -> Dict[str, List[str]]: + """ + Create the dependency map from module/test filename to the list of modules/tests that depend on it recursively. + + Returns: + `Dict[str, List[str]]`: The reverse dependency map as a dictionary mapping filenames to all the filenames + depending on it recursively. This way the tests impacted by a change in file A are the test files in the list + corresponding to key A in this result. + """ + cache = {} + # Start from the example deps init. + example_deps, examples = init_test_examples_dependencies() + # Add all modules and all tests to all examples + all_modules = list(PATH_TO_DIFFUSERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) + examples + all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] + # Compute the direct dependencies of all modules. + direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules} + direct_deps.update(example_deps) + + # This recurses the dependencies + something_changed = True + while something_changed: + something_changed = False + for m in all_modules: + for d in direct_deps[m]: + # We stop recursing at an init (cause we always end up in the main init and we don't want to add all + # files which the main init imports) + if d.endswith("__init__.py"): + continue + if d not in direct_deps: + raise ValueError(f"KeyError:{d}. From {m}") + new_deps = set(direct_deps[d]) - set(direct_deps[m]) + if len(new_deps) > 0: + direct_deps[m].extend(list(new_deps)) + something_changed = True + + # Finally we can build the reverse map. + reverse_map = collections.defaultdict(list) + for m in all_modules: + for d in direct_deps[m]: + reverse_map[d].append(m) + + # For inits, we don't do the reverse deps but the direct deps: if modifying an init, we want to make sure we test + # all the modules impacted by that init. + for m in [f for f in all_modules if f.endswith("__init__.py")]: + direct_deps = get_module_dependencies(m, cache=cache) + deps = sum([reverse_map[d] for d in direct_deps if not d.endswith("__init__.py")], direct_deps) + reverse_map[m] = list(set(deps) - {m}) + + return reverse_map + + +def create_module_to_test_map(reverse_map: Dict[str, List[str]] = None) -> Dict[str, List[str]]: + """ + Extract the tests from the reverse_dependency_map and potentially filters the model tests. + + Args: + reverse_map (`Dict[str, List[str]]`, *optional*): + The reverse dependency map as created by `create_reverse_dependency_map`. Will default to the result of + that function if not provided. + filter_pipelines (`bool`, *optional*, defaults to `False`): + Whether or not to filter pipeline tests to only include core pipelines if a file impacts a lot of models. + + Returns: + `Dict[str, List[str]]`: A dictionary that maps each file to the tests to execute if that file was modified. 
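+
+        For instance (hypothetical paths), an entry mapping `src/diffusers/models/attention.py` to
+        `["tests/models/test_attention.py", ...]` means a change to that file should trigger those tests.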
+ """ + if reverse_map is None: + reverse_map = create_reverse_dependency_map() + + # Utility that tells us if a given file is a test (taking test examples into account) + def is_test(fname): + if fname.startswith("tests"): + return True + if fname.startswith("examples") and fname.split(os.path.sep)[-1].startswith("test"): + return True + return False + + # Build the test map + test_map = {module: [f for f in deps if is_test(f)] for module, deps in reverse_map.items()} + + return test_map + + +def check_imports_all_exist(): + """ + Isn't used per se by the test fetcher but might be used later as a quality check. Putting this here for now so the + code is not lost. This checks all imports in a given file do exist. + """ + cache = {} + all_modules = list(PATH_TO_DIFFUSERS.glob("**/*.py")) + list(PATH_TO_TESTS.glob("**/*.py")) + all_modules = [str(mod.relative_to(PATH_TO_REPO)) for mod in all_modules] + direct_deps = {m: get_module_dependencies(m, cache=cache) for m in all_modules} + + for module, deps in direct_deps.items(): + for dep in deps: + if not (PATH_TO_REPO / dep).is_file(): + print(f"{module} has dependency on {dep} which does not exist.") + + +def _print_list(l) -> str: + """ + Pretty print a list of elements with one line per element and a - starting each line. + """ + return "\n".join([f"- {f}" for f in l]) + + +def update_test_map_with_core_pipelines(json_output_file: str): + print(f"\n### ADD CORE PIPELINE TESTS ###\n{_print_list(IMPORTANT_PIPELINES)}") + with open(json_output_file, "rb") as fp: + test_map = json.load(fp) + + # Add core pipelines as their own test group + test_map["core_pipelines"] = " ".join( + sorted([str(PATH_TO_TESTS / f"pipelines/{pipe}") for pipe in IMPORTANT_PIPELINES]) + ) + + # If there are no existing pipeline tests save the map + if "pipelines" not in test_map: + with open(json_output_file, "w", encoding="UTF-8") as fp: + json.dump(test_map, fp, ensure_ascii=False) + + pipeline_tests = test_map.pop("pipelines") + pipeline_tests = pipeline_tests.split(" ") + + # Remove core pipeline tests from the fetched pipeline tests + updated_pipeline_tests = [] + for pipe in pipeline_tests: + if pipe == "tests/pipelines" or Path(pipe).parts[2] in IMPORTANT_PIPELINES: + continue + updated_pipeline_tests.append(pipe) + + if len(updated_pipeline_tests) > 0: + test_map["pipelines"] = " ".join(sorted(updated_pipeline_tests)) + + with open(json_output_file, "w", encoding="UTF-8") as fp: + json.dump(test_map, fp, ensure_ascii=False) + + +def create_json_map(test_files_to_run: List[str], json_output_file: Optional[str] = None): + """ + Creates a map from a list of tests to run to easily split them by category, when running parallelism of slow tests. + + Args: + test_files_to_run (`List[str]`): The list of tests to run. + json_output_file (`str`): The path where to store the built json map. + """ + if json_output_file is None: + return + + test_map = {} + for test_file in test_files_to_run: + # `test_file` is a path to a test folder/file, starting with `tests/`. For example, + # - `tests/models/bert/test_modeling_bert.py` or `tests/models/bert` + # - `tests/trainer/test_trainer.py` or `tests/trainer` + # - `tests/test_modeling_common.py` + names = test_file.split(os.path.sep) + module = names[1] + if module in MODULES_TO_IGNORE: + continue + + if len(names) > 2 or not test_file.endswith(".py"): + # test folders under `tests` or python files under them + # take the part like tokenization, `pipeline`, etc. 
for other test categories + key = os.path.sep.join(names[1:2]) + else: + # common test files directly under `tests/` + key = "common" + + if key not in test_map: + test_map[key] = [] + test_map[key].append(test_file) + + # sort the keys & values + keys = sorted(test_map.keys()) + test_map = {k: " ".join(sorted(test_map[k])) for k in keys} + + with open(json_output_file, "w", encoding="UTF-8") as fp: + json.dump(test_map, fp, ensure_ascii=False) + + +def infer_tests_to_run( + output_file: str, + diff_with_last_commit: bool = False, + json_output_file: Optional[str] = None, +): + """ + The main function called by the test fetcher. Determines the tests to run from the diff. + + Args: + output_file (`str`): + The path where to store the summary of the test fetcher analysis. Other files will be stored in the same + folder: + + - examples_test_list.txt: The list of examples tests to run. + - test_repo_utils.txt: Will indicate if the repo utils tests should be run or not. + - doctest_list.txt: The list of doctests to run. + + diff_with_last_commit (`bool`, *optional*, defaults to `False`): + Whether to analyze the diff with the last commit (for use on the main branch after a PR is merged) or with + the branching point from main (for use on each PR). + filter_models (`bool`, *optional*, defaults to `True`): + Whether or not to filter the tests to core models only, when a file modified results in a lot of model + tests. + json_output_file (`str`, *optional*): + The path where to store the json file mapping categories of tests to tests to run (used for parallelism or + the slow tests). + """ + modified_files = get_modified_python_files(diff_with_last_commit=diff_with_last_commit) + print(f"\n### MODIFIED FILES ###\n{_print_list(modified_files)}") + # Create the map that will give us all impacted modules. + reverse_map = create_reverse_dependency_map() + impacted_files = modified_files.copy() + for f in modified_files: + if f in reverse_map: + impacted_files.extend(reverse_map[f]) + + # Remove duplicates + impacted_files = sorted(set(impacted_files)) + print(f"\n### IMPACTED FILES ###\n{_print_list(impacted_files)}") + + # Grab the corresponding test files: + if any(x in modified_files for x in ["setup.py"]): + test_files_to_run = ["tests", "examples"] + + # in order to trigger pipeline tests even if no code change at all + if "tests/utils/tiny_model_summary.json" in modified_files: + test_files_to_run = ["tests"] + any(f.split(os.path.sep)[0] == "utils" for f in modified_files) + else: + # All modified tests need to be run. + test_files_to_run = [ + f for f in modified_files if f.startswith("tests") and f.split(os.path.sep)[-1].startswith("test") + ] + # Then we grab the corresponding test files. 
+ test_map = create_module_to_test_map(reverse_map=reverse_map) + for f in modified_files: + if f in test_map: + test_files_to_run.extend(test_map[f]) + test_files_to_run = sorted(set(test_files_to_run)) + # Make sure we did not end up with a test file that was removed + test_files_to_run = [f for f in test_files_to_run if (PATH_TO_REPO / f).exists()] + + any(f.split(os.path.sep)[0] == "utils" for f in modified_files) + + examples_tests_to_run = [f for f in test_files_to_run if f.startswith("examples")] + test_files_to_run = [f for f in test_files_to_run if not f.startswith("examples")] + print(f"\n### TEST TO RUN ###\n{_print_list(test_files_to_run)}") + if len(test_files_to_run) > 0: + with open(output_file, "w", encoding="utf-8") as f: + f.write(" ".join(test_files_to_run)) + + # Create a map that maps test categories to test files, i.e. `models/bert` -> [...test_modeling_bert.py, ...] + + # Get all test directories (and some common test files) under `tests` and `tests/models` if `test_files_to_run` + # contains `tests` (i.e. when `setup.py` is changed). + if "tests" in test_files_to_run: + test_files_to_run = get_all_tests() + + create_json_map(test_files_to_run, json_output_file) + + print(f"\n### EXAMPLES TEST TO RUN ###\n{_print_list(examples_tests_to_run)}") + if len(examples_tests_to_run) > 0: + # We use `all` in the case `commit_flags["test_all"]` as well as in `create_circleci_config.py` for processing + if examples_tests_to_run == ["examples"]: + examples_tests_to_run = ["all"] + example_file = Path(output_file).parent / "examples_test_list.txt" + with open(example_file, "w", encoding="utf-8") as f: + f.write(" ".join(examples_tests_to_run)) + + +def filter_tests(output_file: str, filters: List[str]): + """ + Reads the content of the output file and filters out all the tests in a list of given folders. + + Args: + output_file (`str` or `os.PathLike`): The path to the output file of the tests fetcher. + filters (`List[str]`): A list of folders to filter. + """ + if not os.path.isfile(output_file): + print("No test file found.") + return + with open(output_file, "r", encoding="utf-8") as f: + test_files = f.read().split(" ") + + if len(test_files) == 0 or test_files == [""]: + print("No tests to filter.") + return + + if test_files == ["tests"]: + test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py"] + filters] + else: + test_files = [f for f in test_files if f.split(os.path.sep)[1] not in filters] + + with open(output_file, "w", encoding="utf-8") as f: + f.write(" ".join(test_files)) + + +def parse_commit_message(commit_message: str) -> Dict[str, bool]: + """ + Parses the commit message to detect if a command is there to skip, force all or part of the CI. + + Args: + commit_message (`str`): The commit message of the current commit. + + Returns: + `Dict[str, bool]`: A dictionary of strings to bools with keys the following keys: `"skip"`, + `"test_all_models"` and `"test_all"`. 
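+
+        For example, a commit message containing `[skip ci]` (or `[ci skip]`) yields
+        `{"skip": True, "no_filter": False, "test_all": False}`, and `[test all]` sets `"test_all"` to `True`.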
+ """ + if commit_message is None: + return {"skip": False, "no_filter": False, "test_all": False} + + command_search = re.search(r"\[([^\]]*)\]", commit_message) + if command_search is not None: + command = command_search.groups()[0] + command = command.lower().replace("-", " ").replace("_", " ") + skip = command in ["ci skip", "skip ci", "circleci skip", "skip circleci"] + no_filter = set(command.split(" ")) == {"no", "filter"} + test_all = set(command.split(" ")) == {"test", "all"} + return {"skip": skip, "no_filter": no_filter, "test_all": test_all} + else: + return {"skip": False, "no_filter": False, "test_all": False} + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--output_file", type=str, default="test_list.txt", help="Where to store the list of tests to run" + ) + parser.add_argument( + "--json_output_file", + type=str, + default="test_map.json", + help="Where to store the tests to run in a dictionary format mapping test categories to test files", + ) + parser.add_argument( + "--diff_with_last_commit", + action="store_true", + help="To fetch the tests between the current commit and the last commit", + ) + parser.add_argument( + "--filter_tests", + action="store_true", + help="Will filter the pipeline/repo utils tests outside of the generated list of tests.", + ) + parser.add_argument( + "--print_dependencies_of", + type=str, + help="Will only print the tree of modules depending on the file passed.", + default=None, + ) + parser.add_argument( + "--commit_message", + type=str, + help="The commit message (which could contain a command to force all tests or skip the CI).", + default=None, + ) + args = parser.parse_args() + if args.print_dependencies_of is not None: + print_tree_deps_of(args.print_dependencies_of) + else: + repo = Repo(PATH_TO_REPO) + commit_message = repo.head.commit.message + commit_flags = parse_commit_message(commit_message) + if commit_flags["skip"]: + print("Force-skipping the CI") + quit() + if commit_flags["no_filter"]: + print("Running all tests fetched without filtering.") + if commit_flags["test_all"]: + print("Force-launching all tests") + + diff_with_last_commit = args.diff_with_last_commit + if not diff_with_last_commit and not repo.head.is_detached and repo.head.ref == repo.refs.main: + print("main branch detected, fetching tests against last commit.") + diff_with_last_commit = True + + if not commit_flags["test_all"]: + try: + infer_tests_to_run( + args.output_file, + diff_with_last_commit=diff_with_last_commit, + json_output_file=args.json_output_file, + ) + filter_tests(args.output_file, ["repo_utils"]) + update_test_map_with_core_pipelines(json_output_file=args.json_output_file) + + except Exception as e: + print(f"\nError when trying to grab the relevant tests: {e}\n\nRunning all tests.") + commit_flags["test_all"] = True + + if commit_flags["test_all"]: + with open(args.output_file, "w", encoding="utf-8") as f: + f.write("tests") + example_file = Path(args.output_file).parent / "examples_test_list.txt" + with open(example_file, "w", encoding="utf-8") as f: + f.write("all") + + test_files_to_run = get_all_tests() + create_json_map(test_files_to_run, args.json_output_file) + update_test_map_with_core_pipelines(json_output_file=args.json_output_file) diff --git a/exp_code/1_benchmark/diffusers-WanS2V/utils/update_metadata.py b/exp_code/1_benchmark/diffusers-WanS2V/utils/update_metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..4fde581d4170ef46b168c4cbf59671463d9666c7 --- 
/dev/null +++ b/exp_code/1_benchmark/diffusers-WanS2V/utils/update_metadata.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2025 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Utility that updates the metadata of the Diffusers library in the repository `huggingface/diffusers-metadata`. + +Usage for an update (as used by the GitHub action `update_metadata`): + +```bash +python utils/update_metadata.py +``` + +Script modified from: +https://github.com/huggingface/transformers/blob/main/utils/update_metadata.py +""" + +import argparse +import os +import tempfile + +import pandas as pd +from datasets import Dataset +from huggingface_hub import hf_hub_download, upload_folder + +from diffusers.pipelines.auto_pipeline import ( + AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, + AUTO_INPAINT_PIPELINES_MAPPING, + AUTO_TEXT2IMAGE_PIPELINES_MAPPING, +) + + +PIPELINE_TAG_JSON = "pipeline_tags.json" + + +def get_supported_pipeline_table() -> dict: + """ + Generates a dictionary containing the supported auto classes for each pipeline type, + using the content of the auto modules. + """ + # All supported pipelines for automatic mapping. + all_supported_pipeline_classes = [ + (class_name.__name__, "text-to-image", "AutoPipelineForText2Image") + for _, class_name in AUTO_TEXT2IMAGE_PIPELINES_MAPPING.items() + ] + all_supported_pipeline_classes += [ + (class_name.__name__, "image-to-image", "AutoPipelineForImage2Image") + for _, class_name in AUTO_IMAGE2IMAGE_PIPELINES_MAPPING.items() + ] + all_supported_pipeline_classes += [ + (class_name.__name__, "image-to-image", "AutoPipelineForInpainting") + for _, class_name in AUTO_INPAINT_PIPELINES_MAPPING.items() + ] + all_supported_pipeline_classes = list(set(all_supported_pipeline_classes)) + all_supported_pipeline_classes.sort(key=lambda x: x[0]) + + data = {} + data["pipeline_class"] = [sample[0] for sample in all_supported_pipeline_classes] + data["pipeline_tag"] = [sample[1] for sample in all_supported_pipeline_classes] + data["auto_class"] = [sample[2] for sample in all_supported_pipeline_classes] + + return data + + +def update_metadata(commit_sha: str): + """ + Update the metadata for the Diffusers repo in `huggingface/diffusers-metadata`. + + Args: + commit_sha (`str`): The commit SHA on Diffusers corresponding to this update. 
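+            If `None`, a generic "Update" commit message is used instead of linking to a specific commit.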
+ """ + pipelines_table = get_supported_pipeline_table() + pipelines_table = pd.DataFrame(pipelines_table) + pipelines_dataset = Dataset.from_pandas(pipelines_table) + + hub_pipeline_tags_json = hf_hub_download( + repo_id="huggingface/diffusers-metadata", + filename=PIPELINE_TAG_JSON, + repo_type="dataset", + ) + with open(hub_pipeline_tags_json) as f: + hub_pipeline_tags_json = f.read() + + with tempfile.TemporaryDirectory() as tmp_dir: + pipelines_dataset.to_json(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) + + with open(os.path.join(tmp_dir, PIPELINE_TAG_JSON)) as f: + pipeline_tags_json = f.read() + + hub_pipeline_tags_equal = hub_pipeline_tags_json == pipeline_tags_json + if hub_pipeline_tags_equal: + print("No updates, not pushing the metadata files.") + return + + if commit_sha is not None: + commit_message = ( + f"Update with commit {commit_sha}\n\nSee: https://github.com/huggingface/diffusers/commit/{commit_sha}" + ) + else: + commit_message = "Update" + + upload_folder( + repo_id="huggingface/diffusers-metadata", + folder_path=tmp_dir, + repo_type="dataset", + commit_message=commit_message, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--commit_sha", default=None, type=str, help="The sha of the commit going with this update.") + args = parser.parse_args() + + update_metadata(args.commit_sha) diff --git a/exp_code/1_benchmark/musubi-tuner/.github/FUNDING.yml b/exp_code/1_benchmark/musubi-tuner/.github/FUNDING.yml new file mode 100644 index 0000000000000000000000000000000000000000..5b08f44140f62bf2cca4cc5c0770ca320607dbca --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# These are supported funding model platforms + +github: kohya-ss diff --git a/exp_code/1_benchmark/musubi-tuner/.python-version b/exp_code/1_benchmark/musubi-tuner/.python-version new file mode 100644 index 0000000000000000000000000000000000000000..c8cfe3959183f8e9a50f83f54cd723f2dc9c252d --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/.python-version @@ -0,0 +1 @@ +3.10 diff --git a/exp_code/1_benchmark/musubi-tuner/README.ja.md b/exp_code/1_benchmark/musubi-tuner/README.ja.md new file mode 100644 index 0000000000000000000000000000000000000000..7befc2f2dd1572e27fac15028d9ea63caa463437 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/README.ja.md @@ -0,0 +1,464 @@ +# Musubi Tuner + +[English](./README.md) | [日本語](./README.ja.md) + +## 目次 + +
+クリックすると展開します + +- [はじめに](#はじめに) + - [スポンサー募集のお知らせ](#スポンサー募集のお知らせ) + - [最近の更新](#最近の更新) + - [リリースについて](#リリースについて) +- [概要](#概要) + - [ハードウェア要件](#ハードウェア要件) + - [特徴](#特徴) +- [インストール](#インストール) +- [モデルのダウンロード](#モデルのダウンロード) + - [HunyuanVideoの公式モデルを使う](#HunyuanVideoの公式モデルを使う) + - [Text EncoderにComfyUI提供のモデルを使う](#Text-EncoderにComfyUI提供のモデルを使う) +- [使い方](#使い方) + - [データセット設定](#データセット設定) + - [latentの事前キャッシュ](#latentの事前キャッシュ) + - [Text Encoder出力の事前キャッシュ](#Text-Encoder出力の事前キャッシュ) + - [Accelerateの設定](#Accelerateの設定) + - [学習](#学習) + - [LoRAの重みのマージ](#LoRAの重みのマージ) + - [推論](#推論) + - [SkyReels V1での推論](#SkyReels-V1での推論) + - [LoRAの形式の変換](#LoRAの形式の変換) +- [その他](#その他) + - [SageAttentionのインストール方法](#SageAttentionのインストール方法) +- [免責事項](#免責事項) +- [コントリビューションについて](#コントリビューションについて) +- [ライセンス](#ライセンス) +
+ +## はじめに + +このリポジトリは、HunyuanVideo、Wan2.1、FramePackのLoRA学習用のコマンドラインツールです。このリポジトリは非公式であり、公式のHunyuanVideoやWan2.1、FramePackのリポジトリとは関係ありません。 + +Wan2.1については、[Wan2.1のドキュメント](./docs/wan.md)も参照してください。FramePackについては、[FramePackのドキュメント](./docs/framepack.md)を参照してください。 + +*リポジトリは開発中です。* + +### スポンサー募集のお知らせ + +このプロジェクトがお役に立ったなら、ご支援いただけると嬉しく思います。 [GitHub Sponsors](https://github.com/sponsors/kohya-ss/)で受け付けています。 + + +### 最近の更新 + +- GitHub Discussionsを有効にしました。コミュニティのQ&A、知識共有、技術情報の交換などにご利用ください。バグ報告や機能リクエストにはIssuesを、質問や経験の共有にはDiscussionsをご利用ください。[Discussionはこちら](https://github.com/kohya-ss/musubi-tuner/discussions) + +- 2025/06/25 + - Wan2.1アーキテクチャで1フレーム推論および学習をサポートしました。詳細は[Wanの1フレーム推論のドキュメント](./docs/wan_1f.md)を参照してください。 + +- 2025/06/17 + - FramePackの推論スクリプトで [MagCache](https://github.com/Zehong-Ma/MagCache) をサポートしました。詳しくは[高度な設定](./docs/advanced_config.md#magcache)を参照してください。 + - FramePackの推論スクリプトで、対話モードおよびバッチモードでText Encoderの出力をキャッシュするようにしました。また処理順を見直し、モデルオフロードのタイミングを調整することで、連続生成時の処理時間を短縮しました。 + +- 2025/06/13 + - `lora_post_hoc_ema.py`に`--sima_rel`オプションを追加しました。これにより、Post Hoc EMAの適用時にPower Function EMAを使用することができます。詳細は[こちらのドキュメント](./docs/advanced_config.md#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ)を参照してください。 + +- 2025/06/12 + - LoRAモデルのPost Hoc EMAを行う`lora_post_hoc_ema.py`を追加しました。LoRAモデルの学習後に、Post Hoc EMAを適用してモデルの精度を向上させることができます。詳細は[こちらのドキュメント](./docs/advanced_config.md#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ)を参照してください。 + +- 2025/06/11 + - リポジトリのパッケージングに関するPRをマージしました。xhiroga氏に感謝します。PR [#319](https://github.com/kohya-ss/musubi-tuner/pull/319) + - `pyproject.toml`を導入し、インストール方法を更新しました。既存の環境からの移行方法については、[このディスカッションの投稿](https://github.com/kohya-ss/musubi-tuner/discussions/345)を参照してください。 + - `README.md`を更新し、`pyproject.toml`を使用した新しいインストール方法を反映しました。 + +- 2025/06/09 + - FramePackの1フレーム推論のドキュメントに `--control_image_path` についての説明を追加しました。詳細は[1フレーム推論のドキュメント](./docs/framepack_1f.md#one-single-frame-inference--1フレーム推論)を参照してください。 + - FramePackの1フレーム学習で、no_4xを指定しないとサンプル画像生成がクラッシュする不具合を修正しました。PR [#339](https://github.com/kohya-ss/musubi-tuner/pull/339) + +- 2025/06/08 + - wan_generate_video.pyとfpack_generate_video.pyのinteractiveモードで、`prompt-toolkit`がインストールされている場合、それを使用するようになりました。これにより、特にLinux環境でプロンプトの編集や補完が用意になります。PR [#330](https://github.com/kohya-ss/musubi-tuner/issues/312) + - この機能はオプションです。有効にするには、`pip install prompt-toolkit`でインストールしてください。インストールしてある場合は自動的に有効になります。 + +- 2025/05/30 + - データセットの読み込み時にリサイズが正しく行われない場合がある不具合を修正しました。キャッシュの再作成をお願いします。PR [#312](https://github.com/kohya-ss/musubi-tuner/issues/312) sdbds 氏に感謝します。 + - リサイズ前の画像の幅または高さがバケットの幅または高さと一致していて、かつもう片方が異なる場合(具体的には、たとえば元画像が640\*480で、バケットが640\*360の場合など)に不具合が発生していました。 + - FramePackの1フレーム推論、学習のコードを大幅に改良しました。詳細は[FramePackの1フレーム推論のドキュメント](./docs/framepack_1f.md)を参照してください。 + - **破壊的変更**: 1フレーム学習のデータセット形式、学習オプション、推論オプションが変更されました。ドキュメントに従って、データセット設定の変更、キャッシュの再作成、学習・推論オプションの変更を行ってください。 + - FramePackの1フレーム推論と学習についてのドキュメントを追加しました。詳細は[前述のドキュメント](./docs/framepack_1f.md)を参照してください。 + +### リリースについて + +Musubi Tunerの解説記事執筆や、関連ツールの開発に取り組んでくださる方々に感謝いたします。このプロジェクトは開発中のため、互換性のない変更や機能追加が起きる可能性があります。想定外の互換性問題を避けるため、参照用として[リリース](https://github.com/kohya-ss/musubi-tuner/releases)をお使いください。 + +最新のリリースとバージョン履歴は[リリースページ](https://github.com/kohya-ss/musubi-tuner/releases)で確認できます。 + +## 概要 + +### ハードウェア要件 + +- VRAM: 静止画での学習は12GB以上推奨、動画での学習は24GB以上推奨。 + - *解像度等の学習設定により異なります。*12GBでは解像度 960x544 以下とし、`--blocks_to_swap`、`--fp8_llm`等の省メモリオプションを使用してください。 +- メインメモリ: 64GB以上を推奨、32GB+スワップで動作するかもしれませんが、未検証です。 + +### 特徴 + +- 省メモリに特化 +- Windows対応(Linuxでの動作報告もあります) +- マルチGPUには対応していません + +## インストール 
+ +### pipによるインストール + +Python 3.10以上を使用してください(3.10で動作確認済み)。 + +適当な仮想環境を作成し、ご利用のCUDAバージョンに合わせたPyTorchとtorchvisionをインストールしてください。 + +PyTorchはバージョン2.5.1以上を使用してください([補足](#PyTorchのバージョンについて))。 + +```bash +pip install torch torchvision --index-url https://download.pytorch.org/whl/cu124 +``` + +以下のコマンドを使用して、必要な依存関係をインストールします。 + +```bash +pip install -e . +``` + +オプションとして、FlashAttention、SageAttention(**推論にのみ使用できます**、インストール方法は[こちら](#SageAttentionのインストール方法)を参照)を使用できます。 + +また、`ascii-magic`(データセットの確認に使用)、`matplotlib`(timestepsの可視化に使用)、`tensorboard`(学習ログの記録に使用)、`prompt-toolkit`を必要に応じてインストールしてください。 + +`prompt-toolkit`をインストールするとWan2.1およびFramePackのinteractive modeでの編集に、自動的に使用されます。特にLinux環境でプロンプトの編集が容易になります。 + +```bash +pip install ascii-magic matplotlib tensorboard prompt-toolkit +``` + +### uvによるインストール + +uvを使用してインストールすることもできますが、uvによるインストールは試験的なものです。フィードバックを歓迎します。 + +#### Linux/MacOS + +```sh +curl -LsSf https://astral.sh/uv/install.sh | sh +``` + +表示される指示に従い、pathを設定してください。 + +#### Windows + +```powershell +powershell -c "irm https://astral.sh/uv/install.ps1 | iex" +``` + +表示される指示に従い、PATHを設定するか、この時点でシステムを再起動してください。 + +## モデルのダウンロード + +以下のいずれかの方法で、モデルをダウンロードしてください。 + +### HunyuanVideoの公式モデルを使う + +[公式のREADME](https://github.com/Tencent/HunyuanVideo/blob/main/ckpts/README.md)を参考にダウンロードし、任意のディレクトリに以下のように配置します。 + +``` + ckpts + ├──hunyuan-video-t2v-720p + │ ├──transformers + │ ├──vae + ├──text_encoder + ├──text_encoder_2 + ├──... +``` + +### Text EncoderにComfyUI提供のモデルを使う + +こちらの方法の方がより簡単です。DiTとVAEのモデルはHumyuanVideoのものを使用します。 + +https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/transformers から、[mp_rank_00_model_states.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt) をダウンロードし、任意のディレクトリに配置します。 + +(同じページにfp8のモデルもありますが、未検証です。) + +`--fp8_base`を指定して学習する場合は、`mp_rank_00_model_states.pt`の代わりに、[こちら](https://huggingface.co/kohya-ss/HunyuanVideo-fp8_e4m3fn-unofficial)の`mp_rank_00_model_states_fp8.safetensors`を使用可能です。(このファイルは非公式のもので、重みを単純にfloat8_e4m3fnに変換したものです。) + +また、https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/vae から、[pytorch_model.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/vae/pytorch_model.pt) をダウンロードし、任意のディレクトリに配置します。 + +Text EncoderにはComfyUI提供のモデルを使用させていただきます。[ComyUIのページ](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/)を参考に、https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/tree/main/split_files/text_encoders から、llava_llama3_fp16.safetensors (Text Encoder 1、LLM)と、clip_l.safetensors (Text Encoder 2、CLIP)をダウンロードし、任意のディレクトリに配置します。 + +(同じページにfp8のLLMモデルもありますが、動作未検証です。) + +## 使い方 + +### データセット設定 + +[こちら](./src/musubi_tuner/dataset/dataset_config.md)を参照してください。 + +### latentの事前キャッシュ + +latentの事前キャッシュは必須です。以下のコマンドを使用して、事前キャッシュを作成してください。(pipによるインストールの場合) + +```bash +python src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling +``` + +uvでインストールした場合は、`uv run --extra cu124 python src/musubi_tuner/cache_latents.py ...`のように、`uv run --extra cu124`を先頭につけてください。CUDA 12.8に対応している場合は、`uv run --extra cu128`も利用可能です。以下のコマンドも同様です。 + +その他のオプションは`python src/musubi_tuner/cache_latents.py --help`で確認できます。 + +VRAMが足りない場合は、`--vae_spatial_tile_sample_min_size`を128程度に減らし、`--batch_size`を小さくしてください。 + +`--debug_mode image` を指定するとデータセットの画像とキャプションが新規ウィンドウに表示されます。`--debug_mode console`でコンソールに表示されます(`ascii-magic`が必要)。 + +`--debug_mode 
video`で、キャッシュディレクトリに画像または動画が保存されます(確認後、削除してください)。動画のビットレートは確認用に低くしてあります。実際には元動画の画像が学習に使用されます。 + +`--debug_mode`指定時は、実際のキャッシュ処理は行われません。 + +デフォルトではデータセットに含まれないキャッシュファイルは自動的に削除されます。`--keep_cache`を指定すると、キャッシュファイルを残すことができます。 + +### Text Encoder出力の事前キャッシュ + +Text Encoder出力の事前キャッシュは必須です。以下のコマンドを使用して、事前キャッシュを作成してください。 + +```bash +python src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/ckpts/text_encoder --text_encoder2 path/to/ckpts/text_encoder_2 --batch_size 16 +``` + +その他のオプションは`python src/musubi_tuner/cache_text_encoder_outputs.py --help`で確認できます。 + +`--batch_size`はVRAMに合わせて調整してください。 + +VRAMが足りない場合(16GB程度未満の場合)は、`--fp8_llm`を指定して、fp8でLLMを実行してください。 + +デフォルトではデータセットに含まれないキャッシュファイルは自動的に削除されます。`--keep_cache`を指定すると、キャッシュファイルを残すことができます。 + +### Accelerateの設定 + +`accelerate config`を実行して、Accelerateの設定を行います。それぞれの質問に、環境に応じた適切な値を選択してください(値を直接入力するか、矢印キーとエンターで選択、大文字がデフォルトなので、デフォルト値でよい場合は何も入力せずエンター)。GPU 1台での学習の場合、以下のように答えてください。 + +```txt +- In which compute environment are you running?: This machine +- Which type of machine are you using?: No distributed training +- Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)?[yes/NO]: NO +- Do you wish to optimize your script with torch dynamo?[yes/NO]: NO +- Do you want to use DeepSpeed? [yes/NO]: NO +- What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]: all +- Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: NO +- Do you wish to use mixed precision?: bf16 +``` + +※場合によって ``ValueError: fp16 mixed precision requires a GPU`` というエラーが出ることがあるようです。この場合、6番目の質問( +``What GPU(s) (by id) should be used for training on this machine as a comma-separated list? 
[all]:``)に「0」と答えてください。(id `0`、つまり1台目のGPUが使われます。) + +### 学習 + +以下のコマンドを使用して、学習を開始します(実際には一行で入力してください)。 + +```bash +accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py + --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt + --dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base + --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing + --max_data_loader_n_workers 2 --persistent_data_loader_workers + --network_module networks.lora --network_dim 32 + --timestep_sampling shift --discrete_flow_shift 7.0 + --max_train_epochs 16 --save_every_n_epochs 1 --seed 42 + --output_dir path/to/output_dir --output_name name-of-lora +``` + +__更新__:サンプルの学習率を1e-3から2e-4に、`--timestep_sampling`を`sigmoid`から`shift`に、`--discrete_flow_shift`を1.0から7.0に変更しました。より高速な学習が期待されます。ディテールが甘くなる場合は、discrete flow shiftを3.0程度に下げてみてください。 + +ただ、適切な学習率、学習ステップ数、timestepsの分布、loss weightingなどのパラメータは、以前として不明な点が数多くあります。情報提供をお待ちしています。 + +その他のオプションは`python src/musubi_tuner/hv_train_network.py --help`で確認できます(ただし多くのオプションは動作未確認です)。 + +`--fp8_base`を指定すると、DiTがfp8で学習されます。未指定時はmixed precisionのデータ型が使用されます。fp8は大きく消費メモリを削減できますが、品質は低下する可能性があります。`--fp8_base`を指定しない場合はVRAM 24GB以上を推奨します。また必要に応じて`--blocks_to_swap`を使用してください。 + +VRAMが足りない場合は、`--blocks_to_swap`を指定して、一部のブロックをCPUにオフロードしてください。最大36が指定できます。 + +(block swapのアイデアは2kpr氏の実装に基づくものです。2kpr氏にあらためて感謝します。) + +`--sdpa`でPyTorchのscaled dot product attentionを使用します。`--flash_attn`で[FlashAttention]:(https://github.com/Dao-AILab/flash-attention)を使用します。`--xformers`でxformersの利用も可能ですが、xformersを使う場合は`--split_attn`を指定してください。`--sage_attn`でSageAttentionを使用しますが、SageAttentionは現時点では学習に未対応のため、エラーが発生します。 + +`--split_attn`を指定すると、attentionを分割して処理します。速度が多少低下しますが、VRAM使用量はわずかに減ります。 + +学習されるLoRAの形式は、`sd-scripts`と同じです。 + +`--show_timesteps`に`image`(`matplotlib`が必要)または`console`を指定すると、学習時のtimestepsの分布とtimestepsごとのloss weightingが確認できます。 + +学習時のログの記録が可能です。[TensorBoard形式のログの保存と参照](./docs/advanced_config.md#save-and-view-logs-in-tensorboard-format--tensorboard形式のログの保存と参照)を参照してください。 + +PyTorch Dynamoによる最適化を行う場合は、[こちら](./docs/advanced_config.md#pytorch-dynamo-optimization-for-model-training--モデルの学習におけるpytorch-dynamoの最適化)を参照してください。 + +`--gradient_checkpointing`を指定すると、gradient checkpointingが有効になります。VRAM使用量は減りますが、学習速度は低下します。 + +`--optimizer_type`には`adamw8bit`、`adamw8bit_apex_fused`、`adamw8bit_apex_fused_legacy`、`adamw8bit_apex_fused_legacy_no_scale`のいずれかを指定してください。 + +学習中のサンプル画像生成については、[こちらのドキュメント](./docs/sampling_during_training.md)を参照してください。その他の高度な設定については[こちらのドキュメント](./docs/advanced_config.md)を参照してください。 + +### LoRAの重みのマージ + +注:Wan 2.1には対応していません。 + +```bash +python src/musubi_tuner/merge_lora.py \ + --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \ + --lora_weight path/to/lora.safetensors \ + --save_merged_model path/to/merged_model.safetensors \ + --device cpu \ + --lora_multiplier 1.0 +``` + +`--device`には計算を行うデバイス(`cpu`または`cuda`等)を指定してください。`cuda`を指定すると計算が高速化されます。 + +`--lora_weight`にはマージするLoRAの重みを、`--lora_multiplier`にはLoRAの重みの係数を、それぞれ指定してください。複数個が指定可能で、両者の数は一致させてください。 + +### 推論 + +以下のコマンドを使用して動画を生成します。 + +```bash +python src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --infer_steps 30 + --prompt "A cat walks on the grass, realistic style." 
--save_path path/to/save/dir --output_type both + --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt --attn_mode sdpa --split_attn + --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt + --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 + --text_encoder1 path/to/ckpts/text_encoder + --text_encoder2 path/to/ckpts/text_encoder_2 + --seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors +``` + +その他のオプションは`python src/musubi_tuner/hv_generate_video.py --help`で確認できます。 + +`--fp8`を指定すると、DiTがfp8で推論されます。fp8は大きく消費メモリを削減できますが、品質は低下する可能性があります。 + +RTX 40x0シリーズのGPUを使用している場合は、`--fp8_fast`オプションを指定することで、高速推論が可能です。このオプションを指定する場合は、`--fp8`も指定してください。 + +VRAMが足りない場合は、`--blocks_to_swap`を指定して、一部のブロックをCPUにオフロードしてください。最大38が指定できます。 + +`--attn_mode`には`flash`、`torch`、`sageattn`、`xformers`または`sdpa`(`torch`指定時と同じ)のいずれかを指定してください。それぞれFlashAttention、scaled dot product attention、SageAttention、xformersに対応します。デフォルトは`torch`です。SageAttentionはVRAMの削減に有効です。 + +`--split_attn`を指定すると、attentionを分割して処理します。SageAttention利用時で10%程度の高速化が見込まれます。 + +`--output_type`には`both`、`latent`、`video`、`images`のいずれかを指定してください。`both`はlatentと動画の両方を出力します。VAEでOut of Memoryエラーが発生する場合に備えて、`both`を指定することをお勧めします。`--latent_path`に保存されたlatentを指定し、`--output_type video` (または`images`)としてスクリプトを実行すると、VAEのdecodeのみを行えます。 + +`--seed`は省略可能です。指定しない場合はランダムなシードが使用されます。 + +`--video_length`は「4の倍数+1」を指定してください。 + +`--flow_shift`にタイムステップのシフト値(discrete flow shift)を指定可能です。省略時のデフォルト値は7.0で、これは推論ステップ数が50の時の推奨値です。HunyuanVideoの論文では、ステップ数50の場合は7.0、ステップ数20未満(10など)で17.0が推奨されています。 + +`--video_path`に読み込む動画を指定すると、video2videoの推論が可能です。動画ファイルを指定するか、複数の画像ファイルが入ったディレクトリを指定してください(画像ファイルはファイル名でソートされ、各フレームとして用いられます)。`--video_length`よりも短い動画を指定するとエラーになります。`--strength`で強度を指定できます。0~1.0で指定でき、大きいほど元の動画からの変化が大きくなります。 + +なおvideo2video推論の処理は実験的なものです。 + +`--compile`オプションでPyTorchのコンパイル機能を有効にします(実験的機能)。tritonのインストールが必要です。また、WindowsではVisual C++ build toolsが必要で、かつPyTorch>=2.6.0でのみ動作します。`--compile_args`でコンパイル時の引数を渡すことができます。 + +`--compile`は初回実行時にかなりの時間がかかりますが、2回目以降は高速化されます。 + +`--save_merged_model`オプションで、LoRAマージ後のDiTモデルを保存できます。`--save_merged_model path/to/merged_model.safetensors`のように指定してください。なおこのオプションを指定すると推論は行われません。 + +### SkyReels V1での推論 + +SkyReels V1のT2VとI2Vモデルがサポートされています(推論のみ)。 + +モデルは[こちら](https://huggingface.co/Kijai/SkyReels-V1-Hunyuan_comfy)からダウンロードできます。モデルを提供してくださったKijai氏に感謝します。`skyreels_hunyuan_i2v_bf16.safetensors`がI2Vモデル、`skyreels_hunyuan_t2v_bf16.safetensors`がT2Vモデルです。`bf16`以外の形式は未検証です(`fp8_e4m3fn`は動作するかもしれません)。 + +T2V推論を行う場合、以下のオプションを推論コマンドに追加してください: + +```bash +--guidance_scale 6.0 --embedded_cfg_scale 1.0 --negative_prompt "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" --split_uncond +``` + +SkyReels V1はclassifier free guidance(ネガティブプロンプト)を必要とするようです。`--guidance_scale`はネガティブプロンプトのガイダンススケールです。公式リポジトリの推奨値は6.0です。デフォルトは1.0で、この場合はclassifier free guidanceは使用されません(ネガティブプロンプトは無視されます)。 + +`--embedded_cfg_scale`は埋め込みガイダンスのスケールです。公式リポジトリの推奨値は1.0です(埋め込みガイダンスなしを意味すると思われます)。 + +`--negative_prompt`はいわゆるネガティブプロンプトです。上記のサンプルは公式リポジトリのものです。`--guidance_scale`を指定し、`--negative_prompt`を指定しなかった場合は、空文字列が使用されます。 + +`--split_uncond`を指定すると、モデル呼び出しをuncondとcond(ネガティブプロンプトとプロンプト)に分割します。VRAM使用量が減りますが、推論速度は低下する可能性があります。`--split_attn`が指定されている場合、`--split_uncond`は自動的に有効になります。 + +### LoRAの形式の変換 + +ComfyUIで使用可能な形式(Diffusion-pipeと思われる)への変換は以下のコマンドで行えます。 + +```bash +python src/musubi_tuner/convert_lora.py --input path/to/musubi_lora.safetensors --output 
path/to/another_format.safetensors --target other +``` + +`--input`と`--output`はそれぞれ入力と出力のファイルパスを指定してください。 + +`--target`には`other`を指定してください。`default`を指定すると、他の形式から当リポジトリの形式に変換できます。 + +Wan2.1も対応済みです。 + +## その他 + +### SageAttentionのインストール方法 + +sdbds氏によるWindows対応のSageAttentionのwheelが https://github.com/sdbds/SageAttention-for-windows で公開されています。triton をインストールし、Python、PyTorch、CUDAのバージョンが一致する場合は、[Releases](https://github.com/sdbds/SageAttention-for-windows/releases)からビルド済みwheelをダウンロードしてインストールすることが可能です。sdbds氏に感謝します。 + +参考までに、以下は、SageAttentionをビルドしインストールするための簡単な手順です。Microsoft Visual C++ 再頒布可能パッケージを最新にする必要があるかもしれません。 + +1. Pythonのバージョンに応じたtriton 3.1.0のwhellを[こちら](https://github.com/woct0rdho/triton-windows/releases/tag/v3.1.0-windows.post5)からダウンロードしてインストールします。 + +2. Microsoft Visual Studio 2022かBuild Tools for Visual Studio 2022を、C++のビルドができるよう設定し、インストールします。(上のRedditの投稿を参照してください)。 + +3. 任意のフォルダにSageAttentionのリポジトリをクローンします。 + ```shell + git clone https://github.com/thu-ml/SageAttention.git + ``` + +4. スタートメニューから Visual Studio 2022 内の `x64 Native Tools Command Prompt for VS 2022` を選択してコマンドプロンプトを開きます。 + +5. venvを有効にし、SageAttentionのフォルダに移動して以下のコマンドを実行します。DISTUTILSが設定されていない、のようなエラーが出た場合は `set DISTUTILS_USE_SDK=1`としてから再度実行してください。 + ```shell + python setup.py install + ``` + +以上でSageAttentionのインストールが完了です。 + +### PyTorchのバージョンについて + +`--attn_mode`に`torch`を指定する場合、2.5.1以降のPyTorchを使用してください(それより前のバージョンでは生成される動画が真っ黒になるようです)。 + +古いバージョンを使う場合、xformersやSageAttentionを使用してください。 + +## 免責事項 + +このリポジトリは非公式であり、公式のHunyuanVideoリポジトリとは関係ありません。また、このリポジトリは開発中で、実験的なものです。テストおよびフィードバックを歓迎しますが、以下の点にご注意ください: + +- 実際の稼働環境での動作を意図したものではありません +- 機能やAPIは予告なく変更されることがあります +- いくつもの機能が未検証です +- 動画学習機能はまだ開発中です + +問題やバグについては、以下の情報とともにIssueを作成してください: + +- 問題の詳細な説明 +- 再現手順 +- 環境の詳細(OS、GPU、VRAM、Pythonバージョンなど) +- 関連するエラーメッセージやログ + +## コントリビューションについて + +コントリビューションを歓迎します。ただし、以下にご注意ください: + +- メンテナーのリソースが限られているため、PRのレビューやマージには時間がかかる場合があります +- 大きな変更に取り組む前には、議論のためのIssueを作成してください +- PRに関して: + - 変更は焦点を絞り、適度なサイズにしてください + - 明確な説明をお願いします + - 既存のコードスタイルに従ってください + - ドキュメントが更新されていることを確認してください + +## ライセンス + +`hunyuan_model`ディレクトリ以下のコードは、[HunyuanVideo](https://github.com/Tencent/HunyuanVideo)のコードを一部改変して使用しているため、そちらのライセンスに従います。 + +`wan`ディレクトリ以下のコードは、[Wan2.1](https://github.com/Wan-Video/Wan2.1)のコードを一部改変して使用しています。ライセンスはApache License 2.0です。 + +`frame_pack`ディレクトリ以下のコードは、[frame_pack](https://github.com/lllyasviel/FramePack)のコードを一部改変して使用しています。ライセンスはApache License 2.0です。 + +他のコードはApache License 2.0に従います。一部Diffusersのコードをコピー、改変して使用しています。 diff --git a/exp_code/1_benchmark/musubi-tuner/README.md b/exp_code/1_benchmark/musubi-tuner/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f0a97a4d3b31cf2359b24cbbbe48ed0da59d7bd4 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/README.md @@ -0,0 +1,537 @@ +# Musubi Tuner + +[English](./README.md) | [日本語](./README.ja.md) + +## Table of Contents + +
+Click to expand + +- [Musubi Tuner](#musubi-tuner) + - [Table of Contents](#table-of-contents) + - [Introduction](#introduction) + - [Support the Project](#support-the-project) + - [Recent Updates](#recent-updates) + - [Releases](#releases) + - [Overview](#overview) + - [Hardware Requirements](#hardware-requirements) + - [Features](#features) + - [Installation](#installation) + - [pip based installation](#pip-based-installation) + - [uv based installation](#uv-based-installation) + - [Linux/MacOS](#linuxmacos) + - [Windows](#windows) + - [Model Download](#model-download) + - [Use the Official HunyuanVideo Model](#use-the-official-hunyuanvideo-model) + - [Using ComfyUI Models for Text Encoder](#using-comfyui-models-for-text-encoder) + - [Usage](#usage) + - [Dataset Configuration](#dataset-configuration) + - [Latent Pre-caching](#latent-pre-caching) + - [Text Encoder Output Pre-caching](#text-encoder-output-pre-caching) + - [Configuration of Accelerate](#configuration-of-accelerate) + - [Training](#training) + - [Merging LoRA Weights](#merging-lora-weights) + - [Inference](#inference) + - [Inference with SkyReels V1](#inference-with-skyreels-v1) + - [Convert LoRA to another format](#convert-lora-to-another-format) + - [Miscellaneous](#miscellaneous) + - [SageAttention Installation](#sageattention-installation) + - [PyTorch version](#pytorch-version) + - [Disclaimer](#disclaimer) + - [Contributing](#contributing) + - [License](#license) + +
+
+## Introduction
+
+This repository provides scripts for training LoRA (Low-Rank Adaptation) models with HunyuanVideo, Wan2.1 and FramePack architectures.
+
+This repository is unofficial and not affiliated with the official HunyuanVideo/Wan2.1/FramePack repositories.
+
+For Wan2.1, please also refer to [Wan2.1 documentation](./docs/wan.md). For FramePack, please also refer to [FramePack documentation](./docs/framepack.md).
+
+*This repository is under development.*
+
+### Support the Project
+
+If you find this project helpful, please consider supporting its development via [GitHub Sponsors](https://github.com/sponsors/kohya-ss/). Your support is greatly appreciated!
+
+### Recent Updates
+
+- GitHub Discussions Enabled: We've enabled GitHub Discussions for community Q&A, knowledge sharing, and technical information exchange. Please use Issues for bug reports and feature requests, and Discussions for questions and sharing experiences. [Join the conversation →](https://github.com/kohya-ss/musubi-tuner/discussions)
+
+- June 25, 2025:
+  - Added support for one-frame inference and training in the Wan2.1 architecture. For details, see the [Wan one frame inference documentation](./docs/wan_1f.md).
+
+- June 17, 2025:
+  - Added support for [MagCache](https://github.com/Zehong-Ma/MagCache) in FramePack's inference script. See [Advanced Configuration](./docs/advanced_config.md#magcache) for details.
+  - Implemented caching of Text Encoder outputs in both interactive and batch modes in FramePack's inference script. Additionally, we reviewed the processing order and adjusted the timing of model offloading to reduce processing time during continuous generation.
+
+- June 13, 2025:
+  - Added `--sima_rel` option to `lora_post_hoc_ema.py`. This allows you to use Power Function EMA when applying Post Hoc EMA. For details, see [this document](./docs/advanced_config.md#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ).
+
+- June 12, 2025:
+  - Added `lora_post_hoc_ema.py` for Post Hoc EMA of LoRA models. This allows you to apply Post Hoc EMA after training a LoRA model to improve accuracy. For details, see [this document](./docs/advanced_config.md#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ).
+
+- June 11, 2025:
+  - Merged the pull request for packaging the repository. Thank you to xhiroga for PR [#319](https://github.com/kohya-ss/musubi-tuner/pull/319)! This introduces `pyproject.toml` and updates installation instructions. For details on migrating your existing environment, please refer to [this discussion post](https://github.com/kohya-ss/musubi-tuner/discussions/345).
+  - Updated `README.md` to reflect the new installation methods using `pip` and `uv` with `pyproject.toml`.
+
+- June 9, 2025:
+  - Added documentation for `--control_image_path` in FramePack's one frame inference documentation. See [FramePack's one frame inference documentation](./docs/framepack_1f.md#one-single-frame-inference--1フレーム推論) for details.
+  - Fixed a bug in FramePack's one frame training where sample image generation would crash if `no_4x` was not specified. PR [#339](https://github.com/kohya-ss/musubi-tuner/pull/339)
+
+- June 8, 2025:
+  - Added support for interactive mode in `wan_generate_video.py` and `fpack_generate_video.py`. If `prompt-toolkit` is installed, it will be used for prompt editing and completion, especially useful in Linux environments. PR [#330](https://github.com/kohya-ss/musubi-tuner/issues/330)
+  - This feature is optional. To enable it, install `prompt-toolkit` with `pip install prompt-toolkit`. If installed, it will be automatically enabled.
+
+- May 30, 2025:
+  - Fixed a bug where the resizing of images and videos during dataset loading was not performed correctly. Please recreate the cache. Thank you to sdbds for PR [#312](https://github.com/kohya-ss/musubi-tuner/issues/312).
+    - The bug occurred when the width or height of the image before resizing matched the bucket's width or height, but the other dimension was different (for example, if the original image was 640x480 and the bucket was 640x360).
+  - Updated the code for FramePack's one frame inference and training. The code has been significantly improved. See [FramePack's one frame inference documentation](./docs/framepack_1f.md) for details.
+    - **Breaking change**: The dataset format, training options, and inference options for one frame training have changed. Please follow the documentation to update your dataset configuration, recreate the cache, and modify your training and inference options.
+  - Added documentation for FramePack's one frame inference and training. See the [documentation](./docs/framepack_1f.md) for details.
+
+### Releases
+
+We are grateful to everyone who has been contributing to the Musubi Tuner ecosystem through documentation and third-party tools. To support these valuable contributions, we recommend working with our [releases](https://github.com/kohya-ss/musubi-tuner/releases) as stable reference points, as this project is under active development and breaking changes may occur.
+
+You can find the latest release and version history in our [releases page](https://github.com/kohya-ss/musubi-tuner/releases).
+
+## Overview
+
+### Hardware Requirements
+
+- VRAM: 12GB or more recommended for image training, 24GB or more for video training
+  - *Actual requirements depend on resolution and training settings.* For 12GB, use a resolution of 960x544 or lower and use memory-saving options such as `--blocks_to_swap`, `--fp8_llm`, etc.
+- Main Memory: 64GB or more recommended, 32GB + swap may work
+
+### Features
+
+- Memory-efficient implementation
+- Windows compatibility confirmed (Linux compatibility confirmed by community)
+- Multi-GPU support not implemented
+
+## Installation
+
+### pip based installation
+
+Python 3.10 or later is required (verified with 3.10).
+
+Create a virtual environment and install PyTorch and torchvision matching your CUDA version.
+
+PyTorch 2.5.1 or later is required (see [note](#pytorch-version)).
+
+```bash
+pip install torch torchvision --index-url https://download.pytorch.org/whl/cu124
+```
+
+Install the required dependencies using the following command.
+
+```bash
+pip install -e .
+```
+
+Optionally, you can use FlashAttention and SageAttention (**for inference only**; see [SageAttention Installation](#sageattention-installation) for installation instructions).
+
+Optional dependencies for additional features:
+- `ascii-magic`: Used for dataset verification
+- `matplotlib`: Used for timestep visualization
+- `tensorboard`: Used for logging training progress
+- `prompt-toolkit`: Used for interactive prompt editing in Wan2.1 and FramePack inference scripts. If installed, it will be automatically used in interactive mode. Especially useful in Linux environments for easier prompt editing.
+
+```bash
+pip install ascii-magic matplotlib tensorboard prompt-toolkit
+```
+
+### uv based installation (experimental)
+
+You can also install using uv, but installation with uv is experimental. Feedback is welcome.
+
+1. Install uv (if not already present on your OS).
+ +#### Linux/MacOS + +```sh +curl -LsSf https://astral.sh/uv/install.sh | sh +``` + +Follow the instructions to add the uv path manually until you restart your session... + +#### Windows + +```powershell +powershell -c "irm https://astral.sh/uv/install.ps1 | iex" +``` + +Follow the instructions to add the uv path manually until you reboot your system... or just reboot your system at this point. + +## Model Download + +There are two ways to download the model. + +### Use the Official HunyuanVideo Model + +Download the model following the [official README](https://github.com/Tencent/HunyuanVideo/blob/main/ckpts/README.md) and place it in your chosen directory with the following structure: + +``` + ckpts + ├──hunyuan-video-t2v-720p + │ ├──transformers + │ ├──vae + ├──text_encoder + ├──text_encoder_2 + ├──... +``` + +### Using ComfyUI Models for Text Encoder + +This method is easier. + +For DiT and VAE, use the HunyuanVideo models. + +From https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/transformers, download [mp_rank_00_model_states.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt) and place it in your chosen directory. + +(Note: The fp8 model on the same page is unverified.) + +If you are training with `--fp8_base`, you can use `mp_rank_00_model_states_fp8.safetensors` from [here](https://huggingface.co/kohya-ss/HunyuanVideo-fp8_e4m3fn-unofficial) instead of `mp_rank_00_model_states.pt`. (This file is unofficial and simply converts the weights to float8_e4m3fn.) + +From https://huggingface.co/tencent/HunyuanVideo/tree/main/hunyuan-video-t2v-720p/vae, download [pytorch_model.pt](https://huggingface.co/tencent/HunyuanVideo/resolve/main/hunyuan-video-t2v-720p/vae/pytorch_model.pt) and place it in your chosen directory. + +For the Text Encoder, use the models provided by ComfyUI. Refer to [ComfyUI's page](https://comfyanonymous.github.io/ComfyUI_examples/hunyuan_video/), from https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged/tree/main/split_files/text_encoders, download `llava_llama3_fp16.safetensors` (Text Encoder 1, LLM) and `clip_l.safetensors` (Text Encoder 2, CLIP) and place them in your chosen directory. + +(Note: The fp8 LLM model on the same page is unverified.) + +## Usage + +### Dataset Configuration + +Please refer to [dataset configuration guide](./src/musubi_tuner/dataset/dataset_config.md). + +### Latent Pre-caching + +Latent pre-caching is required. Create the cache using the following command: + +If you have installed using pip: + +```bash +python src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling +``` + +If you have installed with `uv`, you can use `uv run --extra cu124` to run the script. If CUDA 12.8 is supported, `uv run --extra cu128` is also available. Other scripts can be run in the same way. (Note that the installation with `uv` is experimental. Feedback is welcome. If you encounter any issues, please use the pip-based installation.) + +```bash +uv run --extra cu124 src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling +``` + +For additional options, use `python src/musubi_tuner/cache_latents.py --help`. + +If you're running low on VRAM, reduce `--vae_spatial_tile_sample_min_size` to around 128 and lower the `--batch_size`. 
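+
+For example, a lower-VRAM invocation might look like the following (the values here are only a sketch; adjust them for your GPU):
+
+```bash
+python src/musubi_tuner/cache_latents.py --dataset_config path/to/toml --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt --vae_chunk_size 32 --vae_tiling --vae_spatial_tile_sample_min_size 128 --batch_size 1
+```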
+ +Use `--debug_mode image` to display dataset images and captions in a new window, or `--debug_mode console` to display them in the console (requires `ascii-magic`). + +With `--debug_mode video`, images or videos will be saved in the cache directory (please delete them after checking). The bitrate of the saved video is set to 1Mbps for preview purposes. The images decoded from the original video (not degraded) are used for the cache (for training). + +When `--debug_mode` is specified, the actual caching process is not performed. + +By default, cache files not included in the dataset are automatically deleted. You can still keep cache files as before by specifying `--keep_cache`. + +### Text Encoder Output Pre-caching + +Text Encoder output pre-caching is required. Create the cache using the following command: + +```bash +python src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/ckpts/text_encoder --text_encoder2 path/to/ckpts/text_encoder_2 --batch_size 16 +``` + +or for uv: + +```bash +uv run --extra cu124 src/musubi_tuner/cache_text_encoder_outputs.py --dataset_config path/to/toml --text_encoder1 path/to/ckpts/text_encoder --text_encoder2 path/to/ckpts/text_encoder_2 --batch_size 16 +``` + +For additional options, use `python src/musubi_tuner/cache_text_encoder_outputs.py --help`. + +Adjust `--batch_size` according to your available VRAM. + +For systems with limited VRAM (less than ~16GB), use `--fp8_llm` to run the LLM in fp8 mode. + +By default, cache files not included in the dataset are automatically deleted. You can still keep cache files as before by specifying `--keep_cache`. + +### Configuration of Accelerate + +Run `accelerate config` to configure Accelerate. Choose appropriate values for each question based on your environment (either input values directly or use arrow keys and enter to select; uppercase is default, so if the default value is fine, just press enter without inputting anything). For training with a single GPU, answer the questions as follows: + + +```txt +- In which compute environment are you running?: This machine +- Which type of machine are you using?: No distributed training +- Do you want to run your training on CPU only (even if a GPU / Apple Silicon / Ascend NPU device is available)?[yes/NO]: NO +- Do you wish to optimize your script with torch dynamo?[yes/NO]: NO +- Do you want to use DeepSpeed? [yes/NO]: NO +- What GPU(s) (by id) should be used for training on this machine as a comma-seperated list? [all]: all +- Would you like to enable numa efficiency? (Currently only supported on NVIDIA hardware). [yes/NO]: NO +- Do you wish to use mixed precision?: bf16 +``` + +*Note*: In some cases, you may encounter the error `ValueError: fp16 mixed precision requires a GPU`. If this happens, answer "0" to the sixth question (`What GPU(s) (by id) should be used for training on this machine as a comma-separated list? [all]:`). This means that only the first GPU (id `0`) will be used. 
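+
+If the error above appears even though a GPU is available, one simple workaround (a sketch, assuming a single NVIDIA GPU with id `0`) is to make only that GPU visible before running the Accelerate commands:
+
+```bash
+# NVIDIA-specific: expose only GPU 0 to this shell session, so Accelerate treats it as the sole device.
+export CUDA_VISIBLE_DEVICES=0
+accelerate config
+```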
+
+### Training
+
+Start training using the following command (input as a single line):
+
+```bash
+accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py
+    --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt
+    --dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base
+    --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing
+    --max_data_loader_n_workers 2 --persistent_data_loader_workers
+    --network_module networks.lora --network_dim 32
+    --timestep_sampling shift --discrete_flow_shift 7.0
+    --max_train_epochs 16 --save_every_n_epochs 1 --seed 42
+    --output_dir path/to/output_dir --output_name name-of-lora
+```
+
+or for uv:
+
+```bash
+uv run --extra cu124 accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py
+    --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt
+    --dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base
+    --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing
+    --max_data_loader_n_workers 2 --persistent_data_loader_workers
+    --network_module networks.lora --network_dim 32
+    --timestep_sampling shift --discrete_flow_shift 7.0
+    --max_train_epochs 16 --save_every_n_epochs 1 --seed 42
+    --output_dir path/to/output_dir --output_name name-of-lora
+```
+
+__Update__: Changed the sample training settings to a learning rate of 2e-4, `--timestep_sampling` to `shift`, and `--discrete_flow_shift` to 7.0. Faster training is expected. If the details of the image are not learned well, try lowering the discrete flow shift to around 3.0.
+
+However, the training settings are still experimental. Appropriate learning rates, training steps, timestep distribution, loss weighting, etc. are not yet known. Feedback is welcome.
+
+For additional options, use `python src/musubi_tuner/hv_train_network.py --help` (note that many options are unverified).
+
+Specifying `--fp8_base` runs DiT in fp8 mode. Without this flag, the mixed precision data type will be used. fp8 can significantly reduce memory consumption but may impact output quality. If `--fp8_base` is not specified, 24GB or more VRAM is recommended. Use `--blocks_to_swap` as needed.
+
+If you're running low on VRAM, use `--blocks_to_swap` to offload some blocks to CPU. The maximum value is 36.
+
+(The idea of block swap is based on the implementation by 2kpr. Thanks again to 2kpr.)
+
+Use `--sdpa` for PyTorch's scaled dot product attention. Use `--flash_attn` for [FlashAttention](https://github.com/Dao-AILab/flash-attention). Use `--xformers` for xformers, but specify `--split_attn` when using xformers. Use `--sage_attn` for SageAttention; however, SageAttention is not yet supported for training, so it will raise a ValueError.
+
+`--split_attn` processes attention in chunks. Speed may be slightly reduced, but VRAM usage is also slightly reduced.
+
+The format of the trained LoRA is the same as that of `sd-scripts`.
+
+`--show_timesteps` can be set to `image` (requires `matplotlib`) or `console` to display the timestep distribution and loss weighting during training.
+
+You can record logs during training. Refer to [Save and view logs in TensorBoard format](./docs/advanced_config.md#save-and-view-logs-in-tensorboard-format--tensorboard形式のログの保存と参照).
+
+For PyTorch Dynamo optimization, refer to [this document](./docs/advanced_config.md#pytorch-dynamo-optimization-for-model-training--モデルの学習におけるpytorch-dynamoの最適化).
+
+For sample image generation during training, refer to [this document](./docs/sampling_during_training.md). For advanced configuration, refer to [this document](./docs/advanced_config.md).
+
+### Merging LoRA Weights
+
+Note: Wan2.1 is not supported for merging LoRA weights.
+
+```bash
+python src/musubi_tuner/merge_lora.py \
+    --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \
+    --lora_weight path/to/lora.safetensors \
+    --save_merged_model path/to/merged_model.safetensors \
+    --device cpu \
+    --lora_multiplier 1.0
+```
+
+or for uv:
+
+```bash
+uv run --extra cu124 src/musubi_tuner/merge_lora.py \
+    --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt \
+    --lora_weight path/to/lora.safetensors \
+    --save_merged_model path/to/merged_model.safetensors \
+    --device cpu \
+    --lora_multiplier 1.0
+```
+
+Specify the device to perform the calculation (`cpu` or `cuda`, etc.) with `--device`. Calculation will be faster if `cuda` is specified.
+
+Specify the LoRA weights to merge with `--lora_weight` and the multiplier for the LoRA weights with `--lora_multiplier`. Multiple values can be specified, and the number of values must match.
+
+### Inference
+
+Generate videos using the following command:
+
+```bash
+python src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --infer_steps 30
+    --prompt "A cat walks on the grass, realistic style." --save_path path/to/save/dir --output_type both
+    --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt --attn_mode sdpa --split_attn
+    --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt
+    --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128
+    --text_encoder1 path/to/ckpts/text_encoder
+    --text_encoder2 path/to/ckpts/text_encoder_2
+    --seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
+```
+
+or for uv:
+
+```bash
+uv run --extra cu124 src/musubi_tuner/hv_generate_video.py --fp8 --video_size 544 960 --video_length 5 --infer_steps 30
+    --prompt "A cat walks on the grass, realistic style." --save_path path/to/save/dir --output_type both
+    --dit path/to/ckpts/hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt --attn_mode sdpa --split_attn
+    --vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt
+    --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128
+    --text_encoder1 path/to/ckpts/text_encoder
+    --text_encoder2 path/to/ckpts/text_encoder_2
+    --seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
+```
+
+For additional options, use `python src/musubi_tuner/hv_generate_video.py --help`.
+
+Specifying `--fp8` runs DiT in fp8 mode. fp8 can significantly reduce memory consumption but may impact output quality.
+
+The `--fp8_fast` option is also available for faster inference on RTX 40x0 GPUs. This option requires the `--fp8` option.
+
+If you're running low on VRAM, use `--blocks_to_swap` to offload some blocks to CPU. The maximum value is 38.
+
+For `--attn_mode`, specify either `flash`, `torch`, `sageattn`, `xformers`, or `sdpa` (same as `torch`). These correspond to FlashAttention, scaled dot product attention, SageAttention, and xformers, respectively. Default is `torch`. SageAttention is effective for VRAM reduction.
+
+Specifying `--split_attn` will process attention in chunks. Inference with SageAttention is expected to be about 10% faster.
+
+For `--output_type`, specify either `both`, `latent`, `video` or `images`. `both` outputs both latents and video.
Using `both` is recommended in case of Out of Memory errors during VAE processing. You can specify saved latents with `--latent_path` and use `--output_type video` (or `images`) to only perform VAE decoding.
+
+`--seed` is optional. A random seed will be used if not specified.
+
+`--video_length` should be specified as "a multiple of 4 plus 1".
+
+`--flow_shift` can be specified to shift the timestep (discrete flow shift). The default value when omitted is 7.0, which is the recommended value for 50 inference steps. In the HunyuanVideo paper, 7.0 is recommended for 50 steps, and 17.0 is recommended for fewer than 20 steps (e.g. 10).
+
+By specifying `--video_path`, video2video inference is possible. Specify a video file or a directory containing multiple image files (the image files are sorted by file name and used as frames). An error will occur if the video is shorter than `--video_length`. You can specify the strength with `--strength`. It can be specified from 0 to 1.0, and the larger the value, the greater the change from the original video.
+
+Note that video2video inference is experimental.
+
+The `--compile` option enables PyTorch's compile feature (experimental). It requires triton. On Windows, Visual C++ build tools and PyTorch>=2.6.0 are also required. You can pass arguments to the compiler with `--compile_args`.
+
+The `--compile` option takes a long time to run the first time, but speeds up on subsequent runs.
+
+You can save the DiT model after LoRA merge with the `--save_merged_model` option. Specify `--save_merged_model path/to/merged_model.safetensors`. Note that inference will not be performed when this option is specified.
+
+### Inference with SkyReels V1
+
+SkyReels V1 T2V and I2V models are supported (inference only).
+
+The model can be downloaded from [here](https://huggingface.co/Kijai/SkyReels-V1-Hunyuan_comfy). Many thanks to Kijai for providing the model. `skyreels_hunyuan_i2v_bf16.safetensors` is the I2V model, and `skyreels_hunyuan_t2v_bf16.safetensors` is the T2V model. The models other than bf16 are not tested (`fp8_e4m3fn` may work).
+
+For T2V inference, add the following options to the inference command:
+
+```bash
+--guidance_scale 6.0 --embedded_cfg_scale 1.0 --negative_prompt "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" --split_uncond
+```
+
+SkyReels V1 seems to require classifier free guidance (a negative prompt). `--guidance_scale` is the guidance scale for the negative prompt. The recommended value is 6.0 from the official repository. The default is 1.0, which means no classifier free guidance.
+
+`--embedded_cfg_scale` is the scale of the embedded guidance. The recommended value is 1.0 from the official repository (it may mean no embedded guidance).
+
+`--negative_prompt` is the negative prompt for the classifier free guidance. The above sample is from the official repository. If you don't specify this and specify a `--guidance_scale` other than 1.0, an empty string will be used as the negative prompt.
+
+`--split_uncond` is a flag to split the model call into unconditional and conditional parts. This reduces VRAM usage but may slow down inference. If `--split_attn` is specified, `--split_uncond` is automatically set.
+
+You can also perform image2video inference with the SkyReels V1 I2V model. Specify the image file path with `--image_path`. The image will be resized to the given `--video_size`.
+ +```bash +--image_path path/to/image.jpg +``` + +### Convert LoRA to another format + +You can convert LoRA to a format compatible with ComfyUI (presumed to be Diffusion-pipe) using the following command: + +```bash +python src/musubi_tuner/convert_lora.py --input path/to/musubi_lora.safetensors --output path/to/another_format.safetensors --target other +``` + +or for uv: + +```bash +uv run --extra cu124 src/musubi_tuner/convert_lora.py --input path/to/musubi_lora.safetensors --output path/to/another_format.safetensors --target other +``` + +Specify the input and output file paths with `--input` and `--output`, respectively. + +Specify `other` for `--target`. Use `default` to convert from another format to the format of this repository. + +Wan2.1 is also supported. + +## Miscellaneous + +### SageAttention Installation + +sdbsd has provided a Windows-compatible SageAttention implementation and pre-built wheels here: https://github.com/sdbds/SageAttention-for-windows. After installing triton, if your Python, PyTorch, and CUDA versions match, you can download and install the pre-built wheel from the [Releases](https://github.com/sdbds/SageAttention-for-windows/releases) page. Thanks to sdbsd for this contribution. + +For reference, the build and installation instructions are as follows. You may need to update Microsoft Visual C++ Redistributable to the latest version. + +1. Download and install triton 3.1.0 wheel matching your Python version from [here](https://github.com/woct0rdho/triton-windows/releases/tag/v3.1.0-windows.post5). + +2. Install Microsoft Visual Studio 2022 or Build Tools for Visual Studio 2022, configured for C++ builds. + +3. Clone the SageAttention repository in your preferred directory: + ```shell + git clone https://github.com/thu-ml/SageAttention.git + ``` + +4. Open `x64 Native Tools Command Prompt for VS 2022` from the Start menu under Visual Studio 2022. + +5. Activate your venv, navigate to the SageAttention folder, and run the following command. If you get a DISTUTILS not configured error, set `set DISTUTILS_USE_SDK=1` and try again: + ```shell + python setup.py install + ``` + +This completes the SageAttention installation. + +### PyTorch version + +If you specify `torch` for `--attn_mode`, use PyTorch 2.5.1 or later (earlier versions may result in black videos). + +If you use an earlier version, use xformers or SageAttention. + +## Disclaimer + +This repository is unofficial and not affiliated with the official HunyuanVideo repository. + +This repository is experimental and under active development. While we welcome community usage and feedback, please note: + +- This is not intended for production use +- Features and APIs may change without notice +- Some functionalities are still experimental and may not work as expected +- Video training features are still under development + +If you encounter any issues or bugs, please create an Issue in this repository with: +- A detailed description of the problem +- Steps to reproduce +- Your environment details (OS, GPU, VRAM, Python version, etc.) +- Any relevant error messages or logs + +## Contributing + +We welcome contributions! 
However, please note: + +- Due to limited maintainer resources, PR reviews and merges may take some time +- Before starting work on major changes, please open an Issue for discussion +- For PRs: + - Keep changes focused and reasonably sized + - Include clear descriptions + - Follow the existing code style + - Ensure documentation is updated + +## License + +Code under the `hunyuan_model` directory is modified from [HunyuanVideo](https://github.com/Tencent/HunyuanVideo) and follows their license. + +Code under the `wan` directory is modified from [Wan2.1](https://github.com/Wan-Video/Wan2.1). The license is under the Apache License 2.0. + +Code under the `frame_pack` directory is modified from [FramePack](https://github.com/lllyasviel/FramePack). The license is under the Apache License 2.0. + +Other code is under the Apache License 2.0. Some code is copied and modified from Diffusers. \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/cache_latents.py b/exp_code/1_benchmark/musubi-tuner/cache_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..dc2cab0eafa81412242bc6ad16f3c08ab3e6aa17 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/cache_latents.py @@ -0,0 +1,4 @@ +from musubi_tuner.cache_latents import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/cache_text_encoder_outputs.py b/exp_code/1_benchmark/musubi-tuner/cache_text_encoder_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..c7be3af2173913a2fc5d4a40cd34a223f9ee066f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/cache_text_encoder_outputs.py @@ -0,0 +1,4 @@ +from musubi_tuner.cache_text_encoder_outputs import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/convert_lora.py b/exp_code/1_benchmark/musubi-tuner/convert_lora.py new file mode 100644 index 0000000000000000000000000000000000000000..8280a39d527fbb3ad51d91db9514bdbfc64088a3 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/convert_lora.py @@ -0,0 +1,4 @@ +from musubi_tuner.convert_lora import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/docs/advanced_config.md b/exp_code/1_benchmark/musubi-tuner/docs/advanced_config.md new file mode 100644 index 0000000000000000000000000000000000000000..f9c70c9346af30b6402e35465b3af63cd246ba44 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/advanced_config.md @@ -0,0 +1,731 @@ +> 📝 Click on the language section to expand / 言語をクリックして展開 + +# Advanced configuration / 高度な設定 + +## Table of contents / 目次 + +- [How to specify `network_args`](#how-to-specify-network_args--network_argsの指定方法) +- [LoRA+](#lora) +- [Select the target modules of LoRA](#select-the-target-modules-of-lora--loraの対象モジュールを選択する) +- [Save and view logs in TensorBoard format](#save-and-view-logs-in-tensorboard-format--tensorboard形式のログの保存と参照) +- [Save and view logs in wandb](#save-and-view-logs-in-wandb--wandbでログの保存と参照) +- [FP8 weight optimization for models](#fp8-weight-optimization-for-models--モデルの重みのfp8への最適化) +- [PyTorch Dynamo optimization for model training](#pytorch-dynamo-optimization-for-model-training--モデルの学習におけるpytorch-dynamoの最適化) +- [LoRA Post-Hoc EMA merging](#lora-post-hoc-ema-merging--loraのpost-hoc-emaマージ) +- [MagCache](#magcache) + +## How to specify `network_args` / `network_args`の指定方法 + +The `--network_args` option is an option for specifying detailed arguments to LoRA. 
Specify the arguments in the form of `key=value` in `--network_args`. + +
+日本語 +`--network_args`オプションは、LoRAへの詳細な引数を指定するためのオプションです。`--network_args`には、`key=value`の形式で引数を指定します。 +
+ +### Example / 記述例 + +If you specify it on the command line, write as follows. / コマンドラインで指定する場合は以下のように記述します。 + +```bash +accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --dit ... + --network_module networks.lora --network_dim 32 + --network_args "key1=value1" "key2=value2" ... +``` + +If you specify it in the configuration file, write as follows. / 設定ファイルで指定する場合は以下のように記述します。 + +```toml +network_args = ["key1=value1", "key2=value2", ...] +``` + +If you specify `"verbose=True"`, detailed information of LoRA will be displayed. / `"verbose=True"`を指定するとLoRAの詳細な情報が表示されます。 + +```bash +--network_args "verbose=True" "key1=value1" "key2=value2" ... +``` + +## LoRA+ + +LoRA+ is a method to improve the training speed by increasing the learning rate of the UP side (LoRA-B) of LoRA. Specify the multiplier for the learning rate. The original paper recommends 16, but adjust as needed. It seems to be good to start from around 4. For details, please refer to the [related PR of sd-scripts](https://github.com/kohya-ss/sd-scripts/pull/1233). + +Specify `loraplus_lr_ratio` with `--network_args`. + +
+日本語 + +LoRA+は、LoRAのUP側(LoRA-B)の学習率を上げることで学習速度を向上させる手法です。学習率に対する倍率を指定します。元論文では16を推奨していますが、必要に応じて調整してください。4程度から始めるとよいようです。詳細は[sd-scriptsの関連PR]https://github.com/kohya-ss/sd-scripts/pull/1233)を参照してください。 + +`--network_args`で`loraplus_lr_ratio`を指定します。 +
+ +### Example / 記述例 + +```bash +accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --dit ... + --network_module networks.lora --network_dim 32 --network_args "loraplus_lr_ratio=4" ... +``` + +## Select the target modules of LoRA / LoRAの対象モジュールを選択する + +*This feature is highly experimental and the specification may change. / この機能は特に実験的なもので、仕様は変更される可能性があります。* + +By specifying `exclude_patterns` and `include_patterns` with `--network_args`, you can select the target modules of LoRA. + +`exclude_patterns` excludes modules that match the specified pattern. `include_patterns` targets only modules that match the specified pattern. + +Specify the values as a list. For example, `"exclude_patterns=[r'.*single_blocks.*', r'.*double_blocks\.[0-9]\..*']"`. + +The pattern is a regular expression for the module name. The module name is in the form of `double_blocks.0.img_mod.linear` or `single_blocks.39.modulation.linear`. The regular expression is not a partial match but a complete match. + +The patterns are applied in the order of `exclude_patterns`→`include_patterns`. By default, the Linear layers of `img_mod`, `txt_mod`, and `modulation` of double blocks and single blocks are excluded. + +(`.*(img_mod|txt_mod|modulation).*` is specified.) + +
+日本語 + +`--network_args`で`exclude_patterns`と`include_patterns`を指定することで、LoRAの対象モジュールを選択することができます。 + +`exclude_patterns`は、指定したパターンに一致するモジュールを除外します。`include_patterns`は、指定したパターンに一致するモジュールのみを対象とします。 + +値は、リストで指定します。`"exclude_patterns=[r'.*single_blocks.*', r'.*double_blocks\.[0-9]\..*']"`のようになります。 + +パターンは、モジュール名に対する正規表現です。モジュール名は、たとえば`double_blocks.0.img_mod.linear`や`single_blocks.39.modulation.linear`のような形式です。正規表現は部分一致ではなく完全一致です。 + +パターンは、`exclude_patterns`→`include_patterns`の順で適用されます。デフォルトは、double blocksとsingle blocksのLinear層のうち、`img_mod`、`txt_mod`、`modulation`が除外されています。 + +(`.*(img_mod|txt_mod|modulation).*`が指定されています。) +
+ +### Example / 記述例 + +Only the modules of double blocks / double blocksのモジュールのみを対象とする場合: + +```bash +--network_args "exclude_patterns=[r'.*single_blocks.*']" +``` + +Only the modules of single blocks from the 10th / single blocksの10番目以降のLinearモジュールのみを対象とする場合: + +```bash +--network_args "exclude_patterns=[r'.*']" "include_patterns=[r'.*single_blocks\.\d{2}\.linear.*']" +``` + +## Save and view logs in TensorBoard format / TensorBoard形式のログの保存と参照 + +Specify the folder to save the logs with the `--logging_dir` option. Logs in TensorBoard format will be saved. + +For example, if you specify `--logging_dir=logs`, a `logs` folder will be created in the working folder, and logs will be saved in the date folder inside it. + +Also, if you specify the `--log_prefix` option, the specified string will be added before the date. For example, use `--logging_dir=logs --log_prefix=lora_setting1_` for identification. + +To view logs in TensorBoard, open another command prompt and activate the virtual environment. Then enter the following in the working folder. + +```powershell +tensorboard --logdir=logs +``` + +(tensorboard installation is required.) + +Then open a browser and access http://localhost:6006/ to display it. + +
+日本語 +`--logging_dir`オプションにログ保存先フォルダを指定してください。TensorBoard形式のログが保存されます。 + +たとえば`--logging_dir=logs`と指定すると、作業フォルダにlogsフォルダが作成され、その中の日時フォルダにログが保存されます。 + +また`--log_prefix`オプションを指定すると、日時の前に指定した文字列が追加されます。`--logging_dir=logs --log_prefix=lora_setting1_`などとして識別用にお使いください。 + +TensorBoardでログを確認するには、別のコマンドプロンプトを開き、仮想環境を有効にしてから、作業フォルダで以下のように入力します。 + +```powershell +tensorboard --logdir=logs +``` + +(tensorboardのインストールが必要です。) + +その後ブラウザを開き、http://localhost:6006/ へアクセスすると表示されます。 +
+ +## Save and view logs in wandb / wandbでログの保存と参照 + +`--log_with wandb` option is available to save logs in wandb format. `tensorboard` or `all` is also available. The default is `tensorboard`. + +Specify the project name with `--log_tracker_name` when using wandb. + +
+日本語 +`--log_with wandb`オプションを指定するとwandb形式でログを保存することができます。`tensorboard`や`all`も指定可能です。デフォルトは`tensorboard`です。 + +wandbを使用する場合は、`--log_tracker_name`でプロジェクト名を指定してください。 +
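+
+### Example / 記述例
+
+A minimal sketch (the project name `my-lora-project` is just a placeholder and the remaining training arguments are omitted; you typically need to be logged in to wandb beforehand, e.g. via `wandb login`):
+
+```bash
+accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/hv_train_network.py --dit ...
+    --logging_dir logs --log_with wandb --log_tracker_name my-lora-project
+```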
+ +## FP8 weight optimization for models / モデルの重みのFP8への最適化 + +The `--fp8_scaled` option is available to quantize the weights of the model to FP8 (E4M3) format with appropriate scaling. This reduces the VRAM usage while maintaining precision. Important weights are kept in FP16/BF16/FP32 format. + +The model weights must be in fp16 or bf16. Weights that have been pre-converted to float8_e4m3 cannot be used. + +Wan2.1 inference and training are supported. + +Specify the `--fp8_scaled` option in addition to the `--fp8` option during inference. + +Specify the `--fp8_scaled` option in addition to the `--fp8_base` option during training. + +Acknowledgments: This feature is based on the [implementation](https://github.com/Tencent/HunyuanVideo/blob/7df4a45c7e424a3f6cd7d653a7ff1f60cddc1eb1/hyvideo/modules/fp8_optimization.py) of [HunyuanVideo](https://github.com/Tencent/HunyuanVideo). The selection of high-precision modules is based on the [implementation](https://github.com/tdrussell/diffusion-pipe/blob/407c04fdae1c9ab5e67b54d33bef62c3e0a8dbc7/models/wan.py) of [diffusion-pipe](https://github.com/tdrussell/diffusion-pipe). I would like to thank these repositories. + +
+日本語 +重みを単純にFP8へcastするのではなく、適切なスケーリングでFP8形式に量子化することで、精度を維持しつつVRAM使用量を削減します。また、重要な重みはFP16/BF16/FP32形式で保持します。 + +モデルの重みは、fp16またはbf16が必要です。あらかじめfloat8_e4m3に変換された重みは使用できません。 + +Wan2.1の推論、学習のみ対応しています。 + +推論時は`--fp8`オプションに加えて `--fp8_scaled`オプションを指定してください。 + +学習時は`--fp8_base`オプションに加えて `--fp8_scaled`オプションを指定してください。 + +謝辞:この機能は、[HunyuanVideo](https://github.com/Tencent/HunyuanVideo)の[実装](https://github.com/Tencent/HunyuanVideo/blob/7df4a45c7e424a3f6cd7d653a7ff1f60cddc1eb1/hyvideo/modules/fp8_optimization.py)を参考にしました。また、高精度モジュールの選択においては[diffusion-pipe](https://github.com/tdrussell/diffusion-pipe)の[実装](https://github.com/tdrussell/diffusion-pipe/blob/407c04fdae1c9ab5e67b54d33bef62c3e0a8dbc7/models/wan.py)を参考にしました。これらのリポジトリに感謝します。 + +
+ +### Key features and implementation details / 主な特徴と実装の詳細 + +- Implements FP8 (E4M3) weight quantization for Linear layers +- Reduces VRAM requirements by using 8-bit weights for storage (slightly increased compared to existing `--fp8` `--fp8_base` options) +- Quantizes weights to FP8 format with appropriate scaling instead of simple cast to FP8 +- Maintains computational precision by dequantizing to original precision (FP16/BF16/FP32) during forward pass +- Preserves important weights in FP16/BF16/FP32 format + +The implementation: + +1. Quantizes weights to FP8 format with appropriate scaling +2. Replaces weights by FP8 quantized weights and stores scale factors in model state dict +3. Applies monkey patching to Linear layers for transparent dequantization during computation + +
+日本語 + +- Linear層のFP8(E4M3)重み量子化を実装 +- 8ビットの重みを使用することでVRAM使用量を削減(既存の`--fp8` `--fp8_base` オプションに比べて微増) +- 単純なFP8へのcastではなく、適切な値でスケールして重みをFP8形式に量子化 +- forward時に元の精度(FP16/BF16/FP32)に逆量子化して計算精度を維持 +- 精度が重要な重みはFP16/BF16/FP32のまま保持 + +実装: + +1. 精度を維持できる適切な倍率で重みをFP8形式に量子化 +2. 重みをFP8量子化重みに置き換え、倍率をモデルのstate dictに保存 +3. Linear層にmonkey patchingすることでモデルを変更せずに逆量子化 +
+ + ## PyTorch Dynamo optimization for model training / モデルの学習におけるPyTorch Dynamoの最適化 + +The PyTorch Dynamo options are now available to optimize the training process. PyTorch Dynamo is a Python-level JIT compiler designed to make unmodified PyTorch programs faster by using TorchInductor, a deep learning compiler. This integration allows for potential speedups in training while maintaining model accuracy. + +[PR #215](https://github.com/kohya-ss/musubi-tuner/pull/215) added this feature. + +Specify the `--dynamo_backend` option to enable Dynamo optimization with one of the available backends from the `DynamoBackend` enum. + +Additional options allow for fine-tuning the Dynamo behavior: +- `--dynamo_mode`: Controls the optimization strategy +- `--dynamo_fullgraph`: Enables fullgraph mode for potentially better optimization +- `--dynamo_dynamic`: Enables dynamic shape handling + +The `--dynamo_dynamic` option has been reported to have many problems based on the validation in PR #215. + +### Available options: + +``` +--dynamo_backend {NO, INDUCTOR, NVFUSER, CUDAGRAPHS, CUDAGRAPHS_FALLBACK, etc.} + Specifies the Dynamo backend to use (default is NO, which disables Dynamo) + +--dynamo_mode {default, reduce-overhead, max-autotune} + Specifies the optimization mode (default is 'default') + - 'default': Standard optimization + - 'reduce-overhead': Focuses on reducing compilation overhead + - 'max-autotune': Performs extensive autotuning for potentially better performance + +--dynamo_fullgraph + Flag to enable fullgraph mode, which attempts to capture and optimize the entire model graph + +--dynamo_dynamic + Flag to enable dynamic shape handling for models with variable input shapes +``` + +### Usage example: + +```bash +python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode default +``` + +For more aggressive optimization: +```bash +python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode max-autotune --dynamo_fullgraph +``` + +Note: The best combination of options may depend on your specific model and hardware. Experimentation may be necessary to find the optimal configuration. + +
+日本語 +PyTorch Dynamoオプションが学習プロセスを最適化するために追加されました。PyTorch Dynamoは、TorchInductor(ディープラーニングコンパイラ)を使用して、変更を加えることなくPyTorchプログラムを高速化するためのPythonレベルのJITコンパイラです。この統合により、モデルの精度を維持しながら学習の高速化が期待できます。 + +[PR #215](https://github.com/kohya-ss/musubi-tuner/pull/215) で追加されました。 + +`--dynamo_backend`オプションを指定して、`DynamoBackend`列挙型から利用可能なバックエンドの一つを選択することで、Dynamo最適化を有効にします。 + +追加のオプションにより、Dynamoの動作を微調整できます: +- `--dynamo_mode`:最適化戦略を制御します +- `--dynamo_fullgraph`:より良い最適化の可能性のためにフルグラフモードを有効にします +- `--dynamo_dynamic`:動的形状処理を有効にします + +PR #215での検証によると、`--dynamo_dynamic`には問題が多いことが報告されています。 + +__利用可能なオプション:__ + +``` +--dynamo_backend {NO, INDUCTOR, NVFUSER, CUDAGRAPHS, CUDAGRAPHS_FALLBACK, など} + 使用するDynamoバックエンドを指定します(デフォルトはNOで、Dynamoを無効にします) + +--dynamo_mode {default, reduce-overhead, max-autotune} + 最適化モードを指定します(デフォルトは 'default') + - 'default':標準的な最適化 + - 'reduce-overhead':コンパイルのオーバーヘッド削減に焦点を当てる + - 'max-autotune':より良いパフォーマンスのために広範な自動調整を実行 + +--dynamo_fullgraph + フルグラフモードを有効にするフラグ。モデルグラフ全体をキャプチャして最適化しようとします + +--dynamo_dynamic + 可変入力形状を持つモデルのための動的形状処理を有効にするフラグ +``` + +__使用例:__ + +```bash +python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode default +``` + +より積極的な最適化の場合: +```bash +python src/musubi_tuner/hv_train_network.py --dynamo_backend INDUCTOR --dynamo_mode max-autotune --dynamo_fullgraph +``` + +注意:最適なオプションの組み合わせは、特定のモデルとハードウェアに依存する場合があります。最適な構成を見つけるために実験が必要かもしれません。 +
+ +## LoRA Post-Hoc EMA merging / LoRAのPost-Hoc EMAマージ + +The LoRA Post-Hoc EMA (Exponential Moving Average) merging is a technique to combine multiple LoRA checkpoint files into a single, potentially more stable model. This method applies exponential moving average across multiple checkpoints sorted by modification time, with configurable decay rates. + +The Post-Hoc EMA method works by: + +1. Sorting checkpoint files by modification time (oldest to newest) +2. Using the oldest checkpoint as the base +3. Iteratively merging subsequent checkpoints with a decay rate (beta) +4. Optionally using linear interpolation between two beta values across the merge process + +Pseudo-code for merging multiple checkpoints with beta=0.95 would look like this: + +``` +beta = 0.95 +checkpoints = [checkpoint1, checkpoint2, checkpoint3] # List of checkpoints +merged_weights = checkpoints[0] # Use the first checkpoint as the base +for checkpoint in checkpoints[1:]: + merged_weights = beta * merged_weights + (1 - beta) * checkpoint +``` + +### Key features: + +- **Temporal ordering**: Automatically sorts files by modification time +- **Configurable decay rates**: Supports single beta value or linear interpolation between two beta values +- **Metadata preservation**: Maintains and updates metadata from the last checkpoint +- **Hash updating**: Recalculates model hashes for the merged weights +- **Dtype preservation**: Maintains original data types of tensors + +### Usage + +The LoRA Post-Hoc EMA merging is available as a standalone script: + +```bash +python src/musubi_tuner/lora_post_hoc_ema.py checkpoint1.safetensors checkpoint2.safetensors checkpoint3.safetensors --output_file merged_lora.safetensors --beta 0.95 +``` + +### Command line options: + +``` +path [path ...] + List of paths to the LoRA weight files to merge + +--beta BETA + Decay rate for merging weights (default: 0.95) + Higher values (closer to 1.0) give more weight to the accumulated average + Lower values give more weight to the current checkpoint + +--beta2 BETA2 + Second decay rate for linear interpolation (optional) + If specified, the decay rate will linearly interpolate from beta to beta2 + across the merging process + +--sigma_rel SIGMA_REL + Relative sigma for Power Function EMA (optional, mutually exclusive with beta/beta2) + This resolves the issue where the first checkpoint has a disproportionately large influence when beta is specified. + If specified, beta is calculated using the Power Function EMA method from the paper: + https://arxiv.org/pdf/2312.02696. This overrides beta and beta2. 
+ +--output_file OUTPUT_FILE + Output file path for the merged weights (required) + +--no_sort + Disable sorting of checkpoint files (merge in specified order) +``` + +### Examples: + +Basic usage with constant decay rate: +```bash +python src/musubi_tuner/lora_post_hoc_ema.py \ + lora_epoch_001.safetensors \ + lora_epoch_002.safetensors \ + lora_epoch_003.safetensors \ + --output_file lora_ema_merged.safetensors \ + --beta 0.95 +``` + +Using linear interpolation between two decay rates: +```bash +python src/musubi_tuner/lora_post_hoc_ema.py \ + lora_epoch_001.safetensors \ + lora_epoch_002.safetensors \ + lora_epoch_003.safetensors \ + --output_file lora_ema_interpolated.safetensors \ + --beta 0.90 \ + --beta2 0.95 +``` + +Using Power Function EMA with `sigma_rel`: +```bash +python src/musubi_tuner/lora_post_hoc_ema.py \ + lora_epoch_001.safetensors \ + lora_epoch_002.safetensors \ + lora_epoch_003.safetensors \ + --output_file lora_power_ema_merged.safetensors \ + --sigma_rel 0.2 +``` + + +#### betas for different σ-rel values: + +![beta-sigma_rel-graph](./betas_for_sigma_rel.png) + +### Recommended settings example (after training for 30 epochs, using `--beta`) + +If you're unsure which settings to try, start with the following "General Recommended Settings". + +#### 1. General Recommended Settings (start with these combinations) + +- **Target Epochs:** `15-30` (the latter half of training) +- **beta:** `0.9` (a balanced value) + +#### 2. If training converged early + +- **Situation:** Loss dropped early and stabilized afterwards. +- **Target Epochs:** `10-30` (from the epoch where loss stabilized to the end) +- **beta:** `0.95` (wider range, smoother) + +#### 3. If you want to avoid overfitting + +- **Situation:** In the latter part of training, generated results are too similar to training data. +- **Target Epochs:** `15-25` (focus on the peak performance range) +- **beta:** `0.8` (more emphasis on the latter part of the range while maintaining diversity) + +**Note:** The optimal values may vary depending on the model and dataset. It's recommended to experiment with multiple `beta` values (e.g., 0.8, 0.9, 0.95) and compare the generated results. + +### Recommended Settings Example (30 epochs training, using `--sigma_rel`) + +When using `--sigma_rel`, the beta decay schedule is determined by the Power Function EMA method. Here are some starting points: + +#### 1. General Recommended Settings +- **Target Epochs:** All epochs (from the first to the last). +- **sigma_rel:** `0.2` (a general starting point). + +#### 2. If training converged early +- **Situation:** Loss dropped early and stabilized afterwards. +- **Target Epochs:** All epochs. +- **sigma_rel:** `0.25` (gives more weight to earlier checkpoints, suitable for early convergence). + +#### 3. If you want to avoid overfitting +- **Situation:** In the latter part of training, generated results are too similar to training data. +- **Target Epochs:** From the first epoch, omitting the last few potentially overfitted epochs. +- **sigma_rel:** `0.15` (gives more weight to later (but not the very last) checkpoints, helping to mitigate overfitting from the final stages). + +**Note:** The optimal `sigma_rel` value can depend on the dataset, model, and training duration. Experimentation is encouraged. Values typically range from 0.1 to 0.5. A graph showing the relationship between `sigma_rel` and the calculated `beta` values over epochs will be provided later to help understand its behavior. 
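+
+For reference, the following is a rough sketch of how `--sigma_rel` relates to the per-checkpoint decay rates, based on the Power Function EMA formulation in the cited paper (arXiv 2312.02696). It only illustrates the math under that assumption and is not the actual code of `lora_post_hoc_ema.py`.
+
+```python
+import numpy as np
+
+def gamma_from_sigma_rel(sigma_rel: float) -> float:
+    # Solve sigma_rel^2 = (g + 1) / ((g + 2)^2 * (g + 3)) for g, i.e. the cubic
+    # t*g^3 + 7*t*g^2 + (16*t - 1)*g + (12*t - 1) = 0 with t = sigma_rel^2.
+    t = sigma_rel**2
+    roots = np.roots([t, 7.0 * t, 16.0 * t - 1.0, 12.0 * t - 1.0])
+    return max(r.real for r in roots if abs(r.imag) < 1e-9)
+
+def beta_for_checkpoint(i: int, gamma: float) -> float:
+    # Decay rate used when folding the i-th checkpoint (1-indexed) into the running average:
+    # merged = beta * merged + (1 - beta) * checkpoint_i. For i = 1 this gives beta = 0,
+    # so the first checkpoint only initializes the average instead of dominating it.
+    return (1.0 - 1.0 / i) ** (gamma + 1.0)
+
+gamma = gamma_from_sigma_rel(0.2)
+print([round(beta_for_checkpoint(i, gamma), 3) for i in range(1, 11)])
+```
+
+With `--sigma_rel 0.2` the resulting betas start near 0 for the earliest checkpoints and approach 1 for later ones, which is why the first checkpoint no longer has a disproportionately large influence.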
+ +### Notes: + +- Files are automatically sorted by modification time, so the order in the command line doesn't matter +- The `--sigma_rel` option is mutually exclusive with `--beta` and `--beta2`. If `--sigma_rel` is provided, it will determine the beta values, and any provided `--beta` or `--beta2` will be ignored. +- All checkpoint files to be merged should be from the same training run, saved per epoch or step + - Merging is possible if shapes match, but may not work correctly as Post Hoc EMA +- All checkpoint files must have the same alpha value +- The merged model will have updated hash values in its metadata +- The metadata of the merged model will be taken from the last checkpoint, with only the hash value recalculated +- Non-float tensors (long, int, bool, etc.) are not merged and will use the first checkpoint's values +- Processing is done in float32 precision to maintain numerical stability during merging. The original data types are preserved when saving + +
+日本語 + +LoRA Post-Hoc EMA(指数移動平均)マージは、複数のLoRAチェックポイントファイルを単一の、より安定したモデルに結合する手法です。スクリプトでは、修正時刻でソート(古い順)された複数のチェックポイントに対して指定された減衰率で指数移動平均を適用します。減衰率は指定可能です。 + +Post-Hoc EMA方法の動作: + +1. チェックポイントファイルを修正時刻順(古いものから新しいものへ)にソート +2. 最古のチェックポイントをベースとして使用 +3. 減衰率(beta)を使って後続のチェックポイントを反復的にマージ +4. オプションで、マージプロセス全体で2つのベータ値間の線形補間を使用 + +疑似コードによるイメージ:複数のチェックポイントをbeta=0.95でマージする場合、次のように計算されます。 + +``` +beta = 0.95 +checkpoints = [checkpoint1, checkpoint2, checkpoint3] # チェックポイントのリスト +merged_weights = checkpoints[0] # 最初のチェックポイントをベースとして使用 +for checkpoint in checkpoints[1:]: + merged_weights = beta * merged_weights + (1 - beta) * checkpoint +``` + +### 主な特徴: + +- **時系列順序付け**: ファイルを修正時刻で自動的にソート +- **設定可能な減衰率**: 単一のベータ値または2つのベータ値間の線形補間をサポート +- **メタデータ保持**: 最後のチェックポイントからメタデータを維持・更新 +- **ハッシュ更新**: マージされた重みのモデルハッシュを再計算 +- **データ型保持**: テンソルの元のデータ型を維持 + +### 使用法 + +LoRA Post-Hoc EMAマージは独立したスクリプトとして提供されています: + +```bash +python src/musubi_tuner/lora_post_hoc_ema.py checkpoint1.safetensors checkpoint2.safetensors checkpoint3.safetensors --output_file merged_lora.safetensors --beta 0.95 +``` + +### コマンドラインオプション: + +``` +path [path ...] + マージするLoRA重みファイルのパスのリスト + +--beta BETA + 重みマージのための減衰率(デフォルト:0.95) + 高い値(1.0に近い)は累積平均により大きな重みを与える(古いチェックポイントを重視) + 低い値は現在のチェックポイントにより大きな重みを与える + +--beta2 BETA2 + 線形補間のための第2減衰率(オプション) + 指定された場合、減衰率はマージプロセス全体でbetaからbeta2へ線形補間される + +--sigma_rel SIGMA_REL + Power Function EMAのための相対シグマ(オプション、beta/beta2と同時に指定できません) + betaを指定した場合の、最初のチェックポイントが相対的に大きな影響を持つ欠点を解決します + 指定された場合、betaは次の論文に基づいてPower Function EMA法で計算されます: + https://arxiv.org/pdf/2312.02696. これによりbetaとbeta2が上書きされます。 + +--output_file OUTPUT_FILE + マージされた重みの出力ファイルパス(必須) + +--no_sort + チェックポイントファイルのソートを無効にする(指定した順序でマージ) +``` + +### 例: + +定数減衰率での基本的な使用法: +```bash +python src/musubi_tuner/lora_post_hoc_ema.py \ + lora_epoch_001.safetensors \ + lora_epoch_002.safetensors \ + lora_epoch_003.safetensors \ + --output_file lora_ema_merged.safetensors \ + --beta 0.95 +``` + +2つの減衰率間の線形補間を使用: +```bash +python src/musubi_tuner/lora_post_hoc_ema.py \ + lora_epoch_001.safetensors \ + lora_epoch_002.safetensors \ + lora_epoch_003.safetensors \ + --output_file lora_ema_interpolated.safetensors \ + --beta 0.90 \ + --beta2 0.95 +``` + +`シグマ_rel`を使用したPower Function EMA: +```bash +python src/musubi_tuner/lora_post_hoc_ema.py \ + lora_epoch_001.safetensors \ + lora_epoch_002.safetensors \ + lora_epoch_003.safetensors \ + --output_file lora_power_ema_merged.safetensors \ + --sigma_rel 0.2 +``` + +### 推奨設定の例 (30エポック学習し、 `--beta`を使用する場合) + +どの設定から試せば良いか分からない場合は、まず以下の「**一般的な推奨設定**」から始めてみてください。 + +#### 1. 一般的な推奨設定 (まず試すべき組み合わせ) + +- **対象エポック:** `15-30` (学習の後半半分) +- **beta:** `0.9` (バランスの取れた値) + +#### 2. 早期に学習が収束した場合 + +- **状況:** lossが早い段階で下がり、その後は安定している。 +- **対象エポック:** `10-30` (lossが安定し始めたエポックから最後まで) +- **beta:** `0.95` (対象範囲が広いので、より滑らかにする) + +#### 3. 過学習を避けたい場合 + +- **状況:** 学習の最後の方で、生成結果が学習データに似すぎている。 +- **対象エポック:** `15-25` (性能のピークと思われる範囲に絞る) +- **beta:** `0.8` (範囲の終盤を重視しつつ、多様性を残す) + +**ヒント:** 最適な値はモデルやデータセットによって異なります。複数の`beta`(例: 0.8, 0.9, 0.95)を試して、生成結果を比較することをお勧めします。 + +### 推奨設定の例 (30エポック学習し、 `--sigma_rel`を使用する場合) + +`--sigma_rel` を使用する場合、betaの減衰スケジュールはPower Function EMA法によって決定されます。以下はいくつかの開始点です。 + +#### 1. 一般的な推奨設定 +- **対象エポック:** 全てのエポック(最初から最後まで) +- **sigma_rel:** `0.2` (一般的な開始点) + +#### 2. 早期に学習が収束した場合 +- **状況:** lossが早い段階で下がり、その後は安定している。 +- **対象エポック:** 全てのエポック +- **sigma_rel:** `0.25` (初期のチェックポイントに重きを置くため、早期収束に適しています) + +#### 3. 
過学習を避けたい場合 +- **状況:** 学習の最後の方で、生成結果が学習データに似すぎている。 +- **対象エポック:** 最初のエポックから、過学習の可能性がある最後の数エポックを除外 +- **sigma_rel:** `0.15` (終盤(ただし最後の最後ではない)のチェックポイントに重きを置き、最終段階での過学習を軽減するのに役立ちます) + +**ヒント:** 最適な `sigma_rel` の値は、データセット、モデル、学習期間によって異なる場合があります。実験を推奨します。値は通常0.1から0.5の範囲です。`sigma_rel` とエポックごとの計算された `beta` 値の関係を示すグラフは、その挙動を理解するのに役立つよう後ほど提供する予定です。 + +### 注意点: + +- ファイルは修正時刻で自動的にソートされるため、コマンドラインでの順序は関係ありません +- `--sigma_rel`オプションは`--beta`および`--beta2`と相互に排他的です。`--sigma_rel`が指定された場合、それがベータ値を決定し、指定された`--beta`または`--beta2`は無視されます。 +- マージする全てのチェックポイントファイルは、ひとつの学習で、エポックごと、またはステップごとに保存されたモデルである必要があります + - 形状が一致していればマージはできますが、Post Hoc EMAとしては正しく動作しません +- alpha値はすべてのチェックポイントで同じである必要があります +- マージされたモデルのメタデータは、最後のチェックポイントのものが利用されます。ハッシュ値のみが再計算されます +- 浮動小数点以外の、long、int、boolなどのテンソルはマージされません(最初のチェックポイントのものが使用されます) +- マージ中の数値安定性を維持するためにfloat32精度で計算されます。保存時は元のデータ型が維持されます + +
+
+## MagCache
+
+The following is quoted from the [MagCache github repository](https://github.com/Zehong-Ma/MagCache) "Magnitude-aware Cache (MagCache) for Video Diffusion Models":
+
+> We introduce Magnitude-aware Cache (MagCache), a training-free caching approach that estimates and leverages the fluctuating differences among model outputs across timesteps based on the robust magnitude observations, thereby accelerating the inference. MagCache works well for Video Diffusion Models, Image Diffusion models.
+
+We have implemented the MagCache feature in Musubi Tuner. Some of the code is based on the MagCache repository. For now, it is only available in `fpack_generate_video.py`.
+
+### Usage
+
+1. Calibrate the mag ratios
+    - Run the inference script as normal, but with the `--magcache_calibration` option to calibrate the mag ratios. You will get output like the following:
+
+    ```
+    INFO:musubi_tuner.fpack_generate_video:Copy and paste following values to --magcache_mag_ratios argument to use them:
+    1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734
+    ```
+    - It is recommended to run the calibration with your custom prompt and model.
+    - If you run inference on a multi-section video, you will get mag ratios for each section. You can use the values from one of the sections or average them.
+
+2. Use the mag ratios
+    - Run the inference script with the `--magcache_mag_ratios` option to use the mag ratios. For example:
+
+    ```bash
+    python fpack_generate_video.py --magcache_mag_ratios 1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734
+    ```
+
+    - Specify `--magcache_mag_ratios 0` to use the default mag ratios from the MagCache repository.
+    - It is recommended to use the same number of steps as during calibration. If the number of steps differs, the mag ratios are interpolated to the specified steps.
+    - You can also specify the `--magcache_retention_ratio`, `--magcache_threshold`, and `--magcache_k` options to control the MagCache behavior. The default values are 0.2, 0.24, and 6, respectively (same as the MagCache repository).
+
+    ```bash
+    python fpack_generate_video.py --magcache_retention_ratio 0.2 --magcache_threshold 0.24 --magcache_k 6
+    ```
+
+    - The `--magcache_retention_ratio` option controls the ratio of steps that are not cached. For example, if you set it to 0.2, the first 20% of the steps will not be cached. The default value is 0.2.
+    - The `--magcache_threshold` option controls the threshold for whether to use the cached output or not. If the accumulated error is less than the threshold, the cached output will be used. The default value is 0.24.
+        - The error is calculated as the accumulated error multiplied by the mag ratio.
+    - The `--magcache_k` option controls the number of steps to use for the cache. The default value is 6, which means that 6 consecutive steps will be used for the cache. The default value of 6 is recommended for 50 steps, so you may want to lower it for a smaller number of steps.
+
+### Generated video example
+
+Using the F1 model without MagCache, approximately 90 seconds are required to generate a single-section video with 25 steps (without VAE decoding) in my environment.
+ +https://github.com/user-attachments/assets/30b8d05e-9bd6-42bf-997f-5ba5b3dde876 + +With MagCache, default settings, approximately 30 seconds are required to generate with the same settings. + +https://github.com/user-attachments/assets/080076ea-4088-443c-8138-4eeb00694ec5 + +With MagCache, `--magcache_retention_ratio 0.2 --magcache_threshold 0.12 --magcache_k 3`, approximately 35 seconds are required to generate with the same settings. + +https://github.com/user-attachments/assets/27d6c7ff-e3db-4c52-8668-9a887441acef + +
+日本語 + +以下は、[MagCache githubリポジトリ](https://github.com/Zehong-Ma/MagCache) "Magnitude-aware Cache (MagCache) for Video Diffusion Models"からの引用の拙訳です: + +> Magnitude-aware Cache (MagCache)は、トレーニング不要のキャッシングアプローチで、堅牢なマグニチュード観測に基づいてタイムステップ間のモデル出力の変動差を推定および活用し、推論を加速します。MagCacheは、ビデオ拡散モデル、画像拡散モデルに適しています。 + +Musubi TunerにMagCache機能を実装しました。一部のコードはMagCacheリポジトリのコードを基にしています。現在は`fpack_generate_video.py`でのみ利用可能です。 + +### 使用方法 + +1. mag_ratiosのキャリブレーション + - `--magcache_calibration`オプションを指定して、それ以外は通常通り推論スクリプトを実行し、mag ratiosをキャリブレーションします。以下のような出力が得られます: + + ``` + INFO:musubi_tuner.fpack_generate_video:Copy and paste following values to --magcache_mag_ratios argument to use them: + 1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734 + ``` + - カスタムプロンプトとモデルでキャリブレーションを実行することをお勧めします。 + - 複数セクションビデオを推論する場合、各セクションのmag ratiosが出力されます。どれか一つ、またはそれらを平均した値を使ってください。 + +2. mag ratiosの使用 + - `--magcache_mag_ratios`オプションでmag ratiosを指定して推論スクリプトを実行します。例: + + ```bash + python fpack_generate_video.py --magcache_mag_ratios 1.00000,1.26562,1.08594,1.02344,1.00781,1.01562,1.01562,1.03125,1.04688,1.00781,1.03125,1.00000,1.01562,1.01562,1.02344,1.01562,0.98438,1.05469,0.98438,0.97266,1.03125,0.96875,0.93359,0.95703,0.77734 + ``` + + - `--magcache_mag_ratios 0`を指定すると、MagCacheリポジトリのデフォルトのmag ratiosが使用されます。 + - mag ratiosの数はキャリブレーションした時と同じステップ数を指定することをお勧めします。ステップ数が異なる場合、mag ratiosは指定されたステップ数に合うように補間されます。 + - `--magcache_retention_ratio`, `--magcache_threshold`, `--magcache_k`オプションを指定してMagCacheの動作を制御できます。デフォルト値は0.2、0.24、6です(MagCacheリポジトリと同じです)。 + + ```bash + python fpack_generate_video.py --magcache_retention_ratio 0.2 --magcache_threshold 0.24 --magcache_k 6 + ``` + + - `--magcache_retention_ratio`オプションは、キャッシュしないステップの割合を制御します。例えば、0.2に設定すると、最初の20%のステップはキャッシュされません。デフォルト値は0.2です。 + - `--magcache_threshold`オプションは、キャッシュされた出力を使用するかどうかの閾値を制御します。累積誤差がこの閾値未満の場合、キャッシュされた出力が使用されます。デフォルト値は0.24です。 + - 誤差は、累積誤差にmag ratioを掛けたものとして計算されます。 + - `--magcache_k`オプションは、キャッシュに使用するステップ数を制御します。デフォルト値は6で、これは連続する6ステップがキャッシュに使用されることを意味します。デフォルト値6は恐らく50ステップの場合の推奨値のため、ステップ数が少ない場合は減らすことを検討してください。 + +生成サンプルは英語での説明を参照してください。 + +
diff --git a/exp_code/1_benchmark/musubi-tuner/docs/framepack.md b/exp_code/1_benchmark/musubi-tuner/docs/framepack.md new file mode 100644 index 0000000000000000000000000000000000000000..f8fb2b664e875377d6e7ed96799323d030baae01 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/framepack.md @@ -0,0 +1,607 @@ +# FramePack + +## Overview / 概要 + +This document describes the usage of the [FramePack](https://github.com/lllyasviel/FramePack) architecture within the Musubi Tuner framework. FramePack is a novel video generation architecture developed by lllyasviel. + +Key differences from HunyuanVideo: +- FramePack only supports Image-to-Video (I2V) generation. Text-to-Video (T2V) is not supported. +- It utilizes a different DiT model architecture and requires an additional Image Encoder. VAE is same as HunyuanVideo. Text Encoders seem to be the same as HunyuanVideo but we employ the original FramePack method to utilize them. +- Caching and training scripts are specific to FramePack (`fpack_*.py`). +- Due to its progressive generation nature, VRAM usage can be significantly lower, especially for longer videos, compared to other architectures. + +The official documentation does not provide detailed explanations on how to train the model, but it is based on the FramePack implementation and paper. + +This feature is experimental. + +For one-frame inference and training, see [here](./framepack_1f.md). + +
+日本語 + +このドキュメントは、Musubi Tunerフレームワーク内での[FramePack](https://github.com/lllyasviel/FramePack) アーキテクチャの使用法について説明しています。FramePackは、lllyasviel氏にによって開発された新しいビデオ生成アーキテクチャです。 + +HunyuanVideoとの主な違いは次のとおりです。 +- FramePackは、画像からビデオ(I2V)生成のみをサポートしています。テキストからビデオ(T2V)はサポートされていません。 +- 異なるDiTモデルアーキテクチャを使用し、追加の画像エンコーダーが必要です。VAEはHunyuanVideoと同じです。テキストエンコーダーはHunyuanVideoと同じと思われますが、FramePack公式と同じ方法で推論を行っています。 +- キャッシングと学習スクリプトはFramePack専用(`fpack_*.py`)です。 +- セクションずつ生成するため、他のアーキテクチャと比較して、特に長いビデオの場合、VRAM使用量が大幅に少なくなる可能性があります。 + +学習方法について公式からは詳細な説明はありませんが、FramePackの実装と論文を参考にしています。 + +この機能は実験的なものです。 + +1フレーム推論、学習については[こちら](./framepack_1f.md)を参照してください。 +
+ +## Download the model / モデルのダウンロード + +You need to download the DiT, VAE, Text Encoder 1 (LLaMA), Text Encoder 2 (CLIP), and Image Encoder (SigLIP) models specifically for FramePack. Several download options are available for each component. + +***Note:** The weights are publicly available on the following page: [maybleMyers/framepack_h1111](https://huggingface.co/maybleMyers/framepack_h1111) (except for FramePack-F1). Thank you maybleMyers! + +### DiT Model + +Choose one of the following methods: + +1. **From lllyasviel's Hugging Face repo:** Download the three `.safetensors` files (starting with `diffusion_pytorch_model-00001-of-00003.safetensors`) from [lllyasviel/FramePackI2V_HY](https://huggingface.co/lllyasviel/FramePackI2V_HY). Specify the path to the first file (`...-00001-of-00003.safetensors`) as the `--dit` argument. For FramePack-F1, download from [lllyasviel/FramePack_F1_I2V_HY_20250503](https://huggingface.co/lllyasviel/FramePack_F1_I2V_HY_20250503). + +2. **From local FramePack installation:** If you have cloned and run the official FramePack repository, the model might be downloaded locally. Specify the path to the snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--lllyasviel--FramePackI2V_HY/snapshots/`. FramePack-F1 is also available in the same way. + +3. **From Kijai's Hugging Face repo:** Download the single file `FramePackI2V_HY_bf16.safetensors` from [Kijai/HunyuanVideo_comfy](https://huggingface.co/Kijai/HunyuanVideo_comfy/blob/main/FramePackI2V_HY_bf16.safetensors). Specify the path to this file as the `--dit` argument. No FramePack-F1 model is available here currently. + +### VAE Model + +Choose one of the following methods: + +1. **Use official HunyuanVideo VAE:** Follow the instructions in the main [README.md](../README.md#model-download). +2. **From hunyuanvideo-community Hugging Face repo:** Download `vae/diffusion_pytorch_model.safetensors` from [hunyuanvideo-community/HunyuanVideo](https://huggingface.co/hunyuanvideo-community/HunyuanVideo). +3. **From local FramePack installation:** If you have cloned and run the official FramePack repository, the VAE might be downloaded locally within the HunyuanVideo community model snapshot. Specify the path to the snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--hunyuanvideo-community--HunyuanVideo/snapshots/`. + +### Text Encoder 1 (LLaMA) Model + +Choose one of the following methods: + +1. **From Comfy-Org Hugging Face repo:** Download `split_files/text_encoders/llava_llama3_fp16.safetensors` from [Comfy-Org/HunyuanVideo_repackaged](https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged). +2. **From hunyuanvideo-community Hugging Face repo:** Download the four `.safetensors` files (starting with `text_encoder/model-00001-of-00004.safetensors`) from [hunyuanvideo-community/HunyuanVideo](https://huggingface.co/hunyuanvideo-community/HunyuanVideo). Specify the path to the first file (`...-00001-of-00004.safetensors`) as the `--text_encoder1` argument. +3. **From local FramePack installation:** (Same as VAE) Specify the path to the HunyuanVideo community model snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--hunyuanvideo-community--HunyuanVideo/snapshots/`. + +### Text Encoder 2 (CLIP) Model + +Choose one of the following methods: + +1. **From Comfy-Org Hugging Face repo:** Download `split_files/text_encoders/clip_l.safetensors` from [Comfy-Org/HunyuanVideo_repackaged](https://huggingface.co/Comfy-Org/HunyuanVideo_repackaged). +2. 
**From hunyuanvideo-community Hugging Face repo:** Download `text_encoder_2/model.safetensors` from [hunyuanvideo-community/HunyuanVideo](https://huggingface.co/hunyuanvideo-community/HunyuanVideo). +3. **From local FramePack installation:** (Same as VAE) Specify the path to the HunyuanVideo community model snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--hunyuanvideo-community--HunyuanVideo/snapshots/`. + +### Image Encoder (SigLIP) Model + +Choose one of the following methods: + +1. **From Comfy-Org Hugging Face repo:** Download `sigclip_vision_patch14_384.safetensors` from [Comfy-Org/sigclip_vision_384](https://huggingface.co/Comfy-Org/sigclip_vision_384). +2. **From lllyasviel's Hugging Face repo:** Download `image_encoder/model.safetensors` from [lllyasviel/flux_redux_bfl](https://huggingface.co/lllyasviel/flux_redux_bfl). +3. **From local FramePack installation:** If you have cloned and run the official FramePack repository, the model might be downloaded locally. Specify the path to the snapshot directory, e.g., `path/to/FramePack/hf_download/hub/models--lllyasviel--flux_redux_bfl/snapshots/`. + +
+日本語 + +※以下のページに重みが一括で公開されています(FramePack-F1を除く)。maybleMyers 氏に感謝いたします。: https://huggingface.co/maybleMyers/framepack_h1111 + +DiT、VAE、テキストエンコーダー1(LLaMA)、テキストエンコーダー2(CLIP)、および画像エンコーダー(SigLIP)モデルは複数の方法でダウンロードできます。英語の説明を参考にして、ダウンロードしてください。 + +FramePack公式のリポジトリをクローンして実行した場合、モデルはローカルにダウンロードされている可能性があります。スナップショットディレクトリへのパスを指定してください。例:`path/to/FramePack/hf_download/hub/models--lllyasviel--flux_redux_bfl/snapshots/` + +HunyuanVideoの推論をComfyUIですでに行っている場合、いくつかのモデルはすでにダウンロードされている可能性があります。 +
+ +## Pre-caching / 事前キャッシング + +The default resolution for FramePack is 640x640. See [the source code](../src/musubi_tuner/frame_pack/bucket_tools.py) for the default resolution of each bucket. + +The dataset for training must be a video dataset. Image datasets are not supported. You can train on videos of any length. Specify `frame_extraction` as `full` and set `max_frames` to a sufficiently large value. However, if the video is too long, you may run out of VRAM during VAE encoding. + +### Latent Pre-caching / latentの事前キャッシング + +Latent pre-caching uses a dedicated script for FramePack. You **must** provide the Image Encoder model. + +```bash +python src/musubi_tuner/fpack_cache_latents.py \ + --dataset_config path/to/toml \ + --vae path/to/vae_model.safetensors \ + --image_encoder path/to/image_encoder_model.safetensors \ + --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 +``` + +Key differences from HunyuanVideo caching: +- Uses `fpack_cache_latents.py`. +- Requires the `--image_encoder` argument pointing to the downloaded SigLIP model. +- The script generates multiple cache files per video, each corresponding to a different section, with the section index appended to the filename (e.g., `..._frame_pos-0000-count_...` becomes `..._frame_pos-0000-0000-count_...`, `..._frame_pos-0000-0001-count_...`, etc.). +- Image embeddings are calculated using the Image Encoder and stored in the cache files alongside the latents. + +For VRAM savings during VAE decoding, consider using `--vae_chunk_size` and `--vae_spatial_tile_sample_min_size`. If VRAM is overflowing and using shared memory, it is recommended to set `--vae_chunk_size` to 16 or 8, and `--vae_spatial_tile_sample_min_size` to 64 or 32. + +Specifying `--f1` is required for FramePack-F1 training. For one-frame training, specify `--one_frame`. If you change the presence of these options, please overwrite the existing cache without specifying `--skip_existing`. + +`--one_frame_no_2x` and `--one_frame_no_4x` options are available for one-frame training, described in the next section. + +**FramePack-F1 support:** +You can apply the FramePack-F1 sampling method by specifying `--f1` during caching. The training script also requires specifying `--f1` to change the options during sample generation. + +By default, the sampling method used is Inverted anti-drifting (the same as during inference with the original FramePack model, using the latent and index in reverse order), described in the paper. You can switch to FramePack-F1 sampling (Vanilla sampling, using the temporally ordered latent and index) by specifying `--f1`. + +
+日本語 + +FramePackのデフォルト解像度は640x640です。各バケットのデフォルト解像度については、[ソースコード](../src/musubi_tuner/frame_pack/bucket_tools.py)を参照してください。 + +画像データセットでの学習は行えません。また動画の長さによらず学習可能です。 `frame_extraction` に `full` を指定して、`max_frames` に十分に大きな値を指定してください。ただし、あまりにも長いとVAEのencodeでVRAMが不足する可能性があります。 + +latentの事前キャッシングはFramePack専用のスクリプトを使用します。画像エンコーダーモデルを指定する必要があります。 + +HunyuanVideoのキャッシングとの主な違いは次のとおりです。 +- `fpack_cache_latents.py`を使用します。 +- ダウンロードしたSigLIPモデルを指す`--image_encoder`引数が必要です。 +- スクリプトは、各ビデオに対して複数のキャッシュファイルを生成します。各ファイルは異なるセクションに対応し、セクションインデックスがファイル名に追加されます(例:`..._frame_pos-0000-count_...`は`..._frame_pos-0000-0000-count_...`、`..._frame_pos-0000-0001-count_...`などになります)。 +- 画像埋め込みは画像エンコーダーを使用して計算され、latentとともにキャッシュファイルに保存されます。 + +VAEのdecode時のVRAM節約のために、`--vae_chunk_size`と`--vae_spatial_tile_sample_min_size`を使用することを検討してください。VRAMがあふれて共有メモリを使用している場合には、`--vae_chunk_size`を16、8などに、`--vae_spatial_tile_sample_min_size`を64、32などに変更することをお勧めします。 + +FramePack-F1の学習を行う場合は`--f1`を指定してください。これらのオプションの有無を変更する場合には、`--skip_existing`を指定せずに既存のキャッシュを上書きしてください。 + +**FramePack-F1のサポート:** +キャッシュ時のオプションに`--f1`を指定することで、FramePack-F1のサンプリング方法を適用できます。学習スクリプトについても`--f1`を指定してサンプル生成時のオプションを変更する必要があります。 + +デフォルトでは、論文のサンプリング方法 Inverted anti-drifting (無印のFramePackの推論時と同じ、逆順の latent と index を使用)を使用します。`--f1`を指定すると FramePack-F1 の Vanilla sampling (時間順の latent と index を使用)に変更できます。 +
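+
+For reference, a FramePack-F1 caching run is simply the command above with `--f1` added (a sketch with placeholder paths; add `--one_frame` instead when preparing a cache for one-frame training):
+
+```bash
+python src/musubi_tuner/fpack_cache_latents.py \
+    --dataset_config path/to/toml \
+    --vae path/to/vae_model.safetensors \
+    --image_encoder path/to/image_encoder_model.safetensors \
+    --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 \
+    --f1
+```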
+ +### Text Encoder Output Pre-caching / テキストエンコーダー出力の事前キャッシング + +Text encoder output pre-caching also uses a dedicated script. + +```bash +python src/musubi_tuner/fpack_cache_text_encoder_outputs.py \ + --dataset_config path/to/toml \ + --text_encoder1 path/to/text_encoder1 \ + --text_encoder2 path/to/text_encoder2 \ + --batch_size 16 +``` + +Key differences from HunyuanVideo caching: +- Uses `fpack_cache_text_encoder_outputs.py`. +- Requires both `--text_encoder1` (LLaMA) and `--text_encoder2` (CLIP) arguments. +- Uses `--fp8_llm` option to run the LLaMA Text Encoder 1 in fp8 mode for VRAM savings (similar to `--fp8_t5` in Wan2.1). +- Saves LLaMA embeddings, attention mask, and CLIP pooler output to the cache file. + +
+日本語 + +テキストエンコーダー出力の事前キャッシングも専用のスクリプトを使用します。 + +HunyuanVideoのキャッシングとの主な違いは次のとおりです。 +- `fpack_cache_text_encoder_outputs.py`を使用します。 +- LLaMAとCLIPの両方の引数が必要です。 +- LLaMAテキストエンコーダー1をfp8モードで実行するための`--fp8_llm`オプションを使用します(Wan2.1の`--fp8_t5`に似ています)。 +- LLaMAの埋め込み、アテンションマスク、CLIPのプーラー出力をキャッシュファイルに保存します。 + +
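+
+If VRAM is limited, the `--fp8_llm` option mentioned above can simply be appended to the same command (a sketch with placeholder paths):
+
+```bash
+python src/musubi_tuner/fpack_cache_text_encoder_outputs.py \
+    --dataset_config path/to/toml \
+    --text_encoder1 path/to/text_encoder1 \
+    --text_encoder2 path/to/text_encoder2 \
+    --fp8_llm --batch_size 16
+```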
+ + +## Training / 学習 + +### Training + +Training uses a dedicated script `fpack_train_network.py`. Remember FramePack only supports I2V training. + +```bash +accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/fpack_train_network.py \ + --dit path/to/dit_model \ + --vae path/to/vae_model.safetensors \ + --text_encoder1 path/to/text_encoder1 \ + --text_encoder2 path/to/text_encoder2 \ + --image_encoder path/to/image_encoder_model.safetensors \ + --dataset_config path/to/toml \ + --sdpa --mixed_precision bf16 \ + --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing \ + --timestep_sampling shift --weighting_scheme none --discrete_flow_shift 3.0 \ + --max_data_loader_n_workers 2 --persistent_data_loader_workers \ + --network_module networks.lora_framepack --network_dim 32 \ + --max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \ + --output_dir path/to/output_dir --output_name name-of-lora +``` + +If you use the command prompt (Windows, not PowerShell), you may need to write them in a single line, or use `^` instead of `\` at the end of each line to continue the command. + +The maximum value for `--blocks_to_swap` is 36. The default resolution for FramePack is 640x640, which requires around 17GB of VRAM. If you run out of VRAM, consider lowering the dataset resolution. + +Key differences from HunyuanVideo training: +- Uses `fpack_train_network.py`. +- `--f1` option is available for FramePack-F1 model training. You need to specify the FramePack-F1 model as `--dit`. This option only changes the sample generation during training. The training process itself is the same as the original FramePack model. +- **Requires** specifying `--vae`, `--text_encoder1`, `--text_encoder2`, and `--image_encoder`. +- **Requires** specifying `--network_module networks.lora_framepack`. +- Optional `--latent_window_size` argument (default 9, should match caching). +- Memory saving options like `--fp8` (for DiT) and `--fp8_llm` (for Text Encoder 1) are available. `--fp8_scaled` is recommended when using `--fp8` for DiT. +- `--vae_chunk_size` and `--vae_spatial_tile_sample_min_size` options are available for the VAE to prevent out-of-memory during sampling (similar to caching). +- `--gradient_checkpointing` is available for memory savings. +- If you encounter an error when the batch size is greater than 1 (especially when specifying `--sdpa` or `--xformers`, it will always result in an error), please specify `--split_attn`. + + +Training settings (learning rate, optimizers, etc.) are experimental. Feedback is welcome. + +
+日本語 + +FramePackの学習は専用のスクリプト`fpack_train_network.py`を使用します。FramePackはI2V学習のみをサポートしています。 + +コマンド記述例は英語版を参考にしてください。WindowsでPowerShellではなくコマンドプロンプトを使用している場合、コマンドを1行で記述するか、各行の末尾に`\`の代わりに`^`を付けてコマンドを続ける必要があります。 + +`--blocks_to_swap`の最大値は36です。FramePackのデフォルト解像度(640x640)では、17GB程度のVRAMが必要です。VRAM容量が不足する場合は、データセットの解像度を下げてください。 + +HunyuanVideoの学習との主な違いは次のとおりです。 +- `fpack_train_network.py`を使用します。 +- FramePack-F1モデルの学習時には`--f1`を指定してください。この場合、`--dit`にFramePack-F1モデルを指定する必要があります。このオプションは学習時のサンプル生成時のみに影響し、学習プロセス自体は元のFramePackモデルと同じです。 +- `--vae`、`--text_encoder1`、`--text_encoder2`、`--image_encoder`を指定する必要があります。 +- `--network_module networks.lora_framepack`を指定する必要があります。 +- 必要に応じて`--latent_window_size`引数(デフォルト9)を指定できます(キャッシング時と一致させる必要があります)。 +- `--fp8`(DiT用)や`--fp8_llm`(テキストエンコーダー1用)などのメモリ節約オプションが利用可能です。`--fp8_scaled`を使用することをお勧めします。 +- サンプル生成時にメモリ不足を防ぐため、VAE用の`--vae_chunk_size`、`--vae_spatial_tile_sample_min_size`オプションが利用可能です(キャッシング時と同様)。 +- メモリ節約のために`--gradient_checkpointing`が利用可能です。 +- バッチサイズが1より大きい場合にエラーが出た時には(特に`--sdpa`や`--xformers`を指定すると必ずエラーになります。)、`--split_attn`を指定してください。 + +
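+
+As an illustration of the options above, a FramePack-F1 training run on limited VRAM might combine `--f1`, `--fp8` with `--fp8_scaled`, and `--blocks_to_swap` as follows (a sketch, not a tuned recipe; paths and the swap count are placeholders):
+
+```bash
+accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/fpack_train_network.py \
+    --dit path/to/framepack_f1_dit_model \
+    --vae path/to/vae_model.safetensors \
+    --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
+    --image_encoder path/to/image_encoder_model.safetensors \
+    --dataset_config path/to/toml \
+    --sdpa --mixed_precision bf16 --fp8 --fp8_scaled --blocks_to_swap 16 \
+    --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing \
+    --network_module networks.lora_framepack --network_dim 32 \
+    --max_train_epochs 16 --save_every_n_epochs 1 --seed 42 \
+    --f1 --output_dir path/to/output_dir --output_name name-of-lora
+```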
+
+## Inference
+
+Inference uses a dedicated script `fpack_generate_video.py`.
+
+```bash
+python src/musubi_tuner/fpack_generate_video.py \
+    --dit path/to/dit_model \
+    --vae path/to/vae_model.safetensors \
+    --text_encoder1 path/to/text_encoder1 \
+    --text_encoder2 path/to/text_encoder2 \
+    --image_encoder path/to/image_encoder_model.safetensors \
+    --image_path path/to/start_image.jpg \
+    --prompt "A cat walks on the grass, realistic style." \
+    --video_size 512 768 --video_seconds 5 --fps 30 --infer_steps 25 \
+    --attn_mode sdpa --fp8_scaled \
+    --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 \
+    --save_path path/to/save/dir --output_type both \
+    --seed 1234 --lora_multiplier 1.0 --lora_weight path/to/lora.safetensors
+```
+
+Key differences from HunyuanVideo inference:
+- Uses `fpack_generate_video.py`.
+- `--f1` option is available for FramePack-F1 model inference (forward generation). You need to specify the FramePack-F1 model as `--dit`.
+- **Requires** specifying `--vae`, `--text_encoder1`, `--text_encoder2`, and `--image_encoder`.
+- **Requires** specifying `--image_path` for the starting frame.
+- **Requires** specifying `--video_seconds` or `--video_sections`. `--video_seconds` specifies the length of the video in seconds, while `--video_sections` specifies the number of sections. If `--video_sections` is specified, `--video_seconds` is ignored.
+- `--video_size` is the size of the generated video; height and width are specified in that order.
+- `--prompt`: Prompt for generation.
+- Optional `--latent_window_size` argument (default 9, should match caching and training).
+- `--fp8_scaled` option is available for DiT to reduce memory usage. Quality may be slightly lower. `--fp8_llm` option is available to reduce memory usage of Text Encoder 1. `--fp8` alone is also an option for DiT but `--fp8_scaled` potentially offers better quality.
+- LoRA loading options (`--lora_weight`, `--lora_multiplier`, `--include_patterns`, `--exclude_patterns`) are available. `--lycoris` is also supported.
+- `--embedded_cfg_scale` (default 10.0) controls the distilled guidance scale.
+- `--guidance_scale` (default 1.0) controls the standard classifier-free guidance scale. **Changing this from 1.0 is generally not recommended for the base FramePack model.**
+- `--guidance_rescale` (default 0.0) is available but typically not needed.
+- `--bulk_decode` option can decode all frames at once, potentially faster but uses more VRAM during decoding. `--vae_chunk_size` and `--vae_spatial_tile_sample_min_size` options are recommended to prevent out-of-memory errors.
+- `--sample_solver` (default `unipc`) is available but only `unipc` is implemented.
+- `--save_merged_model` option is available to save the DiT model after merging LoRA weights. Inference is skipped if this is specified.
+- `--latent_paddings` option overrides the default padding for each section. Specify it as a comma-separated list of integers, e.g., `--latent_paddings 0,0,0,0`. This option is ignored if `--f1` is specified.
+- `--custom_system_prompt` option overrides the default system prompt for the LLaMA Text Encoder 1. Specify it as a string. See [here](../src/musubi_tuner/hunyuan_model/text_encoder.py#L152) for the default system prompt.
+- `--rope_scaling_timestep_threshold` option is the RoPE scaling timestep threshold, default is None (disabled). If set, RoPE scaling is applied only when the timestep exceeds the threshold. Start with around 800 and adjust as needed.
This option is intended for one-frame inference and may not be suitable for other cases. +- `--rope_scaling_factor` option is the RoPE scaling factor, default is 0.5, assuming a resolution of 2x. For 1.5x resolution, around 0.7 is recommended. + +Other options like `--video_size`, `--fps`, `--infer_steps`, `--save_path`, `--output_type`, `--seed`, `--attn_mode`, `--blocks_to_swap`, `--vae_chunk_size`, `--vae_spatial_tile_sample_min_size` function similarly to HunyuanVideo/Wan2.1 where applicable. + +`--output_type` supports `latent_images` in addition to the options available in HunyuanVideo/Wan2.1. This option saves the latent and image files in the specified directory. + +The LoRA weights that can be specified in `--lora_weight` are not limited to the FramePack weights trained in this repository. You can also specify the HunyuanVideo LoRA weights from this repository and the HunyuanVideo LoRA weights from diffusion-pipe (automatic detection). + +The maximum value for `--blocks_to_swap` is 38. + +
+日本語 + +FramePackの推論は専用のスクリプト`fpack_generate_video.py`を使用します。コマンド記述例は英語版を参考にしてください。 + +HunyuanVideoの推論との主な違いは次のとおりです。 +- `fpack_generate_video.py`を使用します。 +- `--f1`を指定すると、FramePack-F1モデルの推論を行います(順方向で生成)。`--dit`にFramePack-F1モデルを指定する必要があります。 +- `--vae`、`--text_encoder1`、`--text_encoder2`、`--image_encoder`を指定する必要があります。 +- `--image_path`を指定する必要があります(開始フレーム)。 +- `--video_seconds` または `--video_sections` を指定する必要があります。`--video_seconds`は秒単位でのビデオの長さを指定し、`--video_sections`はセクション数を指定します。`--video_sections`を指定した場合、`--video_seconds`は無視されます。 +- `--video_size`は生成するビデオのサイズで、高さと幅をその順番で指定します。 +- `--prompt`: 生成用のプロンプトです。 +- 必要に応じて`--latent_window_size`引数(デフォルト9)を指定できます(キャッシング時、学習時と一致させる必要があります)。 +- DiTのメモリ使用量を削減するために、`--fp8_scaled`オプションを指定可能です。品質はやや低下する可能性があります。またText Encoder 1のメモリ使用量を削減するために、`--fp8_llm`オプションを指定可能です。DiT用に`--fp8`単独のオプションも用意されていますが、`--fp8_scaled`の方が品質が良い可能性があります。 +- LoRAの読み込みオプション(`--lora_weight`、`--lora_multiplier`、`--include_patterns`、`--exclude_patterns`)が利用可能です。LyCORISもサポートされています。 +- `--embedded_cfg_scale`(デフォルト10.0)は、蒸留されたガイダンススケールを制御します。通常は変更しないでください。 +- `--guidance_scale`(デフォルト1.0)は、標準の分類器フリーガイダンススケールを制御します。**FramePackモデルのベースモデルでは、通常1.0から変更しないことをお勧めします。** +- `--guidance_rescale`(デフォルト0.0)も利用可能ですが、通常は必要ありません。 +- `--bulk_decode`オプションは、すべてのフレームを一度にデコードできるオプションです。高速ですが、デコード中にVRAMを多く使用します。VRAM不足エラーを防ぐために、`--vae_chunk_size`と`--vae_spatial_tile_sample_min_size`オプションを指定することをお勧めします。 +- `--sample_solver`(デフォルト`unipc`)は利用可能ですが、`unipc`のみが実装されています。 +- `--save_merged_model`オプションは、LoRAの重みをマージした後にDiTモデルを保存するためのオプションです。これを指定すると推論はスキップされます。 +- `--latent_paddings`オプションは、各セクションのデフォルトのパディングを上書きします。カンマ区切りの整数リストとして指定します。例:`--latent_paddings 0,0,0,0`。`--f1`を指定した場合は無視されます。 +- `--custom_system_prompt`オプションは、LLaMA Text Encoder 1のデフォルトのシステムプロンプトを上書きします。文字列として指定します。デフォルトのシステムプロンプトは[こちら](../src/musubi_tuner/hunyuan_model/text_encoder.py#L152)を参照してください。 +- `--rope_scaling_timestep_threshold`オプションはRoPEスケーリングのタイムステップ閾値で、デフォルトはNone(無効)です。設定すると、タイムステップが閾値以上の場合にのみRoPEスケーリングが適用されます。800程度から初めて調整してください。1フレーム推論時での使用を想定しており、それ以外の場合は想定していません。 +- `--rope_scaling_factor`オプションはRoPEスケーリング係数で、デフォルトは0.5で、解像度が2倍の場合を想定しています。1.5倍なら0.7程度が良いでしょう。 + +`--video_size`、`--fps`、`--infer_steps`、`--save_path`、`--output_type`、`--seed`、`--attn_mode`、`--blocks_to_swap`、`--vae_chunk_size`、`--vae_spatial_tile_sample_min_size`などの他のオプションは、HunyuanVideo/Wan2.1と同様に機能します。 + +`--lora_weight`に指定できるLoRAの重みは、当リポジトリで学習したFramePackの重み以外に、当リポジトリのHunyuanVideoのLoRA、diffusion-pipeのHunyuanVideoのLoRAが指定可能です(自動判定)。 + +`--blocks_to_swap`の最大値は38です。 +
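+
+For FramePack-F1 inference, the command is the same as above except that `--f1` is added and the FramePack-F1 weights are passed to `--dit`; `--video_sections` can be used instead of `--video_seconds` (a sketch with placeholder paths and values):
+
+```bash
+python src/musubi_tuner/fpack_generate_video.py \
+    --f1 --dit path/to/framepack_f1_dit_model \
+    --vae path/to/vae_model.safetensors \
+    --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
+    --image_encoder path/to/image_encoder_model.safetensors \
+    --image_path path/to/start_image.jpg \
+    --prompt "A cat walks on the grass, realistic style." \
+    --video_size 512 768 --video_sections 3 --fps 30 --infer_steps 25 \
+    --attn_mode sdpa --fp8_scaled --blocks_to_swap 20 \
+    --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 \
+    --save_path path/to/save/dir --output_type both --seed 1234
+```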
+
+## Batch and Interactive Modes / バッチモードとインタラクティブモード
+
+In addition to single video generation, FramePack supports batch generation from a prompt file and interactive prompt input:
+
+### Batch Mode from File / ファイルからのバッチモード
+
+Generate multiple videos from prompts stored in a text file:
+
+```bash
+python src/musubi_tuner/fpack_generate_video.py --from_file prompts.txt \
+    --dit path/to/dit_model --vae path/to/vae_model.safetensors \
+    --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
+    --image_encoder path/to/image_encoder_model.safetensors --save_path output_directory
+```
+
+The prompt file format:
+- One prompt per line
+- Empty lines and lines starting with # are ignored (comments)
+- Each line can include prompt-specific parameters using command-line style format:
+
+```
+A beautiful sunset over mountains --w 832 --h 480 --f 5 --d 42 --s 20 --i path/to/start_image.jpg
+A busy city street at night --w 480 --h 832 --i path/to/another_start.jpg
+```
+
+Supported inline parameters (if omitted, default values from the command line are used):
+- `--w`: Width
+- `--h`: Height
+- `--f`: Video seconds
+- `--d`: Seed
+- `--s`: Inference steps
+- `--g` or `--l`: Guidance scale
+- `--i`: Image path (for start image)
+- `--im`: Image mask path
+- `--n`: Negative prompt
+- `--vs`: Video sections
+- `--ei`: End image path
+- `--ci`: Control image path (explained in the one-frame inference documentation)
+- `--cim`: Control image mask path (explained in the one-frame inference documentation)
+- `--of`: One-frame inference mode options (same as `--one_frame_inference` on the command line)
+
+In batch mode, models are loaded once and reused for all prompts, significantly improving overall generation time compared to multiple single runs.
+
+### Interactive Mode / インタラクティブモード
+
+Interactive command-line interface for entering prompts:
+
+```bash
+python src/musubi_tuner/fpack_generate_video.py --interactive \
+    --dit path/to/dit_model --vae path/to/vae_model.safetensors \
+    --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
+    --image_encoder path/to/image_encoder_model.safetensors --save_path output_directory
+```
+
+In interactive mode:
+- Enter prompts directly at the command line
+- Use the same inline parameter format as batch mode
+- Use Ctrl+D (or Ctrl+Z on Windows) to exit
+- Models remain loaded between generations for efficiency
+日本語 + +単一動画の生成に加えて、FramePackは現在、ファイルからのバッチ生成とインタラクティブなプロンプト入力をサポートしています。 + +#### ファイルからのバッチモード + +テキストファイルに保存されたプロンプトから複数の動画を生成します: + +```bash +python src/musubi_tuner/fpack_generate_video.py --from_file prompts.txt +--dit path/to/dit_model --vae path/to/vae_model.safetensors +--text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 +--image_encoder path/to/image_encoder_model.safetensors --save_path output_directory +``` + +プロンプトファイルの形式(サンプルは英語ドキュメントを参照): +- 1行に1つのプロンプト +- 空行や#で始まる行は無視されます(コメント) +- 各行にはコマンドライン形式でプロンプト固有のパラメータを含めることができます: + +サポートされているインラインパラメータ(省略した場合、コマンドラインのデフォルト値が使用されます) +- `--w`: 幅 +- `--h`: 高さ +- `--f`: 動画の秒数 +- `--d`: シード +- `--s`: 推論ステップ +- `--g` または `--l`: ガイダンススケール +- `--i`: 画像パス(開始画像用) +- `--im`: 画像マスクパス +- `--n`: ネガティブプロンプト +- `--vs`: 動画セクション数 +- `--ei`: 終了画像パス +- `--ci`: 制御画像パス(1フレーム推論のドキュメントで解説) +- `--cim`: 制御画像マスクパス(1フレーム推論のドキュメントで解説) +- `--of`: 1フレーム推論モードオプション(コマンドラインの`--one_frame_inference`と同様、1フレーム推論のオプション) + +バッチモードでは、モデルは一度だけロードされ、すべてのプロンプトで再利用されるため、複数回の単一実行と比較して全体的な生成時間が大幅に改善されます。 + +#### インタラクティブモード + +プロンプトを入力するためのインタラクティブなコマンドラインインターフェース: + +```bash +python src/musubi_tuner/fpack_generate_video.py --interactive +--dit path/to/dit_model --vae path/to/vae_model.safetensors +--text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 +--image_encoder path/to/image_encoder_model.safetensors --save_path output_directory +``` + +インタラクティブモードでは: +- コマンドラインで直接プロンプトを入力 +- バッチモードと同じインラインパラメータ形式を使用 +- 終了するには Ctrl+D (Windowsでは Ctrl+Z) を使用 +- 効率のため、モデルは生成間で読み込まれたままになります +
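+
+Building on the inline parameters listed above, a single line in the prompt file can combine several of them, for example a section count, an end image, and a negative prompt (a hypothetical example with placeholder paths):
+
+```
+A cat walks on the grass, realistic style. --w 640 --h 480 --vs 3 --i path/to/start.png --ei path/to/end.png --n low quality, blurry --d 42 --s 25
+```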
+ +## Advanced Video Control Features (Experimental) / 高度なビデオ制御機能(実験的) + +This section describes experimental features added to the `fpack_generate_video.py` script to provide finer control over the generated video content, particularly useful for longer videos or sequences requiring specific transitions or states. These features leverage the Inverted Anti-drifting sampling method inherent to FramePack. + +### **1. End Image Guidance (`--end_image_path`)** + +* **Functionality:** Guides the generation process to make the final frame(s) of the video resemble a specified target image. +* **Usage:** `--end_image_path ` +* **Mechanism:** The provided image is encoded using the VAE. This latent representation is used as a target or starting point during the generation of the final video section (which is the first step in Inverted Anti-drifting). +* **Use Cases:** Defining a clear ending for the video, such as a character striking a specific pose or a product appearing in a close-up. + +This option is ignored if `--f1` is specified. The end image is not used in the FramePack-F1 model. + +### **2. Section Start Image Guidance (`--image_path` Extended Format)** + +* **Functionality:** Guides specific sections within the video to start with a visual state close to a provided image. + * You can force the start image by setting `--latent_paddings` to `0,0,0,0` (specify the number of sections as a comma-separated list). If `latent_paddings` is set to 1 or more, the specified image will be used as a reference image (default behavior). +* **Usage:** `--image_path "SECTION_SPEC:path/to/image.jpg;;;SECTION_SPEC:path/to/another.jpg;;;..."` + * `SECTION_SPEC`: Defines the target section(s). Rules: + * `0`: The first section of the video (generated last in Inverted Anti-drifting). + * `-1`: The last section of the video (generated first). + * `N` (non-negative integer): The N-th section (0-indexed). + * `-N` (negative integer): The N-th section from the end. + * `S-E` (range, e.g., `0-2`): Applies the same image guidance to sections S through E (inclusive). + * Use `;;;` as a separator between definitions. + * If no image is specified for a section, generation proceeds based on the prompt and preceding (future time) section context. +* **Mechanism:** When generating a specific section, if a corresponding start image is provided, its VAE latent representation is strongly referenced as the "initial state" for that section. This guides the beginning of the section towards the specified image while attempting to maintain temporal consistency with the subsequent (already generated) section. +* **Use Cases:** Defining clear starting points for scene changes, specifying character poses or attire at the beginning of certain sections. + +### **3. Section-Specific Prompts (`--prompt` Extended Format)** + +* **Functionality:** Allows providing different text prompts for different sections of the video, enabling more granular control over the narrative or action flow. +* **Usage:** `--prompt "SECTION_SPEC:Prompt text for section(s);;;SECTION_SPEC:Another prompt;;;..."` + * `SECTION_SPEC`: Uses the same rules as `--image_path`. + * Use `;;;` as a separator. + * If a prompt for a specific section is not provided, the prompt associated with index `0` (or the closest specified applicable prompt) is typically used. Check behavior if defaults are critical. +* **Mechanism:** During the generation of each section, the corresponding section-specific prompt is used as the primary textual guidance for the model. 
+* **Prompt Content Recommendation** when using `--latent_paddings 0,0,0,0` without `--f1` (original FramePack model): + * Recall that FramePack uses Inverted Anti-drifting and references future context. + * It is recommended to describe "**the main content or state change that should occur in the current section, *and* the subsequent events or states leading towards the end of the video**" in the prompt for each section. + * Including the content of subsequent sections in the current section's prompt helps the model maintain context and overall coherence. + * Example: For section 1, the prompt might describe what happens in section 1 *and* briefly summarize section 2 (and beyond). + * However, based on observations (e.g., the `latent_paddings` comment), the model's ability to perfectly utilize very long-term context might be limited. Experimentation is key. Describing just the "goal for the current section" might also work. Start by trying the "section and onwards" approach. +* Use the default prompt when `latent_paddings` is >= 1 or `--latent_paddings` is not specified, or when using `--f1` (FramePack-F1 model). +* **Use Cases:** Describing evolving storylines, gradual changes in character actions or emotions, step-by-step processes over time. + +### **Combined Usage Example** (with `--f1` not specified) + +Generating a 3-section video of "A dog runs towards a thrown ball, catches it, and runs back": + +```bash +python src/musubi_tuner/fpack_generate_video.py \ + --prompt "0:A dog runs towards a thrown ball, catches it, and runs back;;;1:The dog catches the ball and then runs back towards the viewer;;;2:The dog runs back towards the viewer holding the ball" \ + --image_path "0:./img_start_running.png;;;1:./img_catching.png;;;2:./img_running_back.png" \ + --end_image_path ./img_returned.png \ + --save_path ./output \ + # ... other arguments +``` + +* **Generation Order:** Section 2 -> Section 1 -> Section 0 +* **Generating Section 2:** + * Prompt: "The dog runs back towards the viewer holding the ball" + * Start Image: `./img_running_back.png` + * End Image: `./img_returned.png` (Initial target) +* **Generating Section 1:** + * Prompt: "The dog catches the ball and then runs back towards the viewer" + * Start Image: `./img_catching.png` + * Future Context: Generated Section 2 latent +* **Generating Section 0:** + * Prompt: "A dog runs towards a thrown ball, catches it, and runs back" + * Start Image: `./img_start_running.png` + * Future Context: Generated Section 1 & 2 latents + +### **Important Considerations** + +* **Inverted Generation:** Always remember that generation proceeds from the end of the video towards the beginning. Section `-1` (the last section, `2` in the example) is generated first. +* **Continuity vs. Guidance:** While start image guidance is powerful, drastically different images between sections might lead to unnatural transitions. Balance guidance strength with the need for smooth flow. +* **Prompt Optimization:** The prompt content recommendation is a starting point. Fine-tune prompts based on observed model behavior and desired output quality. + +
+日本語 + +### **高度な動画制御機能(実験的)** + +このセクションでは、`fpack_generate_video.py` スクリプトに追加された実験的な機能について説明します。これらの機能は、生成される動画の内容をより詳細に制御するためのもので、特に長い動画や特定の遷移・状態が必要なシーケンスに役立ちます。これらの機能は、FramePack固有のInverted Anti-driftingサンプリング方式を活用しています。 + +#### **1. 終端画像ガイダンス (`--end_image_path`)** + +* **機能:** 動画の最後のフレーム(群)を指定したターゲット画像に近づけるように生成を誘導します。 +* **書式:** `--end_image_path <画像ファイルパス>` +* **動作:** 指定された画像はVAEでエンコードされ、その潜在表現が動画の最終セクション(Inverted Anti-driftingでは最初に生成される)の生成時の目標または開始点として使用されます。 +* **用途:** キャラクターが特定のポーズで終わる、特定の商品がクローズアップで終わるなど、動画の結末を明確に定義する場合。 + +このオプションは、`--f1`を指定した場合は無視されます。FramePack-F1モデルでは終端画像は使用されません。 + +#### **2. セクション開始画像ガイダンス (`--image_path` 拡張書式)** + +* **機能:** 動画内の特定のセクションが、指定された画像に近い視覚状態から始まるように誘導します。 + * `--latent_paddings`を`0,0,0,0`(カンマ区切りでセクション数だけ指定)に設定することで、セクションの開始画像を強制できます。`latent_paddings`が1以上の場合、指定された画像は参照画像として使用されます。 +* **書式:** `--image_path "セクション指定子:画像パス;;;セクション指定子:別の画像パス;;;..."` + * `セクション指定子`: 対象セクションを定義します。ルール: + * `0`: 動画の最初のセクション(Inverted Anti-driftingでは最後に生成)。 + * `-1`: 動画の最後のセクション(最初に生成)。 + * `N`(非負整数): N番目のセクション(0始まり)。 + * `-N`(負整数): 最後からN番目のセクション。 + * `S-E`(範囲, 例:`0-2`): セクションSからE(両端含む)に同じ画像を適用。 + * 区切り文字は `;;;` です。 + * セクションに画像が指定されていない場合、プロンプトと後続(未来時刻)セクションのコンテキストに基づいて生成されます。 +* **動作:** 特定セクションの生成時、対応する開始画像が指定されていれば、そのVAE潜在表現がそのセクションの「初期状態」として強く参照されます。これにより、後続(生成済み)セクションとの時間的連続性を維持しようとしつつ、セクションの始まりを指定画像に近づけます。 +* **用途:** シーン変更の起点を明確にする、特定のセクション開始時のキャラクターのポーズや服装を指定するなど。 + +#### **3. セクション別プロンプト (`--prompt` 拡張書式)** + +* **機能:** 動画のセクションごとに異なるテキストプロンプトを与え、物語やアクションの流れをより細かく指示できます。 +* **書式:** `--prompt "セクション指定子:プロンプトテキスト;;;セクション指定子:別のプロンプト;;;..."` + * `セクション指定子`: `--image_path` と同じルールです。 + * 区切り文字は `;;;` です。 + * 特定セクションのプロンプトがない場合、通常はインデックス`0`に関連付けられたプロンプト(または最も近い適用可能な指定プロンプト)が使用されます。デフォルトの挙動が重要な場合は確認してください。 +* **動作:** 各セクションの生成時、対応するセクション別プロンプトがモデルへの主要なテキスト指示として使用されます。 +* `latent_paddings`に`0`を指定した場合(非F1モデル)の **プロンプト内容の推奨:** + * FramePackはInverted Anti-driftingを採用し、未来のコンテキストを参照することを思い出してください。 + * 各セクションのプロンプトには、「**現在のセクションで起こるべき主要な内容や状態変化、*および*それに続く動画の終端までの内容**」を記述することを推奨します。 + * 現在のセクションのプロンプトに後続セクションの内容を含めることで、モデルが全体的な文脈を把握し、一貫性を保つのに役立ちます。 + * 例:セクション1のプロンプトには、セクション1の内容 *と* セクション2の簡単な要約を記述します。 + * ただし、モデルの長期コンテキスト完全利用能力には限界がある可能性も示唆されています(例:`latent_paddings`コメント)。実験が鍵となります。「現在のセクションの目標」のみを記述するだけでも機能する場合があります。まずは「セクションと以降」アプローチを試すことをお勧めします。 +* 使用するプロンプトは、`latent_paddings`が`1`以上または指定されていない場合、または`--f1`(FramePack-F1モデル)を使用している場合は、通常のプロンプト内容を記述してください。 +* **用途:** 時間経過に伴うストーリーの変化、キャラクターの行動や感情の段階的な変化、段階的なプロセスなどを記述する場合。 + +#### **組み合わせ使用例** (`--f1`未指定時) + +「投げられたボールに向かって犬が走り、それを捕まえ、走って戻ってくる」3セクション動画の生成: +(コマンド記述例は英語版を参考にしてください) + +* **生成順序:** セクション2 → セクション1 → セクション0 +* **セクション2生成時:** + * プロンプト: "犬がボールを咥えてこちらに向かって走ってくる" + * 開始画像: `./img_running_back.png` + * 終端画像: `./img_returned.png` (初期目標) +* **セクション1生成時:** + * プロンプト: "犬がボールを捕まえ、その後こちらに向かって走ってくる" + * 開始画像: `./img_catching.png` + * 未来コンテキスト: 生成済みセクション2の潜在表現 +* **セクション0生成時:** + * プロンプト: "犬が投げられたボールに向かって走り、それを捕まえ、走って戻ってくる" + * 開始画像: `./img_start_running.png` + * 未来コンテキスト: 生成済みセクション1 & 2の潜在表現 + +#### **重要な考慮事項** + +* **逆順生成:** 生成は動画の終わりから始まりに向かって進むことを常に意識してください。セクション`-1`(最後のセクション、上の例では `2`)が最初に生成されます。 +* **連続性とガイダンスのバランス:** 開始画像ガイダンスは強力ですが、セクション間で画像が大きく異なると、遷移が不自然になる可能性があります。ガイダンスの強さとスムーズな流れの必要性のバランスを取ってください。 +* **プロンプトの最適化:** 推奨されるプロンプト内容はあくまでも参考です。モデルの観察された挙動と望ましい出力品質に基づいてプロンプトを微調整してください。 + +
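+
+As a variation on the combined example above, forcing each section to start from its specified start image would, per the description of `--latent_paddings`, add one zero per section, so three zeros for this three-section example (a sketch; the remaining arguments are the same as in the example above):
+
+```bash
+python src/musubi_tuner/fpack_generate_video.py \
+    --prompt "0:A dog runs towards a thrown ball, catches it, and runs back;;;1:The dog catches the ball and then runs back towards the viewer;;;2:The dog runs back towards the viewer holding the ball" \
+    --image_path "0:./img_start_running.png;;;1:./img_catching.png;;;2:./img_running_back.png" \
+    --end_image_path ./img_returned.png \
+    --latent_paddings 0,0,0 \
+    --save_path ./output \
+    # ... other arguments
+```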
diff --git a/exp_code/1_benchmark/musubi-tuner/docs/framepack_1f.md b/exp_code/1_benchmark/musubi-tuner/docs/framepack_1f.md new file mode 100644 index 0000000000000000000000000000000000000000..689d001464355022f413578e4ccd9e60fdcab376 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/framepack_1f.md @@ -0,0 +1,363 @@ +# FramePack One Frame (Single Frame) Inference and Training / FramePack 1フレーム推論と学習 + +## Overview / 概要 + +This document explains advanced inference and training methods using the FramePack model, particularly focusing on **"1-frame inference"** and its extensions. These features aim to leverage FramePack's flexibility to enable diverse image generation and editing tasks beyond simple video generation. + +### The Concept and Development of 1-Frame Inference + +While FramePack is originally a model for generating sequential video frames (or frame sections), it was discovered that by focusing on its internal structure, particularly how it handles temporal information with RoPE (Rotary Position Embedding), interesting control over single-frame generation is possible. + +1. **Basic 1-Frame Inference**: + * It takes an initial image and a prompt as input, limiting the number of generated frames to just one. + * In this process, by intentionally setting a large RoPE timestamp (`target_index`) for the single frame to be generated, a single static image can be obtained that reflects temporal and semantic changes from the initial image according to the prompt. + * This utilizes FramePack's characteristic of being highly sensitive to RoPE timestamps, as it supports bidirectional contexts like "Inverted anti-drifting." This allows for operations similar to natural language-based image editing, albeit in a limited capacity, without requiring additional training. + +2. **Kisekaeichi Method (Feature Merging via Post-Reference)**: + * This method, an extension of basic 1-frame inference, was **proposed by furusu**. In addition to the initial image, it also uses a reference image corresponding to a "next section-start image" (treated as `clean_latent_post`) as input. + * The RoPE timestamp (`target_index`) for the image to be generated is set to an intermediate value between the timestamps of the initial image and the section-end image. + * More importantly, masking (e.g., zeroing out specific regions) is applied to the latent representation of each reference image. For example, by setting masks to extract a character's face and body shape from the initial image and clothing textures from the reference image, an image can be generated that fuses the desired features of both, similar to a character "dress-up" or outfit swapping. This method can also be fundamentally achieved without additional training. + +3. **1f-mc (one frame multi-control) Method (Proximal Frame Blending)**: + * This method was **proposed by mattyamonaca**. It takes two reference images as input: an initial image (e.g., at `t=0`) and a subsequent image (e.g., at `t=1`, the first frame of a section), and generates a single image blending their features. + * Unlike Kisekaeichi, latent masking is typically not performed. + * To fully leverage this method, additional training using LoRA (Low-Rank Adaptation) is recommended. Through training, the model can better learn the relationship and blending method between the two input images to achieve specific editing effects. 
+ +### Integration into a Generalized Control Framework + +The concepts utilized in the methods above—specifying reference images, manipulating timestamps, and applying latent masks—have been generalized to create a more flexible control framework. +Users can arbitrarily specify the following elements for both inference and LoRA training: + +* **Control Images**: Any set of input images intended to influence the model. +* **Clean Latent Index (Indices)**: Timestamps corresponding to each control image. These are treated as `clean latent index` internally by FramePack and can be set to any position on the time axis. This is specified as `control_index`. +* **Latent Masks**: Masks applied to the latent representation of each control image, allowing selective control over which features from the control images are utilized. This is specified as `control_image_mask_path` or the alpha channel of the control image. +* **Target Index**: The timestamp for the single frame to be generated. + +This generalized control framework, along with corresponding extensions to the inference and LoRA training tools, has enabled advanced applications such as: + +* Development of LoRAs that stabilize 1-frame inference effects (e.g., a camera orbiting effect) that were previously unstable with prompts alone. +* Development of Kisekaeichi LoRAs that learn to perform desired feature merging under specific conditions (e.g., ignoring character information from a clothing reference image), thereby automating the masking process through learning. + +These features maximize FramePack's potential and open up new creative possibilities in static image generation and editing. Subsequent sections will detail the specific options for utilizing these functionalities. + +
+日本語 + +このドキュメントでは、FramePackモデルを用いた高度な推論および学習手法、特に「1フレーム推論」とその拡張機能について解説します。これらの機能は、FramePackの柔軟性を活かし、動画生成に留まらない多様な画像生成・編集タスクを実現することを目的としています。 + +### 1フレーム推論の発想と発展 + +FramePackは本来、連続する動画フレーム(またはフレームセクション)を生成するモデルですが、その内部構造、特に時間情報を扱うRoPE (Rotary Position Embedding) の扱いに着目することで、単一フレームの生成においても興味深い制御が可能になることが発見されました。 + +1. **基本的な1フレーム推論**: + * 開始画像とプロンプトを入力とし、生成するフレーム数を1フレームに限定します。 + * この際、生成する1フレームに割り当てるRoPEのタイムスタンプ(`target_index`)を意図的に大きな値に設定することで、開始画像からプロンプトに従って時間的・意味的に変化した単一の静止画を得ることができます。 + * これは、FramePackがInverted anti-driftingなどの双方向コンテキストに対応するため、RoPEのタイムスタンプに対して敏感に反応する特性を利用したものです。これにより、学習なしで限定的ながら自然言語による画像編集に近い操作が可能です。 + +2. **kisekaeichi方式 (ポスト参照による特徴マージ)**: + * 基本的な1フレーム推論を発展させたこの方式は、**furusu氏により提案されました**。開始画像に加え、「次のセクションの開始画像」に相当する参照画像(`clean_latent_post`として扱われる)も入力として利用します。 + * 生成する画像のRoPEタイムスタンプ(`target_index`)を、開始画像のタイムスタンプとセクション終端画像のタイムスタンプの中間的な値に設定します。 + * さらに重要な点として、各参照画像のlatent表現に対してマスク処理(特定領域を0で埋めるなど)を施します。例えば、開始画像からはキャラクターの顔や体型を、参照画像からは服装のテクスチャを抽出するようにマスクを設定することで、キャラクターの「着せ替え」のような、両者の望ましい特徴を融合させた画像を生成できます。この手法も基本的には学習不要で実現可能です。 + +3. **1f-mc (one frame multi-control) 方式 (近接フレームブレンド)**: + * この方式は、**mattyamonaca氏により提案されました**。開始画像(例: `t=0`)と、その直後の画像(例: `t=1`、セクションの最初のフレーム)の2つを参照画像として入力し、それらの特徴をブレンドした単一画像を生成します。 + * kisekaeichiとは異なり、latentマスクは通常行いません。 + * この方式の真価を発揮するには、LoRA (Low-Rank Adaptation) による追加学習が推奨されます。学習により、モデルは2つの入力画像間の関係性やブレンド方法をより適切に学習し、特定の編集効果を実現できます。 + +### 汎用的な制御フレームワークへの統合 + +上記の各手法で利用されていた「参照画像の指定」「タイムスタンプの操作」「latentマスクの適用」といった概念を一般化し、より柔軟な制御を可能にするための拡張が行われました。 +ユーザーは以下の要素を任意に指定して、推論およびLoRA学習を行うことができます。 + +* **制御画像 (Control Images)**: モデルに影響を与えるための任意の入力画像群。 +* **Clean Latent Index (Indices)**: 各制御画像に対応するタイムスタンプ。FramePack内部の`clean latent index`として扱われ、時間軸上の任意の位置を指定可能です。`control_index`として指定します。 +* **Latentマスク (Latent Masks)**: 各制御画像のlatentに適用するマスク。これにより、制御画像から利用する特徴を選択的に制御します。`control_image_mask_path`または制御画像のアルファチャンネルとして指定します。 +* **Target Index**: 生成したい単一フレームのタイムスタンプ。 + +この汎用的な制御フレームワークと、それに対応した推論ツールおよびLoRA学習ツールの拡張により、以下のような高度な応用が可能になりました。 + +* プロンプトだけでは不安定だった1フレーム推論の効果(例: カメラ旋回)を安定化させるLoRAの開発。 +* マスク処理を手動で行う代わりに、特定の条件下(例: 服の参照画像からキャラクター情報を無視する)で望ましい特徴マージを行うように学習させたkisekaeichi LoRAの開発。 + +これらの機能は、FramePackのポテンシャルを最大限に引き出し、静止画生成・編集における新たな創造の可能性を拓くものです。以降のセクションでは、これらの機能を実際に利用するための具体的なオプションについて説明します。 + +
+ +## One Frame (Single Frame) Training / 1フレーム学習 + +**This feature is experimental.** It trains in the same way as one frame inference. + +The dataset must be an image dataset. If you use caption files, you need to specify `control_directory` and place the **start images** in that directory. The `image_directory` should contain the images after the change. The filenames of both directories must match. Caption files should be placed in the `image_directory`. + +If you use JSONL files, specify them as `{"image_path": "/path/to/target_image1.jpg", "control_path": "/path/to/source_image1.jpg", "caption": "The object changes to red."}`. The `image_path` should point to the images after the change, and `control_path` should point to the starting images. + +For the dataset configuration, see [here](../src/musubi_tuner/dataset/dataset_config.md#sample-for-image-dataset-with-control-images) and [here](../src/musubi_tuner/dataset/dataset_config.md#framepack-one-frame-training). There are also examples for kisekaeichi and 1f-mc settings. + +For single frame training, specify `--one_frame` in `fpack_cache_latents.py` to create the cache. You can also use `--one_frame_no_2x` and `--one_frame_no_4x` options, which have the same meaning as `no_2x` and `no_4x` during inference. It is recommended to set these options to match the inference settings. + +If you change whether to use one frame training or these options, please overwrite the existing cache without specifying `--skip_existing`. + +Specify `--one_frame` in `fpack_train_network.py` to change the inference method during sample generation. + +The optimal training settings are currently unknown. Feedback is welcome. + +### Example of prompt file description for sample generation + +The command line options `--one_frame_inference` corresponds to `--of`, and `--control_image_path` corresponds to `--ci`. + +Note that `--ci` can be specified multiple times, but `--control_image_path` is specified as `--control_image_path img1.png img2.png`, while `--ci` is specified as `--ci img1.png --ci img2.png`. + +Normal single frame training: +``` +The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of no_2x,no_4x,target_index=1,control_index=0 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +Kisekaeichi training: +``` +The girl wears a school uniform. --i path/to/start_with_alpha.png --ci path/to/ref_with_alpha.png --ci path/to/start_with_alpha.png --of no_post,no_2x,no_4x,target_index=5,control_index=0;10 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +
+日本語 + +**この機能は実験的なものです。** 1フレーム推論と同様の方法で学習を行います。 + +データセットは画像データセットである必要があります。キャプションファイルを用いる場合は、`control_directory`を追加で指定し、そのディレクトリに**開始画像**を格納してください。`image_directory`には変化後の画像を格納します。両者のファイル名は一致させる必要があります。キャプションファイルは`image_directory`に格納してください。 + +JSONLファイルを用いる場合は、`{"image_path": "/path/to/target_image1.jpg", "control_path": "/path/to/source_image1.jpg", "caption": "The object changes to red"}`のように指定してください。`image_path`は変化後の画像、`control_path`は開始画像を指定します。 + +データセットの設定については、[こちら](../src/musubi_tuner/dataset/dataset_config.md#sample-for-image-dataset-with-control-images)と[こちら](../src/musubi_tuner/dataset/dataset_config.md#framepack-one-frame-training)も参照してください。kisekaeichiと1f-mcの設定例もそちらにあります。 + +1フレーム学習時は、`fpack_cache_latents.py`に`--one_frame`を指定してキャッシュを作成してください。また`--one_frame_no_2x`と`--one_frame_no_4x`オプションも利用可能です。推論時の`no_2x`、`no_4x`と同じ意味を持ちますので、推論時と同じ設定にすることをお勧めします。 + +1フレーム学習か否かを変更する場合、またこれらのオプションを変更する場合は、`--skip_existing`を指定せずに既存のキャッシュを上書きしてください。 + +また、`fpack_train_network.py`に`--one_frame`を指定してサンプル画像生成時の推論方法を変更してください。 + +最適な学習設定は今のところ不明です。フィードバックを歓迎します。 + +**サンプル生成のプロンプトファイル記述例** + +コマンドラインオプション`--one_frame_inference`に相当する `--of`と、`--control_image_path`に相当する`--ci`が用意されています。 + +※ `--ci`は複数指定可能ですが、`--control_image_path`は`--control_image_path img1.png img2.png`のようにスペースで区切るのに対して、`--ci`は`--ci img1.png --ci img2.png`のように指定するので注意してください。 + +通常の1フレーム学習: +``` +The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of no_2x,no_4x,target_index=1,control_index=0 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +kisekaeichi方式: +``` +The girl wears a school uniform. --i path/to/start_with_alpha.png --ci path/to/ref_with_alpha.png --ci path/to/start_with_alpha.png --of no_post,no_2x,no_4x,target_index=5,control_index=0;10 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +
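+
+To make the flags described above concrete, a one-frame latent caching run might look like the following (a sketch with placeholder paths); `--one_frame` is likewise added to the `fpack_train_network.py` command from the FramePack training documentation:
+
+```bash
+python src/musubi_tuner/fpack_cache_latents.py \
+    --dataset_config path/to/toml \
+    --vae path/to/vae_model.safetensors \
+    --image_encoder path/to/image_encoder_model.safetensors \
+    --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 \
+    --one_frame --one_frame_no_2x --one_frame_no_4x
+```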
+ +## One (single) Frame Inference / 1フレーム推論 + +**This feature is highly experimental** and not officially supported. It is intended for users who want to explore the potential of FramePack for one frame inference, which is not a standard feature of the model. + +This script also allows for one frame inference, which is not an official feature of FramePack but rather a custom implementation. + +Theoretically, it generates an image after a specified time from the starting image, following the prompt. This means that, although limited, it allows for natural language-based image editing. + +To perform one frame inference, specify some option in the `--one_frame_inference` option. Here is an example: + +```bash +--video_sections 1 --output_type latent_images --one_frame_inference default --image_path start_image.png --control_image_path start_image.png +``` + +The `--image_path` is used to obtain the SIGCLIP features for one frame inference. Normally, you should specify the starting image. The `--control_image_path` is newly used to specify the control image, but for normal one frame inference, you should also specify the starting image. + +The `--one_frame_inference` option is recommended to be set to `default` or `no_2x,no_4x`. If you specify `--output_type` as `latent_images`, both the latent and image will be saved. + +You can specify the following strings in the `--one_frame_inference` option, separated by commas: + +- `no_2x`: Generates without passing clean latents 2x with zero vectors to the model. Slightly improves generation speed. The impact on generation results is unknown. +- `no_4x`: Generates without passing clean latents 4x with zero vectors to the model. Slightly improves generation speed. The impact on generation results is unknown. +- `no_post`: Generates without passing clean latents post with zero vectors to the model. Improves generation speed by about 20%, but may result in unstable generation. +- `target_index=`: Specifies the index of the image to be generated. The default is the last frame (i.e., `latent_window_size`). + +For example, you can use `--one_frame_inference default` to pass clean latents 2x, clean latents 4x, and post to the model. `--one_frame_inference no_2x,no_4x` if you want to skip passing clean latents 2x and 4x to the model. `--one_frame_inference target_index=9` can be used to specify the target index for the generated image. + +The `--one_frame_inference` option also supports advanced inference, which is described in the next section. This option allows for more detailed control using additional parameters like `target_index` and `control_index` within this option. + +Normally, specify `--video_sections 1` to indicate only one section (one image). + +Increasing `target_index` from the default of 9 may result in larger changes. It has been confirmed that generation can be performed without breaking up to around 40. + +The `--end_image_path` is ignored for one frame inference. + +
+日本語 + +**この機能は非常に実験的であり**、公式にはサポートされていません。FramePackを使用して1フレーム推論の可能性を試したいユーザーに向けたものです。 + +このスクリプトでは、単一画像の推論を行うこともできます。FramePack公式の機能ではなく、独自の実装です。 + +理論的には、開始画像から、プロンプトに従い、指定時間経過後の画像を生成します。つまり制限付きですが自然言語による画像編集を行うことができます。 + +単一画像推論を行うには`--one_frame_inference`オプションに、何らかのオプションを指定してください。記述例は以下の通りです。 + +```bash +--video_sections 1 --output_type latent_images --one_frame_inference default --image_path start_image.png --control_image_path start_image.png +``` + +`--image_path`は、1フレーム推論ではSIGCLIPの特徴量を取得するために用いられます。通常は開始画像を指定してください。`--control_image_path`は新しく追加された引数で、制御用画像を指定するために用いられますが、通常は開始画像を指定してください。 + +`--one_frame_inference`のオプションは、`default`または `no_2x,no_4x`を推奨します。`--output_type`に`latent_images`を指定するとlatentと画像の両方が保存されます。 + +`--one_frame_inference`のオプションには、カンマ区切りで以下のオプションを任意個数指定できます。 + +- `no_2x`: ゼロベクトルの clean latents 2xをモデルに渡さずに生成します。わずかに生成速度が向上します。生成結果への影響は不明です。 +- `no_4x`: ゼロベクトルの clean latents 4xをモデルに渡さずに生成します。わずかに生成速度が向上します。生成結果への影響は不明です。 +- `no_post`: ゼロベクトルの clean latents の post を渡さずに生成します。生成速度が20%程度向上しますが、生成結果が不安定になる場合があります。 +- `target_index=<整数>`: 生成する画像のindexを指定します。デフォルトは最後のフレームです(=latent_window_size)。 + +たとえば、`--one_frame_inference default`を使用すると、clean latents 2x、clean latents 4x、postをモデルに渡します。`--one_frame_inference no_2x,no_4x`を使用すると、clean latents 2xと4xをモデルに渡すのをスキップします。`--one_frame_inference target_index=9`を使用して、生成する画像のターゲットインデックスを指定できます。 + +後述の高度な推論では、このオプション内で `target_index`、`control_index` といった追加のパラメータを指定して、より詳細な制御が可能です。 + +clean latents 2x、clean latents 4x、postをモデルに渡す場合でも値はゼロベクトルですが、値を渡すか否かで結果は変わります。特に`no_post`を指定すると、`latent_window_size`を大きくしたときに生成結果が不安定になる場合があります。 + +通常は`--video_sections 1` として1セクションのみ(画像1枚)を指定してください。 + +`target_index` をデフォルトの9から大きくすると、変化量が大きくなる可能性があります。40程度までは破綻なく生成されることを確認しています。 + +`--end_image_path`は無視されます。 + +
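+
+Assembled into a full command, the one-frame inference options shown above might be used as follows (a sketch; model paths, the prompt, and the resolution are placeholders, and `--fp8_scaled` or `--blocks_to_swap` can be added depending on available VRAM):
+
+```bash
+python src/musubi_tuner/fpack_generate_video.py \
+    --dit path/to/dit_model --vae path/to/vae_model.safetensors \
+    --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \
+    --image_encoder path/to/image_encoder_model.safetensors \
+    --prompt "The object changes to red." \
+    --video_size 576 384 --video_sections 1 --infer_steps 25 \
+    --attn_mode sdpa --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 \
+    --image_path path/to/start_image.png --control_image_path path/to/start_image.png \
+    --one_frame_inference default \
+    --output_type latent_images --save_path path/to/save/dir --seed 1234
+```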
+ +## kisekaeichi method (Post Reference Options) and 1f-mc (Multi-Control) / kisekaeichi方式(ポスト参照オプション)と1f-mc(マルチコントロール) + +The `kisekaeichi` method was proposed by furusu. The `1f-mc` method was proposed by mattyamonaca in pull request [#304](https://github.com/kohya-ss/musubi-tuner/pull/304). + +In this repository, these methods have been integrated and can be specified with the `--one_frame_inference` option. This allows for specifying any number of control images as clean latents, along with indices. This means you can specify multiple starting images and multiple clean latent posts. Additionally, masks can be applied to each image. + +It is expected to work only with FramePack (non-F1 model) and not with F1 models. + +The following options have been added to `--one_frame_inference`. These can be used in conjunction with existing flags like `target_index`, `no_post`, `no_2x`, and `no_4x`. + +- `control_index=`: Specifies the index(es) of the clean latent for the control image(s). You must specify the same number of indices as the number of control images specified with `--control_image_path`. + +Additionally, the following command-line options have been added. These arguments are only valid when `--one_frame_inference` is specified. + +- `--control_image_path [ ...]` : Specifies the path(s) to control (reference) image(s) for one frame inference. Provide one or more paths separated by spaces. Images with an alpha channel can be specified. If an alpha channel is present, it is used as a mask for the clean latent. +- `--control_image_mask_path [ ...]` : Specifies the path(s) to grayscale mask(s) to be applied to the control image(s). Provide one or more paths separated by spaces. Each mask is applied to the corresponding control image. The 255 areas are referenced, while the 0 areas are ignored. + +**Example of specifying kisekaeichi:** + +The kisekaeichi method works without training, but using a dedicated LoRA may yield better results. + +```bash +--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png clean_latent_post_image.png \ +--one_frame_inference target_index=1,control_index=0;10,no_post,no_2x,no_4x --control_image_mask_path ctrl_mask1.png ctrl_mask2.png +``` + +In this example, `start_image.png` (for `clean_latent_pre`) and `clean_latent_post_image.png` (for `clean_latent_post`) are the reference images. The `target_index` specifies the index of the generated image. The `control_index` specifies the clean latent index for each control image, so it will be `0;10`. The masks for the control images are specified with `--control_image_mask_path`. + +The optimal values for `target_index` and `control_index` are unknown. The `target_index` should be specified as 1 or higher. The `control_index` should be set to an appropriate value relative to `latent_window_size`. Specifying 1 for `target_index` results in less change from the starting image, but may introduce noise. Specifying 9 or 13 may reduce noise but result in larger changes from the original image. + +The `control_index` should be larger than `target_index`. Typically, it is set to `10`, but larger values (e.g., around `13-16`) may also work. + +Sample images and command lines for reproduction are as follows: + +```bash +python fpack_generate_video.py --video_size 832 480 --video_sections 1 --infer_steps 25 \ + --prompt "The girl in a school blazer in a classroom." 
--save_path path/to/output --output_type latent_images \ + --dit path/to/dit --vae path/to/vae --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \ + --image_encoder path/to/image_encoder --attn_mode sdpa --vae_spatial_tile_sample_min_size 128 --vae_chunk_size 32 \ + --image_path path/to/kisekaeichi_start.png --control_image_path path/to/kisekaeichi_start.png path/to/kisekaeichi_ref.png + --one_frame_inference target_index=1,control_index=0;10,no_2x,no_4x,no_post + --control_image_mask_path path/to/kisekaeichi_start_mask.png path/to/kisekaeichi_ref_mask.png --seed 1234 +``` + +Specify `--fp8_scaled` and `--blocks_to_swap` options according to your VRAM capacity. + +- [kisekaeichi_start.png](./kisekaeichi_start.png) +- [kisekaeichi_ref.png](./kisekaeichi_ref.png) +- [kisekaeichi_start_mask.png](./kisekaeichi_start_mask.png) +- [kisekaeichi_ref_mask.png](./kisekaeichi_ref_mask.png) + +Generation result: [kisekaeichi_result.png](./kisekaeichi_result.png) + + +**Example of 1f-mc (Multi-Control):** + +```bash +--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png 2nd_image.png \ +--one_frame_inference target_index=9,control_index=0;1,no_2x,no_4x +``` + +In this example, `start_image.png` is the starting image, and `2nd_image.png` is the reference image. The `target_index=9` specifies the index of the generated image, while `control_index=0;1` specifies the clean latent indices for each control image. + +1f-mc is intended to be used in combination with a trained LoRA, so adjust `target_index` and `control_index` according to the LoRA's description. + +
+日本語 + +`kisekaeichi`方式はfurusu氏により提案されました。また`1f-mc`方式はmattyamonaca氏によりPR [#304](https://github.com/kohya-ss/musubi-tuner/pull/304) で提案されました。 + +当リポジトリではこれらの方式を統合し、`--one_frame_inference`オプションで指定できるようにしました。これにより、任意の枚数の制御用画像を clean latentとして指定し、さらにインデックスを指定できます。つまり開始画像の複数枚指定やclean latent postの複数枚指定などが可能です。また、それぞれの画像にマスクを適用することもできます。 + +なお、FramePack無印のみ動作し、F1モデルでは動作しないと思われます。 + +`--one_frame_inference`に以下のオプションが追加されています。`target_index`、`no_post`、`no_2x`や`no_4x`など既存のフラグと併用できます。 + +- `control_index=<整数またはセミコロン区切りの整数>`: 制御用画像のclean latentのインデックスを指定します。`--control_image_path`で指定した制御用画像の数と同じ数のインデックスを指定してください。 + +またコマンドラインオプションに以下が追加されています。これらの引数は`--one_frame_inference`を指定した場合のみ有効です。 + +- `--control_image_path <パス1> [<パス2> ...]` : 1フレーム推論用の制御用(参照)画像のパスを1つ以上、スペース区切りで指定します。アルファチャンネルを持つ画像が指定可能です。アルファチャンネルがある場合は、clean latentへのマスクとして利用されます。 +- `--control_image_mask_path <パス1> [<パス2> ...]` : 制御用画像に適用するグレースケールマスクのパスを1つ以上、スペース区切りで指定します。各マスクは対応する制御用画像に適用されます。255の部分が参照される部分、0の部分が無視される部分です。 + +**kisekaeichiの指定例**: + +kisekaeichi方式は学習なしでも動作しますが、専用のLoRAを使用することで、より良い結果が得られる可能性があります。 + +```bash +--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png clean_latent_post_image.png \ +--one_frame_inference target_index=1,control_index=0;10,no_post,no_2x,no_4x --control_image_mask_path ctrl_mask1.png ctrl_mask2.png +``` + +`start_image.png`(clean_latent_preに相当)と`clean_latent_post_image.png`は参照画像(clean_latent_postに相当)です。`target_index`は生成する画像のインデックスを指定します。`control_index`はそれぞれの制御用画像のclean latent indexを指定しますので、`0;10` になります。また`--control_image_mask_path`に制御用画像に適用するマスクを指定します。 + +`target_index`、`control_index`の最適値は不明です。`target_index`は1以上を指定してください。`control_index`は`latent_window_size`に対して適切な値を指定してください。`target_index`に1を指定すると開始画像からの変化が少なくなりますが、ノイズが乗ったりすることが多いようです。9や13などを指定するとノイズは改善されるかもしれませんが、元の画像からの変化が大きくなります。 + +`control_index`は`target_index`より大きい値を指定してください。通常は`10`ですが、これ以上大きな値、たとえば`13~16程度でも動作するようです。 + +サンプル画像と再現のためのコマンドラインは以下のようになります。 + +```bash +python fpack_generate_video.py --video_size 832 480 --video_sections 1 --infer_steps 25 \ + --prompt "The girl in a school blazer in a classroom." 
--save_path path/to/output --output_type latent_images \ + --dit path/to/dit --vae path/to/vae --text_encoder1 path/to/text_encoder1 --text_encoder2 path/to/text_encoder2 \ + --image_encoder path/to/image_encoder --attn_mode sdpa --vae_spatial_tile_sample_min_size 128 --vae_chunk_size 32 \ + --image_path path/to/kisekaeichi_start.png --control_image_path path/to/kisekaeichi_start.png path/to/kisekaeichi_ref.png + --one_frame_inference target_index=1,control_index=0;10,no_2x,no_4x,no_post + --control_image_mask_path path/to/kisekaeichi_start_mask.png path/to/kisekaeichi_ref_mask.png --seed 1234 +``` + +VRAM容量に応じて、`--fp8_scaled`や`--blocks_to_swap`等のオプションを調整してください。 + +- [kisekaeichi_start.png](./kisekaeichi_start.png) +- [kisekaeichi_ref.png](./kisekaeichi_ref.png) +- [kisekaeichi_start_mask.png](./kisekaeichi_start_mask.png) +- [kisekaeichi_ref_mask.png](./kisekaeichi_ref_mask.png) + +生成結果: +- [kisekaeichi_result.png](./kisekaeichi_result.png) + +**1f-mcの指定例**: + +```bash +--video_sections 1 --output_type latent_images --image_path start_image.png --control_image_path start_image.png 2nd_image.png \ +--one_frame_inference target_index=9,control_index=0;1,no_2x,no_4x +``` + +この例では、`start_image.png`が開始画像で、`2nd_image.png`が参照画像です。`target_index=9`は生成する画像のインデックスを指定し、`control_index=0;1`はそれぞれの制御用画像のclean latent indexを指定しています。 + +1f-mcは学習したLoRAと組み合わせることを想定していますので、そのLoRAの説明に従って、`target_index`や`control_index`を調整してください。 + +
\ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/docs/sampling_during_training.md b/exp_code/1_benchmark/musubi-tuner/docs/sampling_during_training.md new file mode 100644 index 0000000000000000000000000000000000000000..e466331eb9e29a8ff64183fa037c632dcb671ac6 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/sampling_during_training.md @@ -0,0 +1,116 @@ +> 📝 Click on the language section to expand / 言語をクリックして展開 + +# Sampling during training / 学習中のサンプル画像生成 + +By preparing a prompt file, you can generate sample images during training. + +Please be aware that it consumes a considerable amount of VRAM, so be careful when generating sample images for videos with a large number of frames. Also, since it takes time to generate, adjust the frequency of sample image generation as needed. + +
+日本語 + +プロンプトファイルを用意することで、学習中にサンプル画像を生成することができます。 + +VRAMをそれなりに消費しますので、特にフレーム数が多い動画を生成する場合は注意してください。また生成には時間がかかりますので、サンプル画像生成の頻度は適宜調整してください。 +
+ +## How to use / 使い方 + +### Command line options for training with sampling / サンプル画像生成に関連する学習時のコマンドラインオプション + +Example of command line options for training with sampling / 記述例: + +```bash +--vae path/to/ckpts/hunyuan-video-t2v-720p/vae/pytorch_model.pt +--vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 +--text_encoder1 path/to/ckpts/text_encoder +--text_encoder2 path/to/ckpts/text_encoder_2 +--sample_prompts /path/to/prompt_file.txt +--sample_every_n_epochs 1 --sample_every_n_steps 1000 --sample_at_first +``` + +`--vae`, `--vae_chunk_size`, `--vae_spatial_tile_sample_min_size`, `--text_encoder1`, `--text_encoder2` are the same as when generating images, so please refer to [here](/README.md#inference) for details. `--fp8_llm` can also be specified. + +`--sample_prompts` specifies the path to the prompt file used for sample image generation. Details are described below. + +`--sample_every_n_epochs` specifies how often to generate sample images in epochs, and `--sample_every_n_steps` specifies how often to generate sample images in steps. + +`--sample_at_first` is specified when generating sample images at the beginning of training. + +Sample images and videos are saved in the `sample` directory in the directory specified by `--output_dir`. They are saved as `.png` for still images and `.mp4` for videos. + +
+日本語 + +`--vae`、`--vae_chunk_size`、`--vae_spatial_tile_sample_min_size`、`--text_encoder1`、`--text_encoder2`は、画像生成時と同様ですので、詳細は[こちら](/README.ja.md#推論)を参照してください。`--fp8_llm`も指定可能です。 + +`--sample_prompts`は、サンプル画像生成に使用するプロンプトファイルのパスを指定します。詳細は後述します。 + +`--sample_every_n_epochs`は、何エポックごとにサンプル画像を生成するかを、`--sample_every_n_steps`は、何ステップごとにサンプル画像を生成するかを指定します。 + +`--sample_at_first`は、学習開始時にサンプル画像を生成する場合に指定します。 + +サンプル画像、動画は、`--output_dir`で指定したディレクトリ内の、`sample`ディレクトリに保存されます。静止画の場合は`.png`、動画の場合は`.mp4`で保存されます。 +
+
+### Prompt file / プロンプトファイル
+
+The prompt file is a text file that contains the prompts for generating sample images. The example is as follows. / プロンプトファイルは、サンプル画像生成のためのプロンプトを記述したテキストファイルです。例は以下の通りです。
+
+```
+# prompt 1: for generating a cat video
+A cat walks on the grass, realistic style. --w 640 --h 480 --f 25 --d 1 --s 20
+
+# prompt 2: for generating a dog image
+A dog runs on the beach, realistic style. --w 960 --h 544 --f 1 --d 2 --s 20
+```
+
+A line starting with `#` is a comment.
+
+* `--w` specifies the width of the generated image or video. The default is 256.
+* `--h` specifies the height. The default is 256.
+* `--f` specifies the number of frames. The default is 1, which generates a still image.
+* `--d` specifies the seed. The default is random.
+* `--s` specifies the number of steps in generation. The default is 20.
+* `--g` specifies the embedded guidance scale (not the CFG scale). The default is 6.0 for HunyuanVideo and 10.0 for FramePack, matching each architecture's default at inference time. Specify 1.0 for SkyReels V1 models. This option is ignored for Wan2.1 models.
+* `--fs` specifies the discrete flow shift. The default is 14.5, which corresponds to 20 steps. In the HunyuanVideo paper, 7.0 is recommended for 50 steps, and 17.0 is recommended for fewer than 20 steps (e.g. 10). This option is ignored for FramePack models (10.0 is used).
+
+If you train I2V models, you must add the following option.
+
+* `--i path/to/image.png`: the image path for image2video inference.
+
+If you train Wan2.1-Fun-Control models, you must add the following option.
+
+* `--cn path/to/control_video_or_dir_of_images`: the path to the video or directory containing multiple images for control.
+
+If you train a model that uses classifier free guidance (such as Wan2.1), you can use the additional options below.
+
+* `--n negative prompt...`: the negative prompt for classifier free guidance. The default prompt for each model is used if omitted.
+* `--l 6.0`: the classifier free guidance scale. Should be set to 6.0 for SkyReels V1 models. 5.0 is the default value for Wan2.1 (if omitted).
+日本語
+
+`#` で始まる行はコメントです。
+
+* `--w` 生成画像、動画の幅を指定します。省略時は256です。
+* `--h` 高さを指定します。省略時は256です。
+* `--f` フレーム数を指定します。省略時は1で、静止画を生成します。
+* `--d` シードを指定します。省略時はランダムです。
+* `--s` 生成におけるステップ数を指定します。省略時は20です。
+* `--g` embedded guidance scaleを指定します(CFG scaleではありません)。省略時はHunyuanVideoは6.0、FramePackは10.0で、各アーキテクチャの推論時のデフォルト値です。SkyReels V1モデルの場合は1.0を指定してください。Wan2.1モデルの場合はこのオプションは無視されます。
+* `--fs` discrete flow shiftを指定します。省略時は14.5で、ステップ数20の場合に対応した値です。HunyuanVideoの論文では、ステップ数50の場合は7.0、ステップ数20未満(10など)で17.0が推奨されています。FramePackモデルはこのオプションは無視され、10.0が使用されます。
+
+I2Vモデルを学習する場合、以下のオプションを追加してください。
+
+* `--i path/to/image.png`: image2video推論用の画像パス。
+
+Wan2.1-Fun-Controlモデルを学習する場合、以下のオプションを追加してください。
+
+* `--cn path/to/control_video_or_dir_of_images`: control用の動画または複数枚の画像を含むディレクトリのパス。
+
+classifier free guidance(ネガティブプロンプト)を必要とするモデル(Wan2.1など)を学習する場合、以下の追加オプションを使用できます。
+
+* `--n negative prompt...`: classifier free guidance用のネガティブプロンプト。省略時はモデルごとのデフォルトプロンプトが使用されます。
+* `--l 6.0`: classifier free guidance scale。SkyReels V1モデルの場合は6.0に設定してください。Wan2.1の場合はデフォルト値が5.0です(省略時)。
+
diff --git a/exp_code/1_benchmark/musubi-tuner/docs/wan.md b/exp_code/1_benchmark/musubi-tuner/docs/wan.md
new file mode 100644
index 0000000000000000000000000000000000000000..d16ceeb1015882539505a5e7b1d50d32561a319d
--- /dev/null
+++ b/exp_code/1_benchmark/musubi-tuner/docs/wan.md
@@ -0,0 +1,531 @@
+> 📝 Click on the language section to expand / 言語をクリックして展開
+
+# Wan 2.1
+
+## Overview / 概要
+
+This is an unofficial training and inference script for [Wan2.1](https://github.com/Wan-Video/Wan2.1). The features are as follows.
+
+- fp8 support and memory reduction by block swap: inference of an 81-frame 720x1280 video with 24GB VRAM, and training with 720x1280 images with 24GB VRAM
+- Inference without installing Flash attention (using PyTorch's scaled dot product attention)
+- Supports xformers and Sage attention
+
+This feature is experimental.
+
+日本語 +[Wan2.1](https://github.com/Wan-Video/Wan2.1) の非公式の学習および推論スクリプトです。 + +以下の特徴があります。 + +- fp8対応およびblock swapによる省メモリ化:720x1280x81framesの動画を24GB VRAMで推論可能、720x1280の画像での学習が24GB VRAMで可能 +- Flash attentionのインストールなしでの実行(PyTorchのscaled dot product attentionを使用) +- xformersおよびSage attention対応 + +この機能は実験的なものです。 +
+
+## Download the model / モデルのダウンロード
+
+Download the T5 `models_t5_umt5-xxl-enc-bf16.pth` and CLIP `models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` from the following page: https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P/tree/main
+
+Download the VAE `Wan2.1_VAE.pth` from the above page, or download `split_files/vae/wan_2.1_vae.safetensors` from the following page: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/vae
+
+Download the DiT weights from the following page: https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
+
+Wan2.1 Fun Control model weights can be downloaded from [here](https://huggingface.co/alibaba-pai/Wan2.1-Fun-14B-Control). Navigate to each weight page and download. The Fun Control model seems to support not only T2V but also I2V tasks.
+
+Please select the appropriate weights according to T2V, I2V, resolution, model size, etc.
+
+`fp16` and `bf16` models can be used, and `fp8_e4m3fn` models can be used if `--fp8` (or `--fp8_base`) is specified without specifying `--fp8_scaled`. **Please note that `fp8_scaled` models are not supported even with `--fp8_scaled`.**
+
+(Thanks to Comfy-Org for providing the repackaged weights.)
+
+### Model support matrix / モデルサポートマトリックス
+
+* columns: training dtype (列:学習時のデータ型)
+* rows: model dtype (行:モデルのデータ型)
+
+| model \ training |bf16|fp16|--fp8_base|--fp8_base & --fp8_scaled|
+|--|--|--|--|--|
+|bf16|✓|--|✓|✓|
+|fp16|--|✓|✓|✓|
+|fp8_e4m3fn|--|--|✓|--|
+|fp8_scaled|--|--|--|--|
+
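+Reading the matrix, for example, an `fp8_e4m3fn` checkpoint would be used with `--fp8_base` (or `--fp8`) and without `--fp8_scaled`; the file name below is only a placeholder:
+
+```bash
+# fp8_e4m3fn DiT weights: specify --fp8_base, do not add --fp8_scaled
+--dit path/to/wan2.1_t2v_14B_fp8_e4m3fn.safetensors --fp8_base
+```
+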
+日本語
+T5 `models_t5_umt5-xxl-enc-bf16.pth` およびCLIP `models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` を、次のページからダウンロードしてください:https://huggingface.co/Wan-AI/Wan2.1-I2V-14B-720P/tree/main
+
+VAEは上のページから `Wan2.1_VAE.pth` をダウンロードするか、次のページから `split_files/vae/wan_2.1_vae.safetensors` をダウンロードしてください:https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/vae
+
+DiTの重みを次のページからダウンロードしてください:https://huggingface.co/Comfy-Org/Wan_2.1_ComfyUI_repackaged/tree/main/split_files/diffusion_models
+
+Wan2.1 Fun Controlモデルの重みは、[こちら](https://huggingface.co/alibaba-pai/Wan2.1-Fun-14B-Control)から、それぞれの重みのページに遷移し、ダウンロードしてください。Fun ControlモデルはT2VだけでなくI2Vタスクにも対応しているようです。
+
+T2VやI2V、解像度、モデルサイズなどにより適切な重みを選択してください。
+
+`fp16` および `bf16` モデルを使用できます。また、`--fp8` (または`--fp8_base`)を指定し`--fp8_scaled`を指定しないときには `fp8_e4m3fn` モデルを使用できます。**`fp8_scaled` モデルはいずれの場合もサポートされていませんのでご注意ください。**
+
+(repackaged版の重みを提供してくださっているComfy-Orgに感謝いたします。)
+
+ +## Pre-caching / 事前キャッシュ + +### Latent Pre-caching + +Latent pre-caching is almost the same as in HunyuanVideo. Create the cache using the following command: + +```bash +python src/musubi_tuner/wan_cache_latents.py --dataset_config path/to/toml --vae path/to/wan_2.1_vae.safetensors +``` + +If you train I2V models, add `--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` to specify the CLIP model. If not specified, the training will raise an error. + +If you're running low on VRAM, specify `--vae_cache_cpu` to use the CPU for the VAE internal cache, which will reduce VRAM usage somewhat. + +The control video settings are required for training the Fun-Control model. Please refer to [Dataset Settings](/src/musubi_tuner/dataset/dataset_config.md#sample-for-video-dataset-with-control-images) for details. + +
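+For example, caching latents for an I2V dataset on a machine with limited VRAM might combine the options above as follows (paths are placeholders):
+
+```bash
+python src/musubi_tuner/wan_cache_latents.py --dataset_config path/to/toml --vae path/to/wan_2.1_vae.safetensors --clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth --vae_cache_cpu
+```
+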
+日本語 +latentの事前キャッシングはHunyuanVideoとほぼ同じです。上のコマンド例を使用してキャッシュを作成してください。 + +I2Vモデルを学習する場合は、`--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` を追加してCLIPモデルを指定してください。指定しないと学習時にエラーが発生します。 + +VRAMが不足している場合は、`--vae_cache_cpu` を指定するとVAEの内部キャッシュにCPUを使うことで、使用VRAMを多少削減できます。 + +Fun-Controlモデルを学習する場合は、制御用動画の設定が必要です。[データセット設定](/src/musubi_tuner/dataset/dataset_config.md#sample-for-video-dataset-with-control-images)を参照してください。 +
+ +### Text Encoder Output Pre-caching + +Text encoder output pre-caching is also almost the same as in HunyuanVideo. Create the cache using the following command: + +```bash +python src/musubi_tuner/wan_cache_text_encoder_outputs.py --dataset_config path/to/toml --t5 path/to/models_t5_umt5-xxl-enc-bf16.pth --batch_size 16 +``` + +Adjust `--batch_size` according to your available VRAM. + +For systems with limited VRAM (less than ~16GB), use `--fp8_t5` to run the T5 in fp8 mode. + +
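+For example, on a GPU with less than about 16GB of VRAM you might lower the batch size and run T5 in fp8 (the values are illustrative):
+
+```bash
+python src/musubi_tuner/wan_cache_text_encoder_outputs.py --dataset_config path/to/toml --t5 path/to/models_t5_umt5-xxl-enc-bf16.pth --batch_size 4 --fp8_t5
+```
+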
+日本語 +テキストエンコーダ出力の事前キャッシングもHunyuanVideoとほぼ同じです。上のコマンド例を使用してキャッシュを作成してください。 + +使用可能なVRAMに合わせて `--batch_size` を調整してください。 + +VRAMが限られているシステム(約16GB未満)の場合は、T5をfp8モードで実行するために `--fp8_t5` を使用してください。 +
+ +## Training / 学習 + +### Training + +Start training using the following command (input as a single line): + +```bash +accelerate launch --num_cpu_threads_per_process 1 --mixed_precision bf16 src/musubi_tuner/wan_train_network.py + --task t2v-1.3B + --dit path/to/wan2.1_xxx_bf16.safetensors + --dataset_config path/to/toml --sdpa --mixed_precision bf16 --fp8_base + --optimizer_type adamw8bit --learning_rate 2e-4 --gradient_checkpointing + --max_data_loader_n_workers 2 --persistent_data_loader_workers + --network_module networks.lora_wan --network_dim 32 + --timestep_sampling shift --discrete_flow_shift 3.0 + --max_train_epochs 16 --save_every_n_epochs 1 --seed 42 + --output_dir path/to/output_dir --output_name name-of-lora +``` +The above is an example. The appropriate values for `timestep_sampling` and `discrete_flow_shift` need to be determined by experimentation. + +For additional options, use `python src/musubi_tuner/wan_train_network.py --help` (note that many options are unverified). + +`--task` is one of `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (for Wan2.1 official models), `t2v-1.3B-FC`, `t2v-14B-FC`, and `i2v-14B-FC` (for Wan2.1 Fun Control model). Specify the DiT weights for the task with `--dit`. + +Don't forget to specify `--network_module networks.lora_wan`. + +Other options are mostly the same as `hv_train_network.py`. + +Use `convert_lora.py` for converting the LoRA weights after training, as in HunyuanVideo. + +
+日本語 +`timestep_sampling`や`discrete_flow_shift`は一例です。どのような値が適切かは実験が必要です。 + +その他のオプションについては `python src/musubi_tuner/wan_train_network.py --help` を使用してください(多くのオプションは未検証です)。 + +`--task` には `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (これらはWan2.1公式モデル)、`t2v-1.3B-FC`, `t2v-14B-FC`, `i2v-14B-FC`(Wan2.1-Fun Controlモデル)を指定します。`--dit`に、taskに応じたDiTの重みを指定してください。 + + `--network_module` に `networks.lora_wan` を指定することを忘れないでください。 + +その他のオプションは、ほぼ`hv_train_network.py`と同様です。 + +学習後のLoRAの重みの変換は、HunyuanVideoと同様に`convert_lora.py`を使用してください。 +
+
+### Command line options for training with sampling / サンプル画像生成に関連する学習時のコマンドラインオプション
+
+Example of command line options for training with sampling / 記述例:
+
+```bash
+--vae path/to/wan_2.1_vae.safetensors
+--t5 path/to/models_t5_umt5-xxl-enc-bf16.pth
+--sample_prompts /path/to/prompt_file.txt
+--sample_every_n_epochs 1 --sample_every_n_steps 1000 --sample_at_first
+```
+
+Each option is the same as at inference time and as for HunyuanVideo. Please refer to [here](/docs/sampling_during_training.md) for details.
+
+If you train I2V models, add `--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` to specify the CLIP model.
+
+You can specify the initial image, the negative prompt and the control video (for Wan2.1-Fun-Control) in the prompt file. Please refer to [here](/docs/sampling_during_training.md#prompt-file--プロンプトファイル).
+
+日本語 +各オプションは推論時、およびHunyuanVideoの場合と同様です。[こちら](/docs/sampling_during_training.md)を参照してください。 + +I2Vモデルを学習する場合は、`--clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth` を追加してCLIPモデルを指定してください。 + +プロンプトファイルで、初期画像やネガティブプロンプト、制御動画(Wan2.1-Fun-Control用)等を指定できます。[こちら](/docs/sampling_during_training.md#prompt-file--プロンプトファイル)を参照してください。 +
+
+
+## Inference / 推論
+
+### Inference Options Comparison / 推論オプション比較
+
+#### Speed Comparison (Faster → Slower) / 速度比較(速い→遅い)
+*Note: Results may vary depending on GPU type*
+
+fp8_fast > bf16/fp16 (no block swap) > fp8 > fp8_scaled > bf16/fp16 (block swap)
+
+#### Quality Comparison (Higher → Lower) / 品質比較(高→低)
+
+bf16/fp16 > fp8_scaled > fp8 >> fp8_fast
+
+### T2V Inference / T2V推論
+
+The following is an example of T2V inference (input as a single line):
+
+```bash
+python src/musubi_tuner/wan_generate_video.py --fp8 --task t2v-1.3B --video_size 832 480 --video_length 81 --infer_steps 20
+--prompt "prompt for the video" --save_path path/to/save.mp4 --output_type both
+--dit path/to/wan2.1_t2v_1.3B_bf16_etc.safetensors --vae path/to/wan_2.1_vae.safetensors
+--t5 path/to/models_t5_umt5-xxl-enc-bf16.pth
+--attn_mode torch
+```
+
+`--task` is one of `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (these are Wan2.1 official models), `t2v-1.3B-FC`, `t2v-14B-FC` and `i2v-14B-FC` (for the Wan2.1-Fun Control model).
+
+`--attn_mode` is `torch`, `sdpa` (same as `torch`), `xformers`, `sageattn`, `flash2`, `flash` (same as `flash2`) or `flash3`. `torch` is the default. Other options require the corresponding library to be installed. `flash3` (Flash attention 3) is not tested.
+
+Specifying `--fp8` runs the DiT model in fp8 mode. fp8 can significantly reduce memory consumption but may impact output quality.
+
+`--fp8_scaled` can be specified in addition to `--fp8` to run the model with fp8 weight optimization. This slightly increases memory consumption and reduces speed, but improves output quality. See [here](advanced_config.md#fp8-weight-optimization-for-models--モデルの重みのfp8への最適化) for details.
+
+The `--fp8_fast` option is also available for faster inference on RTX 40x0 GPUs. This option requires the `--fp8_scaled` option. **This option seems to degrade the output quality.**
+
+`--fp8_t5` can be used to run the T5 model in fp8 format. This option reduces memory usage for the T5 model.
+
+`--negative_prompt` can be used to specify a negative prompt. If omitted, the default negative prompt is used.
+
+`--flow_shift` can be used to specify the flow shift (default 3.0 for I2V with 480p, 5.0 for others).
+
+`--guidance_scale` can be used to specify the guidance scale for classifier free guidance (default 5.0).
+
+`--blocks_to_swap` is the number of blocks to swap during inference. The default value is None (no block swap). The maximum value is 39 for the 14B model and 29 for the 1.3B model.
+
+`--vae_cache_cpu` enables the VAE cache in main memory. This reduces VRAM usage slightly but processing is slower.
+
+`--compile` enables torch.compile. See [here](/README.md#inference) for details.
+
+`--trim_tail_frames` can be used to trim the tail frames when saving. The default is 0.
+
+`--cfg_skip_mode` specifies the mode for skipping CFG in different steps. The default is `none` (all steps). `--cfg_apply_ratio` specifies the ratio of steps where CFG is applied. See below for details.
+
+`--include_patterns` and `--exclude_patterns` can be used to specify which LoRA modules to apply or exclude during inference. If not specified, all modules are applied by default. These options accept regular expressions.
+
+`--include_patterns` specifies the modules to be applied, and `--exclude_patterns` specifies the modules to be excluded. The regular expression is matched against the LoRA key name, and include takes precedence.
+
+The key name to be searched is in sd-scripts format (`lora_unet_<module name with dots replaced by underscores>`). For example, `lora_unet_blocks_9_cross_attn_k`.
+
+For example, if you specify `--exclude_patterns "blocks_[23]\d_"`, it will exclude modules containing `blocks_20` to `blocks_39`. If you specify `--include_patterns "cross_attn" --exclude_patterns "blocks_(0|1|2|3|4)_"`, it will apply LoRA to modules containing `cross_attn` and not containing `blocks_0` to `blocks_4`.
+
+If you specify multiple LoRA weights, please specify the patterns with multiple arguments (see the sketch below). For example: `--include_patterns "cross_attn" ".*" --exclude_patterns "dummy_do_not_exclude" "blocks_(0|1|2|3|4)"`. `".*"` is a regex that matches everything. `dummy_do_not_exclude` is a dummy regex that does not match anything.
+
+`--cpu_noise` generates the initial noise on the CPU. This may result in the same results as ComfyUI with the same seed (depending on other settings).
+
+If you are using the Fun Control model, specify the control video with `--control_path`. You can specify a video file or a folder containing multiple image files. The number of frames in the video file (or the number of images) should be at least the number specified in `--video_length` (plus 1 frame if you specify `--end_image_path`).
+
+Please try to match the aspect ratio of the control video with the aspect ratio specified in `--video_size` (there may be some deviation from the initial image of I2V due to the use of bucketing processing).
+
+Other options are the same as for `hv_generate_video.py` (some options are not supported, please check the help).
+
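+As a concrete sketch of the `--include_patterns` / `--exclude_patterns` usage described above, applying two LoRA weights where only the first is restricted to cross-attention modules could be written as follows (the weight file names are placeholders, and this assumes both files are passed to `--lora_weight` as in the multi-LoRA note above):
+
+```bash
+--lora_weight lora_a.safetensors lora_b.safetensors
+--include_patterns "cross_attn" ".*" --exclude_patterns "dummy_do_not_exclude" "blocks_(0|1|2|3|4)"
+```
+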
+日本語
+`--task` には `t2v-1.3B`, `t2v-14B`, `i2v-14B`, `t2i-14B` (これらはWan2.1公式モデル)、`t2v-1.3B-FC`, `t2v-14B-FC`, `i2v-14B-FC`(Wan2.1-Fun Controlモデル)を指定します。
+
+`--attn_mode` には `torch`, `sdpa`(`torch`と同じ)、`xformers`, `sageattn`, `flash2`, `flash`(`flash2`と同じ), `flash3` のいずれかを指定します。デフォルトは `torch` です。その他のオプションを使用する場合は、対応するライブラリをインストールする必要があります。`flash3`(Flash attention 3)は未テストです。
+
+`--fp8` を指定するとDiTモデルをfp8形式で実行します。fp8はメモリ消費を大幅に削減できますが、出力品質に影響を与える可能性があります。
+
+`--fp8_scaled` を `--fp8` と併用すると、fp8への重み量子化を行います。メモリ消費と速度はわずかに悪化しますが、出力品質が向上します。詳しくは[こちら](advanced_config.md#fp8-weight-optimization-for-models--モデルの重みのfp8への最適化)を参照してください。
+
+`--fp8_fast` オプションはRTX 40x0 GPUでの高速推論に使用されるオプションです。このオプションは `--fp8_scaled` オプションが必要です。**出力品質が劣化するようです。**
+
+`--fp8_t5` を指定するとT5モデルをfp8形式で実行します。T5モデル呼び出し時のメモリ使用量を削減します。
+
+`--negative_prompt` でネガティブプロンプトを指定できます。省略した場合はデフォルトのネガティブプロンプトが使用されます。
+
+`--flow_shift` でflow shiftを指定できます(480pのI2Vの場合はデフォルト3.0、それ以外は5.0)。
+
+`--guidance_scale` でclassifier free guidanceのガイダンススケールを指定できます(デフォルト5.0)。
+
+`--blocks_to_swap` は推論時のblock swapの数です。デフォルト値はNone(block swapなし)です。最大値は14Bモデルの場合39、1.3Bモデルの場合29です。
+
+`--vae_cache_cpu` を有効にすると、VAEのキャッシュをメインメモリに保持します。VRAM使用量が多少減りますが、処理は遅くなります。
+
+`--compile`でtorch.compileを有効にします。詳細については[こちら](/README.md#inference)を参照してください。
+
+`--trim_tail_frames` で保存時に末尾のフレームをトリミングできます。デフォルトは0です。
+
+`--cfg_skip_mode` は異なるステップでCFGをスキップするモードを指定します。デフォルトは `none`(全ステップ)。`--cfg_apply_ratio` はCFGが適用されるステップの割合を指定します。詳細は後述します。
+
+LoRAのどのモジュールを適用するかを、`--include_patterns`と`--exclude_patterns`で指定できます(未指定時・デフォルトは全モジュールに適用されます)。これらのオプションには、正規表現を指定します。`--include_patterns`は適用するモジュール、`--exclude_patterns`は適用しないモジュールを指定します。正規表現がLoRAのキー名に含まれるかどうかで判断され、includeが優先されます。
+
+検索対象となるキー名は sd-scripts 形式(`lora_unet_<モジュール名のドットを_に置換したもの>`)です。例:`lora_unet_blocks_9_cross_attn_k`
+
+たとえば `--exclude_patterns "blocks_[23]\d_"`のみを指定すると、`blocks_20`から`blocks_39`を含むモジュールが除外されます。`--include_patterns "cross_attn" --exclude_patterns "blocks_(0|1|2|3|4)_"`のようにincludeとexcludeを指定すると、`cross_attn`を含むモジュールで、かつ`blocks_0`から`blocks_4`を含まないモジュールにLoRAが適用されます。
+
+複数のLoRAの重みを指定する場合は、複数個の引数で指定してください。例:`--include_patterns "cross_attn" ".*" --exclude_patterns "dummy_do_not_exclude" "blocks_(0|1|2|3|4)"`。`".*"`は全てにマッチする正規表現です。`dummy_do_not_exclude`は何にもマッチしないダミーの正規表現です。
+
+`--cpu_noise`を指定すると初期ノイズをCPUで生成します。これにより同一seed時の結果がComfyUIと同じになる可能性があります(他の設定にもよります)。
+
+Fun Controlモデルを使用する場合は、`--control_path`で制御用の映像を指定します。動画ファイル、または複数枚の画像ファイルを含んだフォルダを指定できます。動画ファイルのフレーム数(または画像の枚数)は、`--video_length`で指定したフレーム数以上にしてください(後述の`--end_image_path`を指定した場合は、さらに+1フレーム)。
+
+制御用の映像のアスペクト比は、`--video_size`で指定したアスペクト比とできるかぎり合わせてください(bucketingの処理を流用しているためI2Vの初期画像とズレる場合があります)。
+
+その他のオプションは `hv_generate_video.py` と同じです(一部のオプションはサポートされていないため、ヘルプを確認してください)。
+
+
+#### CFG Skip Mode / CFGスキップモード
+
+These options allow you to balance generation speed against prompt accuracy. More skipped steps result in faster generation with potential quality degradation.
+
+Setting `--cfg_apply_ratio` to 0.5 speeds up the denoising loop by up to 25%.
+
+`--cfg_skip_mode` specifies one of the following modes:
+
+- `early`: Skips CFG in early steps for faster generation, applying guidance mainly in later refinement steps
+- `late`: Skips CFG in later steps, applying guidance during initial structure formation
+- `middle`: Skips CFG in middle steps, applying guidance in both early and later steps
+- `early_late`: Skips CFG in both early and late steps, applying it only in middle steps
+- `alternate`: Applies CFG in alternate steps based on the specified ratio
+- `none`: Applies CFG at all steps (default)
+
+`--cfg_apply_ratio` specifies a value from 0.0 to 1.0 controlling the proportion of steps where CFG is applied. For example, setting 0.5 means CFG will be applied in only 50% of the steps.
+
+If num_steps is 10, the following table shows the steps where CFG is applied based on the `--cfg_skip_mode` option (A means CFG is applied, S means it is skipped, `--cfg_apply_ratio` is 0.6):
+
+| skip mode | CFG apply pattern |
+|---|---|
+| early | SSSSAAAAAA |
+| late | AAAAAASSSS |
+| middle | AAASSSSAAA |
+| early_late | SSAAAAAASS |
+| alternate | SASASAASAS |
+
+The appropriate settings are unknown, but you may want to try `late` or `early_late` mode with a ratio of around 0.3 to 0.5.
+
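+For example, the suggestion above corresponds to adding options along these lines to the inference command (the values are only a starting point):
+
+```bash
+--cfg_skip_mode late --cfg_apply_ratio 0.4
+```
+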
+日本語 +これらのオプションは、生成速度とプロンプトの精度のバランスを取ることができます。スキップされるステップが多いほど、生成速度が速くなりますが、品質が低下する可能性があります。 + +ratioに0.5を指定することで、デノイジングのループが最大25%程度、高速化されます。 + +`--cfg_skip_mode` は次のモードのいずれかを指定します: + +- `early`:初期のステップでCFGをスキップして、主に終盤の精細化のステップで適用します +- `late`:終盤のステップでCFGをスキップし、初期の構造が決まる段階で適用します +- `middle`:中間のステップでCFGをスキップし、初期と終盤のステップの両方で適用します +- `early_late`:初期と終盤のステップの両方でCFGをスキップし、中間のステップのみ適用します +- `alternate`:指定された割合に基づいてCFGを適用します + +`--cfg_apply_ratio` は、CFGが適用されるステップの割合を0.0から1.0の値で指定します。たとえば、0.5に設定すると、CFGはステップの50%のみで適用されます。 + +具体的なパターンは上のテーブルを参照してください。 + +適切な設定は不明ですが、モードは`late`または`early_late`、ratioは0.3~0.5程度から試してみると良いかもしれません。 +
+ +#### Skip Layer Guidance + +Skip Layer Guidance is a feature that uses the output of a model with some blocks skipped as the unconditional output of classifier free guidance. It was originally proposed in [SD 3.5](https://github.com/comfyanonymous/ComfyUI/pull/5404) and first applied in Wan2GP in [this PR](https://github.com/deepbeepmeep/Wan2GP/pull/61). It may improve the quality of generated videos. + +The implementation of SD 3.5 is [here](https://github.com/Stability-AI/sd3.5/blob/main/sd3_impls.py), and the implementation of Wan2GP (the PR mentioned above) has some different specifications. This inference script allows you to choose between the two methods. + +*The SD3.5 method applies slg output in addition to cond and uncond (slows down the speed). The Wan2GP method uses only cond and slg output.* + +The following arguments are available: + +- `--slg_mode`: Specifies the SLG mode. `original` for SD 3.5 method, `uncond` for Wan2GP method. Default is None (no SLG). +- `--slg_layers`: Specifies the indices of the blocks (layers) to skip in SLG, separated by commas. Example: `--slg_layers 4,5,6`. Default is empty (no skip). If this option is not specified, `--slg_mode` is ignored. +- `--slg_scale`: Specifies the scale of SLG when `original`. Default is 3.0. +- `--slg_start`: Specifies the start step of SLG application in inference steps from 0.0 to 1.0. Default is 0.0 (applied from the beginning). +- `--slg_end`: Specifies the end step of SLG application in inference steps from 0.0 to 1.0. Default is 0.3 (applied up to 30% from the beginning). + +Appropriate settings are unknown, but you may want to try `original` mode with a scale of around 3.0 and a start ratio of 0.0 and an end ratio of 0.5, with layers 4, 5, and 6 skipped. + +
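+The suggested starting point above corresponds to options along these lines (a sketch; adjust per your content):
+
+```bash
+--slg_mode original --slg_layers 4,5,6 --slg_scale 3.0 --slg_start 0.0 --slg_end 0.5
+```
+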
+日本語 +Skip Layer Guidanceは、一部のblockをスキップしたモデル出力をclassifier free guidanceのunconditional出力に使用する機能です。元々は[SD 3.5](https://github.com/comfyanonymous/ComfyUI/pull/5404)で提案されたもので、Wan2.1には[Wan2GPのこちらのPR](https://github.com/deepbeepmeep/Wan2GP/pull/61)で初めて適用されました。生成動画の品質が向上する可能性があります。 + +SD 3.5の実装は[こちら](https://github.com/Stability-AI/sd3.5/blob/main/sd3_impls.py)で、Wan2GPの実装(前述のPR)は一部仕様が異なります。この推論スクリプトでは両者の方式を選択できるようになっています。 + +※SD3.5方式はcondとuncondに加えてslg outputを適用します(速度が低下します)。Wan2GP方式はcondとslg outputのみを使用します。 + +以下の引数があります。 + +- `--slg_mode`:SLGのモードを指定します。`original`でSD 3.5の方式、`uncond`でWan2GPの方式です。デフォルトはNoneで、SLGを使用しません。 +- `--slg_layers`:SLGでスキップするblock (layer)のインデクスをカンマ区切りで指定します。例:`--slg_layers 4,5,6`。デフォルトは空(スキップしない)です。このオプションを指定しないと`--slg_mode`は無視されます。 +- `--slg_scale`:`original`のときのSLGのスケールを指定します。デフォルトは3.0です。 +- `--slg_start`:推論ステップのSLG適用開始ステップを0.0から1.0の割合で指定します。デフォルトは0.0です(最初から適用)。 +- `--slg_end`:推論ステップのSLG適用終了ステップを0.0から1.0の割合で指定します。デフォルトは0.3です(最初から30%まで適用)。 + +適切な設定は不明ですが、`original`モードでスケールを3.0程度、開始割合を0.0、終了割合を0.5程度に設定し、4, 5, 6のlayerをスキップする設定から始めると良いかもしれません。 +
+
+### I2V Inference / I2V推論
+
+The following is an example of I2V inference (input as a single line):
+
+```bash
+python src/musubi_tuner/wan_generate_video.py --fp8 --task i2v-14B --video_size 832 480 --video_length 81 --infer_steps 20
+--prompt "prompt for the video" --save_path path/to/save.mp4 --output_type both
+--dit path/to/wan2.1_i2v_480p_14B_bf16_etc.safetensors --vae path/to/wan_2.1_vae.safetensors
+--t5 path/to/models_t5_umt5-xxl-enc-bf16.pth --clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth
+--attn_mode torch --image_path path/to/image.jpg
+```
+
+Add `--clip` to specify the CLIP model. `--image_path` is the path to the image to be used as the initial frame.
+
+`--end_image_path` can be used to specify the end image. This option is experimental. When this option is specified, the saved video will be slightly longer than the specified number of frames and will have noise, so it is recommended to specify `--trim_tail_frames 3` to trim the tail frames.
+
+You can also use the Fun Control model for I2V inference. Specify the control video with `--control_path`.
+
+Other options are the same as for T2V inference.
+
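+For example, to also condition on an end image and trim the noisy tail frames as recommended above, the following could be added to the command (the path is a placeholder):
+
+```bash
+--end_image_path path/to/end_image.jpg --trim_tail_frames 3
+```
+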
+日本語 +`--clip` を追加してCLIPモデルを指定します。`--image_path` は初期フレームとして使用する画像のパスです。 + +`--end_image_path` で終了画像を指定できます。このオプションは実験的なものです。このオプションを指定すると、保存される動画が指定フレーム数よりもやや多くなり、かつノイズが乗るため、`--trim_tail_frames 3` などを指定して末尾のフレームをトリミングすることをお勧めします。 + +I2V推論でもFun Controlモデルが使用できます。`--control_path` で制御用の映像を指定します。 + +その他のオプションはT2V推論と同じです。 +
+
+### New Batch and Interactive Modes / 新しいバッチモードとインタラクティブモード
+
+In addition to single video generation, Wan 2.1 now supports batch generation from a file and interactive prompt input:
+
+#### Batch Mode from File / ファイルからのバッチモード
+
+Generate multiple videos from prompts stored in a text file:
+
+```bash
+python src/musubi_tuner/wan_generate_video.py --from_file prompts.txt --task t2v-14B
+--dit path/to/model.safetensors --vae path/to/vae.safetensors
+--t5 path/to/t5_model.pth --save_path output_directory
+```
+
+The prompt file format:
+- One prompt per line
+- Empty lines and lines starting with # are ignored (comments)
+- Each line can include prompt-specific parameters using command-line style format:
+
+```
+A beautiful sunset over mountains --w 832 --h 480 --f 81 --d 42 --s 20
+A busy city street at night --w 480 --h 832 --g 7.5 --n low quality, blurry
+```
+
+Supported inline parameters (if omitted, default values from the command line are used):
+- `--w`: Width
+- `--h`: Height
+- `--f`: Frame count
+- `--d`: Seed
+- `--s`: Inference steps
+- `--g` or `--l`: Guidance scale
+- `--fs`: Flow shift
+- `--i`: Image path (for I2V)
+- `--cn`: Control path (for Fun Control)
+- `--n`: Negative prompt
+
+In batch mode, models are loaded once and reused for all prompts, significantly improving overall generation time compared to multiple single runs.
+
+#### Interactive Mode / インタラクティブモード
+
+Interactive command-line interface for entering prompts:
+
+```bash
+python src/musubi_tuner/wan_generate_video.py --interactive --task t2v-14B
+--dit path/to/model.safetensors --vae path/to/vae.safetensors
+--t5 path/to/t5_model.pth --save_path output_directory
+```
+
+In interactive mode:
+- Enter prompts directly at the command line
+- Use the same inline parameter format as batch mode
+- Use Ctrl+D (or Ctrl+Z on Windows) to exit
+- Models remain loaded between generations for efficiency
+
+日本語 +単一動画の生成に加えて、Wan 2.1は現在、ファイルからのバッチ生成とインタラクティブなプロンプト入力をサポートしています。 + +#### ファイルからのバッチモード + +テキストファイルに保存されたプロンプトから複数の動画を生成します: + +```bash +python src/musubi_tuner/wan_generate_video.py --from_file prompts.txt --task t2v-14B +--dit path/to/model.safetensors --vae path/to/vae.safetensors +--t5 path/to/t5_model.pth --save_path output_directory +``` + +プロンプトファイルの形式: +- 1行に1つのプロンプト +- 空行や#で始まる行は無視されます(コメント) +- 各行にはコマンドライン形式でプロンプト固有のパラメータを含めることができます: + +サポートされているインラインパラメータ(省略した場合、コマンドラインのデフォルト値が使用されます) +- `--w`: 幅 +- `--h`: 高さ +- `--f`: フレーム数 +- `--d`: シード +- `--s`: 推論ステップ +- `--g` または `--l`: ガイダンススケール +- `--fs`: フローシフト +- `--i`: 画像パス(I2V用) +- `--cn`: コントロールパス(Fun Control用) +- `--n`: ネガティブプロンプト + +バッチモードでは、モデルは一度だけロードされ、すべてのプロンプトで再利用されるため、複数回の単一実行と比較して全体的な生成時間が大幅に改善されます。 + +#### インタラクティブモード + +プロンプトを入力するためのインタラクティブなコマンドラインインターフェース: + +```bash +python src/musubi_tuner/wan_generate_video.py --interactive --task t2v-14B +--dit path/to/model.safetensors --vae path/to/vae.safetensors +--t5 path/to/t5_model.pth --save_path output_directory +``` + +インタラクティブモードでは: +- コマンドラインで直接プロンプトを入力 +- バッチモードと同じインラインパラメータ形式を使用 +- 終了するには Ctrl+D (Windowsでは Ctrl+Z) を使用 +- 効率のため、モデルは生成間で読み込まれたままになります +
+ diff --git a/exp_code/1_benchmark/musubi-tuner/docs/wan_1f.md b/exp_code/1_benchmark/musubi-tuner/docs/wan_1f.md new file mode 100644 index 0000000000000000000000000000000000000000..ea9b0369c68f7f0140efdb38689acdb0e5f65733 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/docs/wan_1f.md @@ -0,0 +1,175 @@ +# Wan2.1 One Frame (Single Frame) Inference and Training / Wan2.1 1フレーム推論と学習 + +## Overview / 概要 + +This document describes the application of "One Frame Inference" found in the FramePack model to Wan2.1. + +1. **Basic One Frame Inference**: + * Input the starting image and prompt, limiting the number of frames to generate to 1 frame. Use the Wan2.1 I2V model. + * Intentionally set a large value for the RoPE timestamp assigned to the generated single frame. This aims to obtain a single static image that has changed temporally and semantically according to the prompt from the starting image. + * However, unlike FramePack, using Wan2.1's model as is for inference results in images that are almost identical to the starting image, with noise mixed in. This seems to be due to the characteristics of Wan2.1. + * By additionally training a LoRA, it is possible to reflect changes according to the prompt in the generated image while also reducing noise. + +2. **Intermediate Frame One Frame Inference**: + * Similar to the kisekaeichi method, use the FLF2V (First and Last Frame to Video) method to generate intermediate frames. Use the FLF2V model. + * Set the RoPE timestamp of the generated image to an intermediate value between the timestamps of the starting image and the ending image. + * (This is a theoretical proposal, implemented but not yet tested.) + +
+日本語
+
+このドキュメントでは、FramePackモデルで見いだされた「1フレーム推論」の、Wan2.1への適用について説明します。
+
+1. **基本的な1フレーム推論**:
+    * 開始画像とプロンプトを入力とし、生成するフレーム数を1フレームに限定します。Wan2.1の I2V モデルを使用します。
+    * この際、生成する1フレームに割り当てるRoPEのタイムスタンプを意図的に大きな値に設定します。これは開始画像からプロンプトに従って時間的・意味的に変化した単一の静止画を得ることを目的としています。
+    * しかしながらFramePackと異なり、Wan2.1のモデルをそのまま利用した推論では、このように設定しても生成される画像は開始画像とほぼ同じものになり、またノイズも混ざります。これはWan2.1の特性によるものと思われます。
+    * 追加でLoRAを学習することで、プロンプトに従った変化を生成画像に反映させることが可能で、かつノイズも抑えられることがわかりました。
+
+2. **中間フレームの1フレーム推論**:
+    * kisekaeichi方式と似た、FLF2V (First and Last Frame to Video) 方式を利用し、中間のフレームを生成します。FLF2Vモデルを使用します。
+    * 生成する画像のRoPEタイムスタンプを、開始画像のタイムスタンプと終端画像のタイムスタンプの中間的な値に設定します。
+
+
+## One (single) Frame Inference / 1フレーム推論
+
+**This feature is highly experimental** and is not officially supported. It is an independent implementation, not an official feature of Wan2.1.
+
+To perform one-frame inference, specify the `--one_frame_inference` option with `target_index` and `control_index`. In Wan2.1, it is necessary to combine this with a LoRA, so please set it up similarly to the LoRA training settings. The model used should also be the same.
+
+An example description is as follows:
+
+```bash
+--output_type latent_images --image_path start_image.png --control_image_path start_image.png \
+--one_frame_inference control_index=0,target_index=1
+```
+
+To perform one-frame inference for intermediate frames, specify multiple indices for `control_index` separated by semicolons. The description is as follows:
+
+```bash
+--output_type latent_images --image_path start_image.png --end_image_path end_image.png \
+--control_image_path start_image.png end_image.png --one_frame_inference control_index=0;2,target_index=1
+```
+
+When specifying `--output_type` as `latent_images`, both the latent and the image will be saved.
+
+The `--image_path` is used to obtain CLIP features for one-frame inference. Usually, the starting image should be specified. The `--end_image_path` is used to obtain CLIP features for the ending image. Usually, the ending image should be specified.
+
+The `--control_image_path` is a newly added argument to specify the control image. Usually, the starting image (and both starting and ending images for intermediate frame inference) should be specified.
+
+The options for `--one_frame_inference` are specified as comma-separated values. Here, the index represents the RoPE timestamp.
+
+- `target_index=<integer>`: Specifies the index of the generated image.
+- `control_index=<integer or semicolon-separated integers>`: Specifies the index of the control image. Please specify the same number of indices as the number of control images specified in `--control_image_path`.
+
+The optimal values for `target_index` and `control_index` are unknown. Please specify `target_index` as 1 or greater. For one-frame inference, specify `control_index=0`. For intermediate frame one-frame inference, specify `control_index=0;2`, where 0 and a value greater than `target_index` are specified.
+
+日本語 + +**この機能は非常に実験的であり**、公式にはサポートされていません。Wan2.1公式の機能ではなく、独自の実装です。 + +1フレーム推論を行うには`--one_frame_inference`オプションに `target_index` と `control_index` を指定してください。Wan2.1ではLoRAとの組み合わせが必要になりますので、LoRAの学習設定と同様の設定を行ってください。使用するモデルについても同様です。 + +記述例は以下の通りです。 + +```bash +--output_type latent_images --image_path start_image.png --control_image_path start_image.png \ +--one_frame_inference control_index=0,target_index=1 +``` + +中間フレームの1フレーム推論を行うには、`control_index`にセミコロン区切りで複数のインデックスを指定します。以下のように記述します。 + +```bash +--output_type latent_images --image_path start_image.png --end_image_path end_image.png \ +--control_image_path start_image.png end_image.png --one_frame_inference control_index=0;2,target_index=1 +``` + +`--output_type`に`latent_images`を指定するとlatentと画像の両方が保存されます。 + +`--image_path`は、1フレーム推論ではCLIPの特徴量を取得するために用いられます。通常は開始画像を指定してください。`--end_image_path`は、終了画像のCLIP特徴量を取得するために用いられます。通常は終了画像を指定してください。 + +`--control_image_path`は新しく追加された引数で、制御用画像を指定するために用いられます。通常は開始画像(中間フレーム推論の場合は開始画像と終了画像の両方)を指定してください。 + +`--one_frame_inference`のオプションには、カンマ区切りで以下のオプションを指定します。ここでindexはRoPEのタイムスタンプを表します。 + +- `target_index=<整数>`: 生成する画像のindexを指定します。 +- `control_index=<整数またはセミコロン区切りの整数>`: 制御用画像のindexを指定します。`--control_image_path`で指定した制御用画像の数と同じ数のインデックスを指定してください。 + +`target_index`、`control_index`の最適値は不明です。`target_index`は1以上を指定してください。`control_index`は、1フレーム推論では`control_index=0`を指定します。中間フレームの1フレーム推論では、`control_index=0;2`のように、0と`target_index`より大きい値を指定します。 + +
+ +## One Frame (Single Frame) Training / 1フレーム学習 + +**This feature is experimental.** It performs training in a manner similar to one-frame inference. + +This currently reuses the dataset settings of the FramePack model. Please refer to the [FramePack documentation](./framepack_1f.md#one-frame-single-frame-training--1フレーム学習) and the [FramePack dataset settings](../src/musubi_tuner/dataset/dataset_config.md#framepack-one-frame-training). + +`fp_1f_clean_indices` corresponds to the `control_index` described below. + +However, `fp_1f_no_post` is ignored in Wan2.1, and alpha masks are not yet supported. + +When performing one-frame training, please create the cache by specifying `--one_frame` in `wan_cache_latents.py`. Also, specify `--one_frame` in `wan_train_network.py` to change the inference method for sample image generation. + +In one-frame training, the I2V 14B model is used. Specify `--task i2v-14B` and the corresponding weights. For intermediate frame one-frame training, the FLF2V model is used. Specify `--task flf2v-14B` and the corresponding weights. + +In simple experiments for intermediate frame one-frame training, using `control_index=0;2`, `target_index=1` (in dataset settings, `fp_1f_clean_indices = [0, 2]`, `fp_1f_target_index = 1`), yielded better results than `0;10` and `5`. + +The optimal training settings are currently unknown. Feedback is welcome. + +### Example of prompt file description for sample generation + +The description is almost the same as for FramePack. The command line option `--one_frame_inference` corresponds to `--of`, and `--control_image_path` corresponds to `--ci`. `--ei` is used to specify the ending image. + +Note that while `--ci` can be specified multiple times, it should be specified as `--ci img1.png --ci img2.png`, unlike `--control_image_path` which is specified as `--control_image_path img1.png img2.png`. + +For normal one-frame training: +``` +The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of target_index=1,control_index=0 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +For intermediate frame one-frame training +``` +The girl wears a school uniform. --i path/to/start.png --ei path/to/end.png --ci path/to/start.png --ci path/to/end.png --of target_index=1,control_index=0;2 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +
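+Returning to the cache step mentioned above, the latent cache command for one-frame training might look like the following sketch (paths are placeholders; the I2V model also needs the CLIP weights when caching):
+
+```bash
+python src/musubi_tuner/wan_cache_latents.py --dataset_config path/to/toml --vae path/to/wan_2.1_vae.safetensors --clip path/to/models_clip_open-clip-xlm-roberta-large-vit-huge-14.pth --one_frame
+```
+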
+日本語 + +**この機能は実験的なものです。** 1フレーム推論と同様の方法で学習を行います。 + +現在は、FramePackモデルのデータセット設定を流用しています。[FramePackのドキュメント](./framepack_1f.md#one-frame-single-frame-training--1フレーム学習)および +[FramePackのデータセット設定](../src/musubi_tuner/dataset/dataset_config.md#framepack-one-frame-training)を参照してください。 + +`fp_1f_clean_indices` が後述の `control_index` に相当します。 + +ただし、`fp_1f_no_post`はWan2.1では無視されます。またアルファ値によるマスクも未対応です。 + +1フレーム学習時は、`wan_cache_latents.py`に`--one_frame`を指定してキャッシュを作成してください。また、`wan_train_network.py`に`--one_frame`を指定してサンプル画像生成時の推論方法を変更してください。 + +1フレーム学習ではI2Vの14Bモデルを使用します。`--task i2v-14B`を指定し、該当する重みを指定してください。中間フレームの1フレーム学習では、FLF2Vモデルを使用します。`--task flf2v-14B`を指定し、該当する重みを指定してください。 + +中間フレーム学習の簡単な実験では、`control_index=0;2`、`target_index=1`が(データセット設定では `fp_1f_clean_indices = [0, 2]`、`fp_1f_target_index = 1`)、`0;10`および`5`よりも良い結果を得られました。 + +最適な学習設定は今のところ不明です。フィードバックを歓迎します。 + +**サンプル生成のプロンプトファイル記述例** + +FramePackとほぼ同様です。コマンドラインオプション`--one_frame_inference`に相当する `--of`と、`--control_image_path`に相当する`--ci`が用意されています。`--ei`は終端画像を指定します。 + +※ `--control_image_path`は`--control_image_path img1.png img2.png`のようにスペースで区切るのに対して、`--ci`は`--ci img1.png --ci img2.png`のように指定するので注意してください。 + +通常の1フレーム学習: +``` +The girl wears a school uniform. --i path/to/start.png --ci path/to/start.png --of target_index=1,control_index=0 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +中間フレームの1フレーム学習(開始画像と終端画像の両方を指定): +``` +The girl wears a school uniform. --i path/to/start.png --ei path/to/end.png --ci path/to/start.png --ci path/to/end.png --of target_index=1,control_index=0;2 --d 1111 --f 1 --s 10 --fs 7 --d 1234 --w 384 --h 576 +``` + +
+ diff --git a/exp_code/1_benchmark/musubi-tuner/fpack_cache_latents.py b/exp_code/1_benchmark/musubi-tuner/fpack_cache_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..89098570d44bbe33ce673161f067648912f3e570 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/fpack_cache_latents.py @@ -0,0 +1,4 @@ +from musubi_tuner.fpack_cache_latents import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/fpack_cache_text_encoder_outputs.py b/exp_code/1_benchmark/musubi-tuner/fpack_cache_text_encoder_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..ccfae7454c15b1e58a77ad9679a18188c9a7739f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/fpack_cache_text_encoder_outputs.py @@ -0,0 +1,4 @@ +from musubi_tuner.fpack_cache_text_encoder_outputs import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/fpack_generate_video.py b/exp_code/1_benchmark/musubi-tuner/fpack_generate_video.py new file mode 100644 index 0000000000000000000000000000000000000000..6515d248b4fdc045310a15287cbaad2fe80eec26 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/fpack_generate_video.py @@ -0,0 +1,4 @@ +from musubi_tuner.fpack_generate_video import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/fpack_train_network.py b/exp_code/1_benchmark/musubi-tuner/fpack_train_network.py new file mode 100644 index 0000000000000000000000000000000000000000..ad03707e043599e2e47e28a7fec07b3732fe4d82 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/fpack_train_network.py @@ -0,0 +1,4 @@ +from musubi_tuner.fpack_train_network import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/hv_generate_video.py b/exp_code/1_benchmark/musubi-tuner/hv_generate_video.py new file mode 100644 index 0000000000000000000000000000000000000000..77a662bcd580f1a6fc35ea4318eb5c2866f1ec08 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/hv_generate_video.py @@ -0,0 +1,4 @@ +from musubi_tuner.hv_generate_video import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/hv_train.py b/exp_code/1_benchmark/musubi-tuner/hv_train.py new file mode 100644 index 0000000000000000000000000000000000000000..0ce30c77e103eff59d600f21efc7f37d3a30027f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/hv_train.py @@ -0,0 +1,4 @@ +from musubi_tuner.hv_train import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/hv_train_network.py b/exp_code/1_benchmark/musubi-tuner/hv_train_network.py new file mode 100644 index 0000000000000000000000000000000000000000..a9428cc2d56b96438ccac11216196413bc689d86 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/hv_train_network.py @@ -0,0 +1,4 @@ +from musubi_tuner.hv_train_network import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/lora_post_hoc_ema.py b/exp_code/1_benchmark/musubi-tuner/lora_post_hoc_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..87dedf0a4981522ccbc482645005ef93230e32c3 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/lora_post_hoc_ema.py @@ -0,0 +1,4 @@ +from musubi_tuner.lora_post_hoc_ema import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/merge_lora.py b/exp_code/1_benchmark/musubi-tuner/merge_lora.py new file mode 100644 index 
0000000000000000000000000000000000000000..dee3b3931bc51b26f302379938ecb2ea3c4b2835 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/merge_lora.py @@ -0,0 +1,4 @@ +from musubi_tuner.merge_lora import main + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/pyproject.toml b/exp_code/1_benchmark/musubi-tuner/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..1852b2358c3733957f9b597e73fbf683996fa179 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/pyproject.toml @@ -0,0 +1,75 @@ +[project] +name = "musubi-tuner" +version = "0.1.0" +description = "Musubi Tuner by kohya_ss" +readme = "README.md" +requires-python = ">=3.10, <3.13" +dependencies = [ + "accelerate>=1.6.0", + "av==14.0.1", + "bitsandbytes==0.45.4", + "diffusers>=0.32.1", + "einops==0.7.0", + "huggingface-hub>=0.30.0", + "opencv-python==4.10.0.84", + "pillow>=10.2.0", + "safetensors==0.4.5", + # "sageattention>=1.0.6", + "toml==0.10.2", + "tqdm==4.67.1", + "transformers>=4.46.3", + "voluptuous==0.15.2", + # Wan2.1 + "ftfy==6.3.1", + "easydict==1.13", +] + +[project.optional-dependencies] +cu124 = [ + "torch>=2.5.1", + "torchvision>=0.20.1", +] +cu128 = [ + "torch>=2.7.1", + "torchvision>=0.22.1", +] + +[tool.uv] +conflicts = [ + [ + { extra = "cu124" }, + { extra = "cu128" }, + ], +] + +[dependency-groups] +dev = [ + "ascii-magic==2.3.0", + "matplotlib==3.10.0", + "tensorboard", + "prompt-toolkit==3.0.51", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.uv.sources] +torch = [ + { index = "pytorch-cu124", extra = "cu124" }, + { index = "pytorch-cu128", extra = "cu128" }, +] +torchvision = [ + { index = "pytorch-cu124", extra = "cu124" }, + { index = "pytorch-cu128", extra = "cu128" }, +] + +[[tool.uv.index]] +name = "pytorch-cu124" +url = "https://download.pytorch.org/whl/cu124" +explicit = true + +[[tool.uv.index]] +name = "pytorch-cu128" +url = "https://download.pytorch.org/whl/cu128" +explicit = true diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/cache_latent.sh b/exp_code/1_benchmark/musubi-tuner/scripts/cache_latent.sh new file mode 100644 index 0000000000000000000000000000000000000000..900ec616e7ee5b9b7cd4ea175819158e08508056 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/cache_latent.sh @@ -0,0 +1,6 @@ +python ../src/musubi_tuner/fpack_cache_latents_ysh.py \ + --dataset_config /mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/src/dataset_config.toml \ + --vae /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/vae/diffusion_pytorch_model.safetensors \ + --image_encoder /mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl/image_encoder/model.safetensors \ + --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 + # --debug_mode video \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/cache_text_encoder.sh b/exp_code/1_benchmark/musubi-tuner/scripts/cache_text_encoder.sh new file mode 100644 index 0000000000000000000000000000000000000000..07c45d025daeb02ab5738ba464adb8fe6ca64e69 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/cache_text_encoder.sh @@ -0,0 +1,5 @@ +CUDA_VISIBLE_DEVICES=1 python ../src/musubi_tuner/fpack_cache_text_encoder_outputs.py \ + --dataset_config /mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/src/dataset_config.toml \ + --text_encoder1 /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/text_encoder/model-00001-of-00004.safetensors \ + --text_encoder2 
/mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/text_encoder_2/model.safetensors \ + --batch_size 16 \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/inference.sh b/exp_code/1_benchmark/musubi-tuner/scripts/inference.sh new file mode 100644 index 0000000000000000000000000000000000000000..ba9f8cadac0a01d6801208f4a3fcc449d8c689a4 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/inference.sh @@ -0,0 +1,13 @@ +CUDA_VISIBLE_DEVICES=7 python ../src/musubi_tuner/fpack_generate_video.py \ + --dit /mnt/workspace/checkpoints/lllyasviel/FramePackI2V_HY/diffusion_pytorch_model-00001-of-00003.safetensors \ + --vae /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/vae/diffusion_pytorch_model.safetensors \ + --text_encoder1 /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/text_encoder/model-00001-of-00004.safetensors \ + --text_encoder2 /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/text_encoder_2/model.safetensors \ + --image_encoder /mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl/image_encoder/model.safetensors \ + --image_path /mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/demo_output/peng.png \ + --prompt "a penguin playfully dancing in the snow, Antarctica." \ + --video_size 480 832 --video_seconds 5 --fps 30 --infer_steps 25 \ + --attn_mode sdpa --fp8_scaled \ + --vae_chunk_size 32 --vae_spatial_tile_sample_min_size 128 \ + --save_path ./demo_output_dir --output_type both \ + --seed 1234 --lora_multiplier 1.0 --lora_weight /mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/demo_output/name-of-lora-000090.safetensors \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/prompts.txt b/exp_code/1_benchmark/musubi-tuner/scripts/prompts.txt new file mode 100644 index 0000000000000000000000000000000000000000..d1908a1985df58af89f7620b2e48d06d9224a2ac --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/prompts.txt @@ -0,0 +1 @@ +A beautiful sunset over mountains --w 832 --h 480 --f 5 --d 42 --s 20 --i path/to/start_image.jpg \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/scripts/train.sh b/exp_code/1_benchmark/musubi-tuner/scripts/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..6b21ff0a9d93fdd00c027e708e71b34c6115c185 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/scripts/train.sh @@ -0,0 +1,18 @@ +export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True + +accelerate launch --num_cpu_threads_per_process 2 --mixed_precision bf16 ../src/musubi_tuner/fpack_train_network.py \ + --dit /mnt/workspace/checkpoints/lllyasviel/FramePackI2V_HY/diffusion_pytorch_model-00001-of-00003.safetensors \ + --vae /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/vae/diffusion_pytorch_model.safetensors \ + --text_encoder1 /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/text_encoder/model-00001-of-00004.safetensors \ + --text_encoder2 /mnt/workspace/checkpoints/hunyuanvideo-community/HunyuanVideo/text_encoder_2/model.safetensors \ + --image_encoder /mnt/workspace/checkpoints/lllyasviel/flux_redux_bfl/image_encoder/model.safetensors \ + --dataset_config /mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/src/dataset_config.toml \ + --mixed_precision bf16 \ + --optimizer_type adamw8bit --learning_rate 2e-3 --gradient_checkpointing \ + --timestep_sampling shift --weighting_scheme none --discrete_flow_shift 3.0 \ + --max_data_loader_n_workers 8 
--persistent_data_loader_workers \ + --network_module networks.lora_framepack --network_dim 128 \ + --max_train_epochs 100 --save_every_n_epochs 10 --seed 42 \ + --output_dir /mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/demo_output \ + --output_name name-of-lora \ + --flash_attn \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/src/caption_video.json b/exp_code/1_benchmark/musubi-tuner/src/caption_video.json new file mode 100644 index 0000000000000000000000000000000000000000..deab2cbfd7b0cce88489807766bdc2dfd9a37ac3 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/caption_video.json @@ -0,0 +1,69 @@ +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/a3c275fc2eb0a67168a7c58a6a9adb14.mp4", "caption": "A black and white animated scene unfolds with an anthropomorphic goat surrounded by musical notes and symbols, suggesting a playful environment. Mickey Mouse appears, leaning forward in curiosity as the goat remains still. The goat then engages with Mickey, who bends down to converse or react. The dynamics shift as Mickey grabs the goat, potentially in surprise or playfulness, amidst a minimalistic background. The scene captures the evolving relationship between the two characters in a whimsical, animated setting, emphasizing their interactions and emotions."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/8ae679ab483ab344c881d4a813e0cb51.mp4", "caption": "A black and white animated sequence on a ship's deck features an anthropomorphic bulldog character, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/6619a387964ad49eb1127079d25d17d8.mp4", "caption": "A domestic scene unfolds indoors, with a parrot on a stand and a mouse-like character standing next to it, amidst a domestic setting. A lamp is knocked over, causing a sudden change in lighting and affecting the mood. The scene shifts to a maritime setting, where a sailor-like character is shown in dynamic poses near ship's wheel controls and a bell, with a view of waves and distant land through a window."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/bf06573576ae0ea4d27a178b4d6e95a1.mp4", "caption": "A static black and white scene of three anthropomorphic characters is depicted in a series of animated frames. The first character, with oversized shoes, sings or speaks into a megaphone held by the central figure, who has an exaggerated open mouth. The third character, wearing a chef's hat, controls the megaphone's volume. Musical notes surround the characters, hinting at music, but there is no movement. 
The plain background focuses attention on the characters' interaction, maintaining a consistent composition throughout the scene."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/907704ed979d5e5651fac98ccbec2d4b.mp4", "caption": "A black and white animated scene unfolds featuring a distressed upright cow with prominent horns and expressive eyes, suspended by its legs from a hook on a static background wall. A smaller Mickey Mouse-like character enters, standing near a wooden bench, initiating interaction between the two. The cow's posture changes as it leans, stretches, and falls, while the mouse watches with a concerned expression, its face a mixture of curiosity and worry, in a world devoid of color."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/96d342ea7c7cfddbe1106072bc34be5a.mp4", "caption": "A black and white animated kitchen scene unfolds, featuring two characters with distinct attire. The first, with a flower on its hat, sits near the doorway, looking down in distress. The second character enters, approaching with concern, and the two engage in a friendly exchange, their body language relaxed and engaged. As they communicate, the second character prepares to leave, extending a hand in a gesture of farewell, amidst a simple, unchanged kitchen setting with hanging pots and a bucket."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/328fc12cf9cf3d540e67efadeb893f61.mp4", "caption": "A playful Mickey Mouse character is seen in a vibrant kitchen setting, surrounded by pots, pans, and a barrel. The character's actions unfold with comedic intent: they sit, gesture, and then reach for a trash can, pulling out a banana peel. Next, they stand and stretch to retrieve a hanging pot or utensil. The character's dynamic movements and exaggerated expressions bring a lively and slapstick atmosphere to the scene, set against a static kitchen backdrop."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/4adbb3a2945c9edd78785daccfd23e80.mp4", "caption": "A static camera captures a monochromatic cartoon bird perched on a wooden pole, its large beak and expressive eyes conveying a sense of calm. The scene shifts to reveal two animated characters: a distressed monkey-like figure with scattered musical notes and a calm character dressed as a chef or entertainer, holding an object towards the monkey. The two characters' contrasting expressions create a dynamic, hinting at a possible interaction or conflict, as the static camera focuses on the evolving emotional exchange."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/1151c01bd77450dfc603a2eb7352822e.mp4", "caption": "A black and white animated kitchen scene unfolds, with Mickey Mouse hanging upside down from a door frame, holding a pot lid, amidst kitchen utensils and a ladder in a bucket. 
An anthropomorphic goat enters, showing curiosity and amusement as it interacts with a newspaper titled 'Journey in the Straw' and a ukulele, displaying a range of expressions from contemplative to excited, in a static yet dynamic environment."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/81c5dab878d73e6c21181d18d83f2808.mp4", "caption": "A parrot perches on a stand to the left, with Mickey Mouse emerging from a doorway on the right in a sparsely decorated room featuring a cup, soap, a brush, and a circular window. The mouse interacts with these objects, then appears to clean or organize the area, lifting a bucket and stepping out. The parrot observes the mouse's movements, becoming animated and opening its beak, as if speaking or reacting to something. The camera zooms in on the parrot's facial expressions, capturing its subtle body language and reactions."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/7fe0c83572de828da1cab0c118dece14.mp4", "caption": "A black-and-white animated scene unfolds, featuring a calm cartoon toucan perched on a cylindrical stand. Small droplets of water fall around the toucan, suggesting rain or an unseen source. A mouse-like figure enters, standing on two legs and holding a broom. The mouse interacts with a bucket of water, changing posture from upright to bending over. The background remains static, focusing on the toucan and mouse's actions within a simple, monochromatic setting. The camera captures their interaction, hinting at a narrative centered on these characters."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/05ccfa61ece031e881d173289761cf91.mp4", "caption": "A black cow with white horns and hooves stands on a platform in a monochromatic, rural environment. Initially neutral, the cow's expression changes to surprise, then shock, with its back arched and tail raised high. It eventually relaxes, holding a small object in its mouth. The scene shifts to reveal a vast, empty landscape, with the cow sitting calmly on the platform, surrounded by objects. The cow appears small compared to the expansive backdrop, conveying a sense of isolation and tranquility."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/cb227eefdfcbcd3db945131b1d153273.mp4", "caption": "A small, animated mouse stands beside a calm cow in a rural farm setting, with rolling hills in the background. The mouse approaches the cow, and they interact peacefully. The mouse then moves to a hay cart, showing anticipation. The mouse pulls a long pole to move the cart, demonstrating initiative. The cow and mouse work together in a communal farming activity, surrounded by the serene and idyllic rural landscape, with the rolling hills providing a consistent backdrop to their gentle and harmonious interaction."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/c4903e10393ef7c42fc8131f05f3f233.mp4", "caption": "A whimsical kitchen scene unfolds with Mickey Mouse-like animated character at its center. The character playfully interacts with kitchen utensils and appliances, showcasing a range of energetic and mischievous behavior. It juggles pans from a rack above, but ultimately loses balance and falls to the floor in a comical mishap. 
The kitchen remains orderly and unchanged throughout, with no changes in lighting or camera perspective, emphasizing the comedic contrast between the character's antics and the static surroundings."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/ec80a7b9cfb3740e5cdf9300fdc8d8a9.mp4", "caption": "A domestic kitchen scene unfolds with two animated characters at its center. A larger character with horns and a smile stands on the left, while a smaller, mouse-like creature bends over near the floor. The smaller character raises its hands, presenting something to the larger one. The larger character's expression changes from friendly to intense, holding a spatula. The smaller character reacts with surprise, wide eyes and raised eyebrows, amidst a static kitchen background with hanging pots."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/349059f6c41460df59778d5e6d3d4861.mp4", "caption": "A black-and-white animated scene unfolds on a semi-rural dock, with a cow standing on wooden planks, holding a piece of paper with 'FOB' written on it. The cow is the central focus, amidst static barrels, crates, and a 'PODUNK LANDING' sign in the background. The atmosphere remains calm and still, with the cow's presence subtly shifting the narrative's tone. A sign of pause or anticipation, the scene is frozen in time, inviting the viewer to ponder the story's next development."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/fe0efaedf8c47812bff8da9951f77975.mp4", "caption": "A comedic kitchen scene unfolds with two animated characters, one standing and the other seated near a trash can. The standing character is lifted off the ground by the seated one, leading to a forceful interaction. The seated character then uses a vacuum cleaner to 'clean' the standing character, who struggles and kicks against the suction. The scene culminates with the vacuum successfully sucking the standing character into the trash can, amidst a chaotic and humorous domestic setting."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/0bb5f6dbf8ed2e0060f0ac4164b24847.mp4", "caption": "A black and white animated scene unfolds with a curious, round-bodied mouse observing a tethered, cow-like character in a rural landscape. The cow's distress sparks no reaction from the mouse, who remains stationary. The scene shifts to the mouse's dynamic run across a bridge, showcasing exaggerated gestures and expressions. Joined by a human holding a bottle, they approach a bustling steamboat docked at a riverside location, surrounded by other figures, setting the stage for a more complex adventure."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/be20f3db31cfe8140f004e34a75d6ba4.mp4", "caption": "A kitchen scene unfolds with two animated characters: one stands with an open mouth, while the other interacts with a large object resembling a trash can. The first character falls onto its back, and the second, a cartoon mouse with round ears and tail, approaches. The mouse shows concern, leaning over the fallen figure and tugging at its clothing. 
The background remains static, focusing attention on the interaction between the two characters as the mouse's actions become more forceful."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/82219ab81b21299db9848ab0d55e2f82.mp4", "caption": "A black and white cartoon scene unfolds in a domestic setting, possibly a kitchen or storage room, featuring Mickey Mouse in a dynamic, startled pose, holding a knife near a 'POTATO BIN' overflowing with potatoes. The camera zooms in on Mickey's shocked face, highlighting wide eyes and an open mouth. The scene transitions to darkness, building suspense, before concluding with a title card announcing 'MICKEY MOUSE SOUND CARTOON,' accompanied by playful images of Mickey Mouse characters in a circular motif, set against a Walt Disney comic series backdrop."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/fec5a5184b05acafc7904cd419cbb5a3.mp4", "caption": "A black and white animated scene unfolds with a steamboat on a serene river or canal, surrounded by a dock-like structure and rocky shores. The boat emits dark smoke from two tall smokestacks as it moves, leaving a trail behind. As the steamboat accelerates, the smoke grows denser. It eventually disappears from view, and a character emerges from a nearby house-like structure, standing on a small pier, observing the surroundings in a simplistic, classic animation style."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/614cf13ae1974436cf4072a5cc7d7c57.mp4", "caption": "A black-and-white animated video showcases a central character with a round body and large ears standing in an indoor setting with a plain background. The character is surrounded by smaller figures, displaying various expressions of interest or curiosity. As the video progresses, subtle changes occur among the figures, suggesting movement and reactions. The scene transitions to focus on a single bird perched on a perch, with its posture and expression changing subtly throughout the frames, showing signs of activity."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/7d6dcf13f5c3d45b85c5ea0544c429e4.mp4", "caption": "A black-and-white animated scene unfolds on the deck of a ship, with a sailor-clad anthropomorphic character standing at the helm. Initially, the character grips the ship's wheel firmly, showing signs of strain. As they lean back, their expression changes to one of joy and triumph, with a wide smile and raised eyebrows. The character's posture relaxes in the final frame, indicating a task is complete. The open sea and sky remain the consistent backdrop, reinforcing the maritime setting throughout."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/1094139d474e65852826d64a1b4aa520.mp4", "caption": "Three mechanical towers, each with unique designs on their tops, are connected by pipes on a flat surface against a cloudy black-and-white sky. Rain clouds appear, and water droplets fall, setting an industrial scene. The scene shifts to a ship's interior, where two cartoon characters interact amidst nautical elements. A claw-like appendage emerges, reaching for the ship, causing tension. 
The appendage extends, tilting the ship dramatically, introducing movement and a sense of urgency, highlighting potential danger within the ship's confines."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/d2cfac77af77bba35a6986beca2ae052.mp4", "caption": "A black and white animated steamboat sits on a serene body of water amidst a hilly or mountainous landscape under a cloudy sky. The steamboat's tall stack emits smoke in various stages, transforming from thick and dark to light and dispersed, eventually dissipating into billowing clouds. The boat remains stationary, with minor movements due to water currents, in a consistent environment with static lighting conditions, focusing solely on the dynamic smoke emission from the steamboat's stack."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/93877aefdefcee7b2978d7fb90690c8f.mp4", "caption": "A black and white animated sequence unfolds with three cartoon characters interacting in a domestic setting. Mickey Mouse-like and an anthropomorphic creature with horns initially share a moment, before a chef character enters, wearing a hat and apron. The characters engage with each other, exchanging gestures and expressions. The scene remains static, with no environmental changes, focusing on the characters' interactions and possible storytelling through their body language and positioning, capturing a moment of everyday life and subtle narrative."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/fe1009b64da05dfea77928e501284868.mp4", "caption": "A black-and-white animated short film features a menacing creature with its mouth wide open, revealing rows of teeth and colorful candy or pills inside. Mickey Mouse, a small figure with distinctive ears and gloves, interacts with the creature in various poses. Mickey stands confidently on tiptoes, leans closer, and extends his hand, gauging the creature's reaction. The creature remains motionless, maintaining an imposing presence, as Mickey's expressions change with anticipation and caution, hinting at a tense narrative of potential conflict or negotiation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/05a234b0164d015d468f2f53e771b4cf.mp4", "caption": "A classic cartoon scene unfolds in a monochromatic setting, featuring a small anthropomorphic dog and a stylized, horned character hanging by its tail from a horizontal bar. The dog moves closer, while the hanging character's posture changes, suggesting manipulation. The scene builds to a comedic climax as the character's limbs flail, and sweat drips from its face, capturing a moment of tension and physical comedy common in vintage animation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/2c1ed5408882479b06681f7cf372916a.mp4", "caption": "A classic black and white animated scene unfolds, featuring two cartoon characters: a large, exaggerated creature with an open mouth and numerous teeth, and a small, anthropomorphic figure with oversized ears. The smaller character exhibits various expressions and gestures, initially reaching into the creature's mouth, then raising its arms, and finally offering a lollipop or candy. 
The scene captures a moment of interaction, suggesting communication, negotiation, or playful engagement between the two characters in a whimsical, animated world."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/b3e3b147675a68de8a29ffa762d59adb.mp4", "caption": "A whimsical, monochromatic animated character, reminiscent of early animation styles, is seen in a domestic setting near a 'POTATO BIN' sign. The character picks up a potato, places it on its head like a hat, and holds a knife. As it rises, it swings the knife playfully or dances, maintaining a humorous tone. The character concludes its activity, standing upright with one arm on the bin and the other at its side, exuding satisfaction in this confined, unchanged background."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/3f0979e6cae25447f416372c49ad5e07.mp4", "caption": "A black-and-white cartoon sequence unfolds with two animated characters in a comedic, lighthearted scenario. One character, with an oversized mouth surrounded by musical notes, sings or speaks loudly. The other, a bull-like creature with horns, playfully teases. A smaller mouse character enters, standing confidently and facing the larger one in a playful confrontation. The background remains static, with the characters' actions evoking a humorous, classic animation style. Their interactions are exaggerated and whimsical, characteristic of a timeless cartoon tale."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/8d616fee8e0a280d2d87e478b948a729.mp4", "caption": "A black and white animated kitchen scene unfolds with two characters: one standing on a chair, laughing, and holding a bucket, while the other is seated at a table, seemingly asleep. The standing character leans over, attempting to wake or engage the seated one. They pull something from the bucket, possibly to surprise or interact further, amidst a backdrop of hanging pots, a barrel, and other kitchen items, capturing a playful moment of mischief in a static domestic setting."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/8e7722634784cf969c15f4a597f3af4d.mp4", "caption": "A classic black and white cartoon scene unfolds in a series of static shots, showcasing a group of characters in various poses. One central figure falls, maintaining a surprised expression, while the surrounding characters watch with concern or amusement. The plain background emphasizes the action, and consistent lighting suggests an older animation. The scene is devoid of camera movement, focusing on the expressive movements and interactions of the characters in a timeless, classic cartoon style."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/46e9d133d051655c956c7089b672f519.mp4", "caption": "A black and white animated scene unfolds, showcasing two cartoon characters in a simple, unadorned setting. A mouse with large ears, wearing shorts and shoes, stands upright over a larger, round-bodied figure lying down. The mouse's posture suggests a playful or curious engagement with the other character. Both figures remain stationary, with the environment providing a consistent backdrop. 
The scene emphasizes the interaction between the two characters, devoid of camera movement or additional elements, highlighting their central interaction in a static, yet engaging animation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/5a0229ffdb3bd9d8e81dca7988d7cdbb.mp4", "caption": "A black-and-white animated scene featuring three characters in a static setting. Mickey Mouse-like character stands on one leg, hands on hips, with a playful expression. Center character has an exaggerated open mouth, caught in mid-motion, suggesting singing or surprise. Female character in a tutu and flower-adorned hat dances, arms raised. Background features a plain wall with scattered musical notes. The characters maintain their positions and expressions, with no changes in lighting, environment, or camera perspective, focusing on their interaction within this continuous moment."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/3108dd567bd8669967bc83e0bc50dab2.mp4", "caption": "A tranquil, monochromatic scene unfolds with an animated character standing on a dock beside a body of water, a lighthouse visible in the background. The character is startled, running away from the viewer's perspective. A mechanical device with levers and a counter is introduced, attached to a rope, with a smaller, humanoid figure near it. The smaller figure engages with the machine, pulling the rope, while the larger figure remains in motion, creating a sense of tension and urgency in this classic, hand-drawn animated sequence."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/4c918b917308ff03120e9e86650a2d3c.mp4", "caption": "Two animated characters interact in a classic black and white setting, reminiscent of vintage cartoons. One character stands on the ground, elongated with a curved tail, while the other hangs above, descending in a playful or comedic manner. As they meet, their expressions and body language suggest a dynamic exchange, possibly humorous or light-hearted. The static background focuses attention on their evolving interaction, emphasizing their gestures and expressions in a timeless, monochromatic stage."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/e6a0293632a942712fd61867aeae5e9f.mp4", "caption": "A black-and-white animated scene unfolds featuring Mickey Mouse and a goat in a comedic struggle. Mickey initially pulls the goat's horns, but his grip weakens, causing him to fall backward. The goat stands upright, relieved, and gestures with a hoof. Scattered letters and musical notes surround them, hinting at a playful conflict. Mickey looks up at the goat with surprise and resignation, while the goat explains or celebrates its victory. The scene remains static, focusing on the characters' expressions and actions in a lighthearted, humorous altercation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/a8369dff8c96925513892c6377bacb1c.mp4", "caption": "A vintage-inspired animated scene unfolds in a monochromatic setting. A small bird perched on a round window's edge observes a cartoon mouse crouched beside a 'POTATO BIN,' holding a potato sack and shovel. As the camera zooms in on the bird, its facial expressions shift from curiosity to concern. The bird's determined gaze hints at a narrative progression, while the mouse remains still, awaiting the bird's reaction. 
The scene is set for a subtle, yet engaging, character-driven story."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/1d50a3d9703f152758d5422c8b48010f.mp4", "caption": "A dynamic sequence unfolds on the deck of a ship, where a small, mouse-like character with large ears and short pants enthusiastically steers the vessel using a wheel. A larger, bulky character with a long pole engages in a playful confrontation, asserting dominance or playfully provoking the smaller one. Expressive gestures and movements convey emotions and intentions, set against a nautical backdrop featuring a steering wheel, life preserver, and bell. The two characters interact in a lively, competitive, or friendly exchange."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/6781b101e8bd362e9853749fcc2cd3e3.mp4", "caption": "A whimsical, animated scene unfolds in black and white, featuring a large, cartoonish creature with a wide-open mouth filled with rounded objects, resembling teeth or candy. Mickey Mouse, recognizable by his iconic ears and gloves, stands before it, holding a stick or rod near the creature's mouth. Both characters remain static, with Mickey's posture suggesting a potential interaction or presentation. The scene captures a moment of stillness, focusing on the potential connection between the two characters in a classic animation style."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/5383e4e3e8156787c43bc0cd839536c0.mp4", "caption": "A cartoon character resembling Mickey Mouse, dressed in a sailor's outfit, runs towards the right side of the frame on a sandy terrain, pursued by a swinging rope with a hook. As the character holds onto the rope, they're pulled into a kitchen, where they use a hammer to interact with the door or wall. The scene transitions from an outdoor, action-packed sequence to a domestic, indoor scenario, showcasing a shift in location and activity."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/8520a081252604e3c478f76f4dddddf8.mp4", "caption": "Two cartoon characters stand on a monochromatic ship deck, a simplistic setting reminiscent of early animated films. One operates a winch, propelling the other into the air, who then lands back on the deck, looking disoriented. The winch operator looks on, possibly reacting to the outcome. The scene abruptly shifts to a blank screen, suggesting a narrative shift, comedic pause, or interlude. The characters and their environment are bathed in a soft, muted light, emphasizing the whimsical nature of the animation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/46f4eee0864dd89c9225367d826a657f.mp4", "caption": "A sailor-clad animated character stands on a ship's deck, gripping a large wooden wheel mounted on a post to the left. The calm seas and distant land visible through a window behind the character set a tranquil maritime backdrop. The character's expressions change from focused determination to relaxed contemplation, surprise, and exasperation as they interact with the wheel, maintaining contact throughout. 
The scene captures a moment of dynamic activity within a static nautical environment, highlighting the character's reactions and efforts."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/d3af95bf8f9cce29e5c99e839e630c59.mp4", "caption": "A black-and-white animated sequence unfolds in a kitchen setting, centered around Mickey Mouse interacting with a trash can. Initially, Mickey holds an object over the trash can, then proceeds to insert his hand inside it. His expressions shift from focused to contented, suggesting a successful completion of the task. The background remains consistent, featuring hanging pots and pans, as Mickey's actions evolve throughout the sequence, maintaining a steady focus on his interactions with the trash can in a seamless and whimsical animation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/4010c114ce5fb6a86d2327e711669813.mp4", "caption": "A cartoon Mickey Mouse character stands next to a wooden 'POTATO BIN' crate, holding a large knife. The mouse cuts into the bin, drops the knife, and falls backward in surprise. It struggles to get up but remains on the ground, looking bewildered. After a moment, the mouse sits upright, regains its composure, and holds the knife again. The scene takes place in a confined space with a plain wall, a bucket, and the potato bin, capturing the mouse's narrative arc from preparation to recovery."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/8adfde998361b1d7c6f38a35481667fd.mp4", "caption": "A domestic scene unfolds in a well-equipped kitchen, where an animated character is seen preparing for various activities. The character stands beside a barrel, then moves towards a wooden chair, interacting with objects around them. Next, they sit at a table, resting or reading a piece of paper. The kitchen setting remains consistent, with hanging utensils and a consistent background, as the character goes about their daily life, engaging in tasks such as cooking, tidying, or relaxation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/12e51adf1acbf7acbb703a96a464a39b.mp4", "caption": "Mickey Mouse, with his distinctive round ears and black gloves, crouches attentively beside a pig character lying on the ground. The pig undergoes subtle movements, possibly preparing for action, while Mickey's posture and expression change as they interact. Mickey's mouth opens in surprise, and his body language suggests engagement. The pig raises its head or moves its limbs, indicating alertness or response. The background remains unchanged, with consistent lighting and shading highlighting the characters against a plain backdrop, capturing a series of moments of communication and reaction between the two."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/d3d4ce6894178d7664effcc4eef1945b.mp4", "caption": "A cartoon kitchen scene unfolds with two anthropomorphic characters. The first, wearing gloves, stands next to an open trash can, ready to throw something away. It bends over, holds the can's edge, and throws the object in. The second character, with large ears and a round body, enters, initially neutral-faced, but becomes startled as it approaches. 
It falls into the trash can, leaving only its backside visible, in a comedic exchange amidst a domestic kitchen setting with hanging utensils on the wall."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/11982a10fc2ba10d295d23054f7f68c8.mp4", "caption": "A black and white character with a hat kneels beside a steering wheel, intensely gripping it, while a smaller character resembling Mickey Mouse stands nearby, watching with concern. The scene remains static, with the characters' expressions changing as they interact. The smaller character raises its arms in surprise, prompting the larger character to defend or express itself. The larger character leans forward, determined or aggressive, while the smaller character looks on, caught off guard. The fixed camera captures the characters within a maritime setting, showcasing their evolving emotions."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/6049124903a8de3fa164bbb5bb2f586e.mp4", "caption": "A black-and-white animated short film unfolds in a kitchen setting, featuring a character reminiscent of a well-known mouse cartoonist. The character performs a lively song, swinging a pot as a makeshift microphone, before transitioning to a domestic activity near a trash can. The background remains consistent, showcasing a stylized kitchen with barrel, utensils, and household items. The animation's monochromatic scheme highlights shapes and movements, capturing a series of dynamic moments that blend performance, domestic life, and playful engagement within the kitchen space."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/f0f570e5bc69d03af276a06a96cde800.mp4", "caption": "A black and white animated sailor stands on the deck of a ship, initially relaxed with one hand on the wheel and the other raised to their face. The sailor's body language becomes increasingly dynamic, with expressive gestures and movements, suggesting a change in action or emotion. The character raises both arms in an emphatic gesture, amidst a consistent maritime background, emphasizing their expressions within the static setting."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/ceb2998f50feefef1844032e9cd19739.mp4", "caption": "Two animated characters interact in a domestic kitchen setting, near a labeled 'POTATO BIN' filled with scattered items. One character stands by an open door, while the other sits on the floor. The seated character stands up and moves towards the bin, prompting the standing character to retrieve a large potato. The potato is released back into the bin, causing both characters to react with surprise and confusion, displaying astonishment and bewilderment through their facial expressions and body language."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/58b88d44575e945cd7dcd11b3aac6ff0.mp4", "caption": "Two animated characters, one with large ears and expressive eyes, interact in a simple, monochromatic room. The larger character, with a jovial demeanor, initially boasts, but then shows concern. The scene shifts to a storage area with a potato bin and a bucket. The larger character engages in an activity while the smaller character looks on in confusion. 
The interaction escalates into a confrontation, with the larger character extending its arm, startling the smaller character, who recoils in surprise, arms flailing."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/4461a0e8ddd86d615ccf73ef0ff0705b.mp4", "caption": "A black-and-white animated sequence of a cat's facial expressions unfolds, starting with calm, closed eyes. The cat's face transitions through surprise, focus, raised eyebrows, and a wide-open mouth, suggesting shouting or spitting. The intensity escalates, with motion lines around the mouth area, before softening slightly. The cat returns to a neutral state, with a straight mouth and relaxed eyes, as if shifting from intense activity to calmness. The background and camera remain static, focusing the viewer's attention on the evolving emotions and actions of the cat character."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/2325e5f8e287753e50e47ab2fc2e8241.mp4", "caption": "A serene black and white cartoon landscape unfolds, featuring a riverbank where a dog stands on a table beside a stack of books, gazing towards the left. A steamboat appears on the right bank, moving from right to left, emitting dark smoke from its smokestacks. The dog remains stationary as the boat approaches, tilts, and eventually moves ashore, creating waves. The environment remains calm, with a fixed camera angle, showcasing the dog's stoic demeanor amidst the steamboat's actions in a tranquil, unchanging setting."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/d8a33062ef6a446c168acb06bf45b77d.mp4", "caption": "A monochromatic scene from an early 20th-century animated film unfolds with Mickey Mouse struggling to pull a heavily loaded wagon labeled 'HAY.' As the wagon's contents spill out, a bull-like animal tethered to it becomes agitated, its mouth open in surprise. Mickey Mouse stands nearby, concerned or curious about the bull's state. The background features a simplistic landscape, emphasizing the characters' interactions without distractions, capturing a narrative of effort, chaos, and reaction."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/0288f3d69c08e816d81b014da620db49.mp4", "caption": "A black and white animated scene unfolds, featuring a bulldog in overalls and a hat, standing on a ship's deck. The bulldog assumes various poses, then walks towards a dockside with two ducks and a cow. A wooden platform reads 'PODUNK LANDING,' while a building marked 'BOAT TICKETS' and scattered barrels hint at a destination. The bulldog and ducks move purposefully, possibly heading towards a food stand or boating services, amidst a monochromatic backdrop with no noticeable changes in environment or lighting."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/560c6472660330638c2809d823d59be3.mp4", "caption": "A kitchen scene unfolds with Mickey Mouse-like character on the left, startled, while a female mouse character, wearing a hat, holds a gun to her head. Above, a 'KITCHEN' sign is visible. The scene shifts to a black goat character standing alone against a plain background, initially facing away, then turning with a content smile and raised hoof near a guitar. 
The scene returns to the kitchen, with both mice interacting near the goat, looking surprised or curious about the situation."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/bbf1174e69f404009bba630332c8cbdb.mp4", "caption": "A black-and-white animated sequence features a stationary, large bovine creature with its mouth open, as if chewing or reacting. A small mouse with a lollipop stands before it, initially calm but gradually showing signs of distress. The mouse's anxiety grows, eventually recoiling sharply and falling backward. The scene remains static, focusing on the interaction between the two characters. The progression from calm to fear highlights a sudden shift in their relationship or situation, with the large creature's presence now a source of concern."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/c789b44119c14c5732e835cae89bf7ac.mp4", "caption": "A black and white animated kitchen scene unfolds with Mickey Mouse standing next to a barrel, surrounded by hanging kitchen utensils. A curious cat enters, approaching the ladder and interacting with the trash can. The cat's movements suggest intent, while Mickey remains stationary, observing without change. The background kitchen elements remain consistent, providing a stable backdrop for the characters' interactions. The scene is static, with minimal camera movement, focusing attention on the unfolding activity between the two characters in this confined domestic space."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/383cb4b496d17695554655f3ec79c587.mp4", "caption": "A black-and-white animated scene features Mickey Mouse, dressed as a sailor, standing on a ship's deck, ready for an adventure. The background shows a vast open sea under a cloudy sky. Three cylindrical structures, with conical caps, stand side by side against a cloudy backdrop. The central cylinder rises, descends, and breaks apart into fragments, scattering pieces across the platform, while the other two structures remain undisturbed, creating a narrative centered on the central object's unexpected destruction."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/f6a95cd4397b6a102e82becdcd05d585.mp4", "caption": "A black screen fades to a grayscale depiction of a lighthouse at night, with the silhouette of the structure standing out against a dark background. The scene shifts to a detailed, monochromatic steamboat, with smoke billowing from its stack and various mechanical details. The perspective changes to show the boat in a broader landscape, with hills, vegetation, and a nostalgic atmosphere. The camera returns to the steamboat, emphasizing its intricate design and mechanical components, with a focus on shapes, lines, and contrasts."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/72306312252707a5ecf83a8eeed9b55a.mp4", "caption": "A calm interior of a ship is the backdrop for a poignant exchange between two animated characters. One, seated on a bench, wears a hat and suspenders, while the other, smaller figure, stands attentively before them. The seated character's demeanor shifts from relaxed to intense, with narrowed eyes and open mouth, as they engage in a stern conversation. The smaller figure remains stationary, maintaining a neutral gaze. 
Maritime elements like a life preserver and bell adorn the background, reinforcing the nautical setting."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/b65565e3b61c97b3fa0643c6f45dc4de.mp4", "caption": "A black and white cartoon character stands on a ship's deck, gripping the wheel and railing as they gaze out at the open ocean. The character's expression changes from engagement to alarm, then to distress, before finally regaining composure. The ship's interior remains steady, with the steering wheel and railings as constant elements. The camera remains fixed, capturing the character's evolving emotional state against the backdrop of the maritime setting, with the horizon visible in the background."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/894905168d8da4f4b0862e7b0f756e44.mp4", "caption": "A black and white animated scene unfolds, featuring a goat with large horns standing next to a newspaper titled 'Journey in the Straw,' marked with musical notes and words like 'HEY HEY!' The goat initially appears startled, then plays along with the music, closing its eyes and moving its head rhythmically. The scene transitions to a 'KITCHEN' where a mouse in a chef's hat and another character engage in a kitchen activity, possibly cooking or preparing food, with the focus on their expressions and actions within their environments."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/485b43aa4524327f3c7a40d28e1cf7bc.mp4", "caption": "A black and white animated cat character is seen in various expressive states, transitioning from contentment, with eyes closed and a relaxed smile, to surprise, with a wide-open mouth. The cat then exhibits distress, gripping its stomach, followed by a brief respite and a sense of resignation or contemplation, with droopy eyes and a half-closed mouth. The dark, nondescript background emphasizes the cat's evolving expressions and body language, conveying a narrative arc of emotions."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/38963e6acf01fdcc9b647ec9d63edc9a.mp4", "caption": "A cartoon version of Mickey Mouse is seen in a cozy kitchen setting, surrounded by hanging pots and utensils. The character initially bends to interact with a wooden crate on the floor, then pulls out a pot and places it on the stove. Mickey manipulates a ladle above the pot, stirring vigorously, before raising the ladle in a dynamic pose, indicating completion of their task. The background remains consistent, emphasizing the domestic environment, as Mickey's actions progress from preparation to execution."} +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/fca1d53370805cf052bed822e2d20604.mp4", "caption": "A black and white animated kitchen scene unfolds with two characters: one hit on the back of the head with a frying pan, causing musical notes to scatter. The hit character stands upright, while the other lies on the ground, indicating a comedic interaction. 
The hit character then appears distressed, but soon adopts a mischievous grin, lifting a heavy basket of bread in preparation for a prank, before realizing the potential consequences and freezing in surprise."} diff --git a/exp_code/1_benchmark/musubi-tuner/src/caption_video_test.json b/exp_code/1_benchmark/musubi-tuner/src/caption_video_test.json new file mode 100644 index 0000000000000000000000000000000000000000..04635e98e57d3f849be300088cdfc7b25fcb0145 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/caption_video_test.json @@ -0,0 +1 @@ +{"video_path": "/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos/a3c275fc2eb0a67168a7c58a6a9adb14.mp4", "caption": "A black and white animated scene unfolds with an anthropomorphic goat surrounded by musical notes and symbols, suggesting a playful environment. Mickey Mouse appears, leaning forward in curiosity as the goat remains still. The goat then engages with Mickey, who bends down to converse or react. The dynamics shift as Mickey grabs the goat, potentially in surprise or playfulness, amidst a minimalistic background. The scene captures the evolving relationship between the two characters in a whimsical, animated setting, emphasizing their interactions and emotions."} diff --git a/exp_code/1_benchmark/musubi-tuner/src/convert_json.py b/exp_code/1_benchmark/musubi-tuner/src/convert_json.py new file mode 100644 index 0000000000000000000000000000000000000000..ffd54726df74a1523edae5d7e1f6eaa735aa7a28 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/convert_json.py @@ -0,0 +1,29 @@ +import json + +def create_json_dataset(prompt_file, video_file, output_file): + with open(prompt_file, 'r', encoding='utf-8') as f_prompt, \ + open(video_file, 'r', encoding='utf-8') as f_video, \ + open(output_file, 'w', encoding='utf-8') as f_out: + + for caption, video_path in zip(f_prompt, f_video): + # strip surrounding whitespace and newlines + caption = caption.strip() + video_path = video_path.strip() + + # build a dict and convert it to a JSON string + data = { + "video_path": video_path, + "caption": caption + } + json_str = json.dumps(data) + + # write one JSON object per line to the output file + f_out.write(json_str + '\n') + +# usage example +prompt_file = '/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/prompt.txt' +video_file = '/mnt/workspace/checkpoints/Wild-Heart/Disney-VideoGeneration-Dataset/videos.txt' +output_file = 'caption_video.json' + +create_json_dataset(prompt_file, video_file, output_file) +print(f"JSON file generated: {output_file}") \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/src/dataset_config.toml b/exp_code/1_benchmark/musubi-tuner/src/dataset_config.toml new file mode 100644 index 0000000000000000000000000000000000000000..a481bd4dc08cf92023e581d2be89f5415ebb739d --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/dataset_config.toml @@ -0,0 +1,14 @@ +# general configurations +[general] +resolution = [832, 480] # optional, [W, H], default is [960, 544]. This is the default resolution for all datasets +batch_size = 1 # optional, default is 1. This is the default batch size for all datasets +num_repeats = 1 # optional, default is 1. Number of times to repeat the dataset. Useful to balance the multiple datasets with different sizes. +enable_bucket = true # optional, default is false. Enable bucketing for datasets +bucket_no_upscale = false # optional, default is false. Disable upscaling for bucketing. Ignored if enable_bucket is false +
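+# dataset definitions: each [[datasets]] entry points to a caption JSONL file and a cache directory used for the precomputed latent / text encoder output caches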
+[[datasets]] +video_jsonl_file = "/mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/src/caption_video_test.json" +cache_directory = "/mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/demo_output/cache" +frame_extraction = "full" +max_frames = 73 +source_fps = 30.0 \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/src/get_video_info.py b/exp_code/1_benchmark/musubi-tuner/src/get_video_info.py new file mode 100644 index 0000000000000000000000000000000000000000..386656c7cad2ee22202bf27acc82a0e1edaac32e --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/get_video_info.py @@ -0,0 +1,33 @@ +import cv2 + +def get_video_info(video_path): + # open the video file + cap = cv2.VideoCapture(video_path) + + if not cap.isOpened(): + print("Failed to open the video file") + return + + # read video properties + width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) + frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # print video information + print(f"Video path: {video_path}") + print(f"Width: {width} px") + print(f"Height: {height} px") + print(f"Frame rate (FPS): {fps}") + print(f"Total frames: {frame_count}") + + # compute the video duration in seconds + duration = frame_count / fps + print(f"Duration: {duration:.2f} s") + + # release the capture object + cap.release() + +# usage example +video_path = "/mnt/workspace/ysh/Code/Efficient_Model/2_code/musubi-tuner/src/a3c275fc2eb0a67168a7c58a6a9adb14.mp4" +get_video_info(video_path) \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__init__.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/__init__.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5dcf2adbdb81a45c4c2fb2b2825dc3d13103bb1c Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/__init__.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/cache_latents.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/cache_latents.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d7ae879b3512935a64668f6208bbd2cbd47403f Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/cache_latents.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/cache_text_encoder_outputs.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/cache_text_encoder_outputs.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efaab0ab87d5ebd4c8ecfaeae90292cd9d75791e Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/cache_text_encoder_outputs.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/fpack_generate_video.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/fpack_generate_video.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa3c79b4216a670939a28ef66523df6df5fef091 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/fpack_generate_video.cpython-312.pyc differ diff --git 
a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_generate_video.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_generate_video.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..941ca3183d7cb9d262ab0b95b0abdac8952e21fd Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/hv_generate_video.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/wan_generate_video.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/wan_generate_video.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..326d12956082e317af6f139c748c98aab69585e2 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/__pycache__/wan_generate_video.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/cache_latents.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/cache_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..50c500d9e98f48433a4332f0ef4ac33c846b5f2c --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/cache_latents.py @@ -0,0 +1,387 @@ +import argparse +import os +import glob +from typing import Optional, Union + +import numpy as np +import torch +from tqdm import tqdm + +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from PIL import Image + +import logging + +from musubi_tuner.dataset.image_video_dataset import BaseDataset, ItemInfo, save_latent_cache, ARCHITECTURE_HUNYUAN_VIDEO +from musubi_tuner.hunyuan_model.vae import load_vae +from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D +from musubi_tuner.utils.model_utils import str_to_dtype + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def show_image(image: Union[list[Union[Image.Image, np.ndarray], Union[Image.Image, np.ndarray]]]) -> int: + import cv2 + + imgs = ( + [image] + if (isinstance(image, np.ndarray) and len(image.shape) == 3) or isinstance(image, Image.Image) + else [image[0], image[-1]] + ) + if len(imgs) > 1: + print(f"Number of images: {len(image)}") + for i, img in enumerate(imgs): + if len(imgs) > 1: + print(f"{'First' if i == 0 else 'Last'} image: {img.shape}") + else: + print(f"Image: {img.shape}") + cv2_img = np.array(img) if isinstance(img, Image.Image) else img + cv2_img = cv2.cvtColor(cv2_img, cv2.COLOR_RGB2BGR) + cv2.imshow("image", cv2_img) + k = cv2.waitKey(0) + cv2.destroyAllWindows() + if k == ord("q") or k == ord("d"): + return k + return k + + +def show_console( + image: Union[list[Union[Image.Image, np.ndarray], Union[Image.Image, np.ndarray]]], + width: int, + back: str, + interactive: bool = False, +) -> int: + from ascii_magic import from_pillow_image, Back + + back = None + if back is not None: + back = getattr(Back, back.upper()) + + k = None + imgs = ( + [image] + if (isinstance(image, np.ndarray) and len(image.shape) == 3) or isinstance(image, Image.Image) + else [image[0], image[-1]] + ) + if len(imgs) > 1: + print(f"Number of images: {len(image)}") + for i, img in enumerate(imgs): + if len(imgs) > 1: + print(f"{'First' if i == 0 else 'Last'} image: {img.shape}") + else: + print(f"Image: {img.shape}") + pil_img = img if isinstance(img, Image.Image) else Image.fromarray(img) + ascii_img = from_pillow_image(pil_img) + 
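# print the ASCII-art preview of the frame to the terminal at the requested column width +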
ascii_img.to_terminal(columns=width, back=back) + + if interactive: + k = input("Press q to quit, d to next dataset, other key to next: ") + if k == "q" or k == "d": + return ord(k) + + if not interactive: + return ord(" ") + return ord(k) if k else ord(" ") + + +def save_video(image: Union[list[Union[Image.Image, np.ndarray], Union[Image.Image, np.ndarray]]], cache_path: str, fps: int = 24): + import av + + directory = os.path.dirname(cache_path) + if not os.path.exists(directory): + os.makedirs(directory) + + if (isinstance(image, np.ndarray) and len(image.shape) == 3) or isinstance(image, Image.Image): + # save image + image_path = cache_path.replace(".safetensors", ".jpg") + img = image if isinstance(image, Image.Image) else Image.fromarray(image) + img.save(image_path) + print(f"Saved image: {image_path}") + else: + imgs = image + print(f"Number of images: {len(imgs)}") + # save video + video_path = cache_path.replace(".safetensors", ".mp4") + height, width = imgs[0].shape[0:2] + + # create output container + container = av.open(video_path, mode="w") + + # create video stream + codec = "libx264" + pixel_format = "yuv420p" + stream = container.add_stream(codec, rate=fps) + stream.width = width + stream.height = height + stream.pix_fmt = pixel_format + stream.bit_rate = 1000000 # 1Mbit/s for preview quality + + for frame_img in imgs: + if isinstance(frame_img, Image.Image): + frame = av.VideoFrame.from_image(frame_img) + else: + frame = av.VideoFrame.from_ndarray(frame_img, format="rgb24") + packets = stream.encode(frame) + for packet in packets: + container.mux(packet) + + for packet in stream.encode(): + container.mux(packet) + + container.close() + + print(f"Saved video: {video_path}") + + +def show_datasets( + datasets: list[BaseDataset], + debug_mode: str, + console_width: int, + console_back: str, + console_num_images: Optional[int], + fps: int = 24, +): + if debug_mode != "video": + print(f"d: next dataset, q: quit") + + num_workers = max(1, os.cpu_count() - 1) + for i, dataset in enumerate(datasets): + print(f"Dataset [{i}]") + batch_index = 0 + num_images_to_show = console_num_images + k = None + for key, batch in dataset.retrieve_latent_cache_batches(num_workers): + print(f"bucket resolution: {key}, count: {len(batch)}") + for j, item_info in enumerate(batch): + item_info: ItemInfo + print(f"{batch_index}-{j}: {item_info}") + if debug_mode == "image": + k = show_image(item_info.content) + elif debug_mode == "console": + k = show_console(item_info.content, console_width, console_back, console_num_images is None) + if num_images_to_show is not None: + num_images_to_show -= 1 + if num_images_to_show == 0: + k = ord("d") # next dataset + elif debug_mode == "video": + save_video(item_info.content, item_info.latent_cache_path, fps) + k = None # save next video + + if k == ord("q"): + return + elif k == ord("d"): + break + if k == ord("d"): + break + batch_index += 1 + + +def preprocess_contents(batch: list[ItemInfo]) -> tuple[int, int, torch.Tensor]: + # item.content: target image (H, W, C) + # item.control_content: list of images (H, W, C) + + # Stack batch into tensor (B,F,H,W,C) in RGB order. The numbers of control content for each item are the same. 
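+ # If a frame has an alpha channel, it is split off and downsampled by 8 into a per-item mask (BCFHW, floats in [0, 1]); the RGB channels go into the stacked contents.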
+ contents = [] + content_masks: list[list[Optional[torch.Tensor]]] = [] + for item in batch: + item_contents = item.control_content + [item.content] + + item_masks = [] + for i, c in enumerate(item_contents): + if c.shape[-1] == 4: # RGBA + item_contents[i] = c[..., :3] # remove alpha channel from content + + alpha = c[..., 3] # extract alpha channel + mask_image = Image.fromarray(alpha, mode="L") + width, height = mask_image.size + mask_image = mask_image.resize((width // 8, height // 8), Image.LANCZOS) + mask_image = np.array(mask_image) # PIL to numpy, HWC + mask_image = torch.from_numpy(mask_image).float() / 255.0 # 0 to 1.0, HWC + mask_image = mask_image.squeeze(-1) # HWC -> HW + mask_image = mask_image.unsqueeze(0).unsqueeze(0).unsqueeze(0) # HW -> 111HW (BCFHW) + mask_image = mask_image.to(torch.float32) + content_mask = mask_image + else: + content_mask = None + + item_masks.append(content_mask) + + item_contents = [torch.from_numpy(c) for c in item_contents] + contents.append(torch.stack(item_contents, dim=0)) # list of [F, H, W, C] + content_masks.append(item_masks) + + contents = torch.stack(contents, dim=0) # B, F, H, W, C. F is control frames + target frame + + contents = contents.permute(0, 4, 1, 2, 3).contiguous() # B, C, F, H, W + contents = contents / 127.5 - 1.0 # normalize to [-1, 1] + + height, width = contents.shape[-2], contents.shape[-1] + if height < 8 or width < 8: + item = batch[0] # other items should have the same size + raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}") + + return height, width, contents, content_masks + + +def encode_and_save_batch(vae: AutoencoderKLCausal3D, batch: list[ItemInfo]): + contents = torch.stack([torch.from_numpy(item.content) for item in batch]) + if len(contents.shape) == 4: + contents = contents.unsqueeze(1) # B, H, W, C -> B, F, H, W, C + + contents = contents.permute(0, 4, 1, 2, 3).contiguous() # B, C, F, H, W + contents = contents.to(vae.device, dtype=vae.dtype) + contents = contents / 127.5 - 1.0 # normalize to [-1, 1] + + h, w = contents.shape[3], contents.shape[4] + if h < 8 or w < 8: + item = batch[0] # other items should have the same size + raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}") + + # print(f"encode batch: {contents.shape}") + with torch.no_grad(): + latent = vae.encode(contents).latent_dist.sample() + # latent = latent * vae.config.scaling_factor + + # # debug: decode and save + # with torch.no_grad(): + # latent_to_decode = latent / vae.config.scaling_factor + # images = vae.decode(latent_to_decode, return_dict=False)[0] + # images = (images / 2 + 0.5).clamp(0, 1) + # images = images.cpu().float().numpy() + # images = (images * 255).astype(np.uint8) + # images = images.transpose(0, 2, 3, 4, 1) # B, C, F, H, W -> B, F, H, W, C + # for b in range(images.shape[0]): + # for f in range(images.shape[1]): + # fln = os.path.splitext(os.path.basename(batch[b].item_key))[0] + # img = Image.fromarray(images[b, f]) + # img.save(f"./logs/decode_{fln}_{b}_{f:03d}.jpg") + + for item, l in zip(batch, latent): + # print(f"save latent cache: {item.latent_cache_path}, latent shape: {l.shape}") + save_latent_cache(item, l) + + +def encode_datasets(datasets: list[BaseDataset], encode: callable, args: argparse.Namespace): + num_workers = args.num_workers if args.num_workers is not None else max(1, os.cpu_count() - 1) + for i, dataset in enumerate(datasets): + logger.info(f"Encoding 
dataset [{i}]") + all_latent_cache_paths = [] + for _, batch in tqdm(dataset.retrieve_latent_cache_batches(num_workers)): + all_latent_cache_paths.extend([item.latent_cache_path for item in batch]) + + if args.skip_existing: + filtered_batch = [item for item in batch if not os.path.exists(item.latent_cache_path)] + if len(filtered_batch) == 0: + continue + batch = filtered_batch + + bs = args.batch_size if args.batch_size is not None else len(batch) + for i in range(0, len(batch), bs): + encode(batch[i : i + bs]) + + # normalize paths + all_latent_cache_paths = [os.path.normpath(p) for p in all_latent_cache_paths] + all_latent_cache_paths = set(all_latent_cache_paths) + + # remove old cache files not in the dataset + all_cache_files = dataset.get_all_latent_cache_files() + for cache_file in all_cache_files: + if os.path.normpath(cache_file) not in all_latent_cache_paths: + if args.keep_cache: + logger.info(f"Keep cache file not in the dataset: {cache_file}") + else: + os.remove(cache_file) + logger.info(f"Removed old cache file: {cache_file}") + + +def main(): + parser = setup_parser_common() + parser = hv_setup_parser(parser) + + args = parser.parse_args() + + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_HUNYUAN_VIDEO) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + if args.debug_mode is not None: + show_datasets(datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images) + return + + assert args.vae is not None, "vae checkpoint is required" + + # Load VAE model: HunyuanVideo VAE model is float16 + vae_dtype = torch.float16 if args.vae_dtype is None else str_to_dtype(args.vae_dtype) + vae, _, s_ratio, t_ratio = load_vae(vae_dtype=vae_dtype, device=device, vae_path=args.vae) + vae.eval() + logger.info(f"Loaded VAE: {vae.config}, dtype: {vae.dtype}") + + if args.vae_chunk_size is not None: + vae.set_chunk_size_for_causal_conv_3d(args.vae_chunk_size) + logger.info(f"Set chunk_size to {args.vae_chunk_size} for CausalConv3d in VAE") + if args.vae_spatial_tile_sample_min_size is not None: + vae.enable_spatial_tiling(True) + vae.tile_sample_min_size = args.vae_spatial_tile_sample_min_size + vae.tile_latent_min_size = args.vae_spatial_tile_sample_min_size // 8 + elif args.vae_tiling: + vae.enable_spatial_tiling(True) + + # Encode images + def encode(one_batch: list[ItemInfo]): + encode_and_save_batch(vae, one_batch) + + encode_datasets(datasets, encode, args) + + +def setup_parser_common() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser() + + parser.add_argument("--dataset_config", type=str, required=True, help="path to dataset config .toml file") + parser.add_argument("--vae", type=str, required=False, default=None, help="path to vae checkpoint") + parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is float16") + parser.add_argument("--device", type=str, default=None, help="device to use, default is cuda if available") + parser.add_argument( + "--batch_size", type=int, default=None, help="batch size, override dataset config if 
dataset batch size > this" + ) + parser.add_argument("--num_workers", type=int, default=None, help="number of workers for dataset. default is cpu count-1") + parser.add_argument("--skip_existing", action="store_true", help="skip existing cache files") + parser.add_argument("--keep_cache", action="store_true", help="keep cache files not in dataset") + parser.add_argument("--debug_mode", type=str, default=None, choices=["image", "console", "video"], help="debug mode") + parser.add_argument("--console_width", type=int, default=80, help="debug mode: console width") + parser.add_argument( + "--console_back", type=str, default=None, help="debug mode: console background color, one of ascii_magic.Back" + ) + parser.add_argument( + "--console_num_images", + type=int, + default=None, + help="debug mode: not interactive, number of images to show for each dataset", + ) + return parser + + +def hv_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument( + "--vae_tiling", + action="store_true", + help="enable spatial tiling for VAE, default is False. If vae_spatial_tile_sample_min_size is set, this is automatically enabled", + ) + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + return parser + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/cache_text_encoder_outputs.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/cache_text_encoder_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..1117c3b14d7291dc1605c9037a6c844f7d107585 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/cache_text_encoder_outputs.py @@ -0,0 +1,215 @@ +import argparse +import os +from typing import Optional, Union + +import numpy as np +import torch +from tqdm import tqdm + +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +import accelerate + +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_HUNYUAN_VIDEO, BaseDataset, ItemInfo, save_text_encoder_output_cache +from musubi_tuner.hunyuan_model import text_encoder as text_encoder_module +from musubi_tuner.hunyuan_model.text_encoder import TextEncoder + +import logging + +from musubi_tuner.utils.model_utils import str_to_dtype + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def encode_prompt(text_encoder: TextEncoder, prompt: Union[str, list[str]]): + data_type = "video" # video only, image is not supported + text_inputs = text_encoder.text2tokens(prompt, data_type=data_type) + + with torch.no_grad(): + prompt_outputs = text_encoder.encode(text_inputs, data_type=data_type) + + return prompt_outputs.hidden_state, prompt_outputs.attention_mask + + +def encode_and_save_batch( + text_encoder: TextEncoder, batch: list[ItemInfo], is_llm: bool, accelerator: Optional[accelerate.Accelerator] +): + prompts = [item.caption for item in batch] + # print(prompts) + + # encode prompt + if accelerator is not None: + with accelerator.autocast(): + prompt_embeds, prompt_mask = encode_prompt(text_encoder, prompts) + else: + prompt_embeds, prompt_mask = encode_prompt(text_encoder, prompts) + + # # convert to fp16 if needed + # if prompt_embeds.dtype == torch.float32 and text_encoder.dtype != torch.float32: + # 
prompt_embeds = prompt_embeds.to(text_encoder.dtype) + + # save prompt cache + for item, embed, mask in zip(batch, prompt_embeds, prompt_mask): + save_text_encoder_output_cache(item, embed, mask, is_llm) + + +def prepare_cache_files_and_paths(datasets: list[BaseDataset]): + all_cache_files_for_dataset = [] # exisiting cache files + all_cache_paths_for_dataset = [] # all cache paths in the dataset + for dataset in datasets: + all_cache_files = [os.path.normpath(file) for file in dataset.get_all_text_encoder_output_cache_files()] + all_cache_files = set(all_cache_files) + all_cache_files_for_dataset.append(all_cache_files) + + all_cache_paths_for_dataset.append(set()) + return all_cache_files_for_dataset, all_cache_paths_for_dataset + + +def process_text_encoder_batches( + num_workers: Optional[int], + skip_existing: bool, + batch_size: int, + datasets: list[BaseDataset], + all_cache_files_for_dataset: list[set], + all_cache_paths_for_dataset: list[set], + encode: callable, +): + num_workers = num_workers if num_workers is not None else max(1, os.cpu_count() - 1) + for i, dataset in enumerate(datasets): + logger.info(f"Encoding dataset [{i}]") + all_cache_files = all_cache_files_for_dataset[i] + all_cache_paths = all_cache_paths_for_dataset[i] + for batch in tqdm(dataset.retrieve_text_encoder_output_cache_batches(num_workers)): + # update cache files (it's ok if we update it multiple times) + all_cache_paths.update([os.path.normpath(item.text_encoder_output_cache_path) for item in batch]) + + # skip existing cache files + if skip_existing: + filtered_batch = [ + item for item in batch if not os.path.normpath(item.text_encoder_output_cache_path) in all_cache_files + ] + # print(f"Filtered {len(batch) - len(filtered_batch)} existing cache files") + if len(filtered_batch) == 0: + continue + batch = filtered_batch + + bs = batch_size if batch_size is not None else len(batch) + for i in range(0, len(batch), bs): + encode(batch[i : i + bs]) + + +def post_process_cache_files( + datasets: list[BaseDataset], all_cache_files_for_dataset: list[set], all_cache_paths_for_dataset: list[set], keep_cache: bool +): + for i, dataset in enumerate(datasets): + all_cache_files = all_cache_files_for_dataset[i] + all_cache_paths = all_cache_paths_for_dataset[i] + for cache_file in all_cache_files: + if cache_file not in all_cache_paths: + if keep_cache: + logger.info(f"Keep cache file not in the dataset: {cache_file}") + else: + os.remove(cache_file) + logger.info(f"Removed old cache file: {cache_file}") + + +def main(): + parser = setup_parser_common() + parser = hv_setup_parser(parser) + + args = parser.parse_args() + + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_HUNYUAN_VIDEO) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + # define accelerator for fp8 inference + accelerator = None + if args.fp8_llm: + accelerator = accelerate.Accelerator(mixed_precision="fp16") + + # prepare cache files and paths: all_cache_files_for_dataset = exisiting cache files, all_cache_paths_for_dataset = all cache paths in the dataset + 
all_cache_files_for_dataset, all_cache_paths_for_dataset = prepare_cache_files_and_paths(datasets) + + # Load Text Encoder 1 + text_encoder_dtype = torch.float16 if args.text_encoder_dtype is None else str_to_dtype(args.text_encoder_dtype) + logger.info(f"loading text encoder 1: {args.text_encoder1}") + text_encoder_1 = text_encoder_module.load_text_encoder_1(args.text_encoder1, device, args.fp8_llm, text_encoder_dtype) + text_encoder_1.to(device=device) + + # Encode with Text Encoder 1 (LLM) + logger.info("Encoding with Text Encoder 1") + + def encode_for_text_encoder_1(batch: list[ItemInfo]): + encode_and_save_batch(text_encoder_1, batch, is_llm=True, accelerator=accelerator) + + process_text_encoder_batches( + args.num_workers, + args.skip_existing, + args.batch_size, + datasets, + all_cache_files_for_dataset, + all_cache_paths_for_dataset, + encode_for_text_encoder_1, + ) + del text_encoder_1 + + # Load Text Encoder 2 + logger.info(f"loading text encoder 2: {args.text_encoder2}") + text_encoder_2 = text_encoder_module.load_text_encoder_2(args.text_encoder2, device, text_encoder_dtype) + text_encoder_2.to(device=device) + + # Encode with Text Encoder 2 + logger.info("Encoding with Text Encoder 2") + + def encode_for_text_encoder_2(batch: list[ItemInfo]): + encode_and_save_batch(text_encoder_2, batch, is_llm=False, accelerator=None) + + process_text_encoder_batches( + args.num_workers, + args.skip_existing, + args.batch_size, + datasets, + all_cache_files_for_dataset, + all_cache_paths_for_dataset, + encode_for_text_encoder_2, + ) + del text_encoder_2 + + # remove cache files not in dataset + post_process_cache_files(datasets, all_cache_files_for_dataset, all_cache_paths_for_dataset, args.keep_cache) + + +def setup_parser_common(): + parser = argparse.ArgumentParser() + + parser.add_argument("--dataset_config", type=str, required=True, help="path to dataset config .toml file") + parser.add_argument("--device", type=str, default=None, help="device to use, default is cuda if available") + parser.add_argument( + "--batch_size", type=int, default=None, help="batch size, override dataset config if dataset batch size > this" + ) + parser.add_argument("--num_workers", type=int, default=None, help="number of workers for dataset. 
default is cpu count-1") + parser.add_argument("--skip_existing", action="store_true", help="skip existing cache files") + parser.add_argument("--keep_cache", action="store_true", help="keep cache files not in dataset") + return parser + + +def hv_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory") + parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory") + parser.add_argument("--text_encoder_dtype", type=str, default=None, help="data type for Text Encoder, default is float16") + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)") + return parser + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/convert_lora.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/convert_lora.py new file mode 100644 index 0000000000000000000000000000000000000000..d610e9bb86c2554e745f4762e1d7214aa18d1be2 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/convert_lora.py @@ -0,0 +1,141 @@ +import argparse + +import torch +from safetensors.torch import load_file, save_file +from safetensors import safe_open +from musubi_tuner.utils import model_utils + +import logging + + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def convert_from_diffusers(prefix, weights_sd): + # convert from diffusers(?) to default LoRA + # Diffusers format: {"diffusion_model.module.name.lora_A.weight": weight, "diffusion_model.module.name.lora_B.weight": weight, ...} + # default LoRA format: {"prefix_module_name.lora_down.weight": weight, "prefix_module_name.lora_up.weight": weight, ...} + + # note: Diffusers has no alpha, so alpha is set to rank + new_weights_sd = {} + lora_dims = {} + for key, weight in weights_sd.items(): + diffusers_prefix, key_body = key.split(".", 1) + if diffusers_prefix != "diffusion_model" and diffusers_prefix != "transformer": + logger.warning(f"unexpected key: {key} in diffusers format") + continue + + new_key = f"{prefix}{key_body}".replace(".", "_").replace("_lora_A_", ".lora_down.").replace("_lora_B_", ".lora_up.") + new_weights_sd[new_key] = weight + + lora_name = new_key.split(".")[0] # before first dot + if lora_name not in lora_dims and "lora_down" in new_key: + lora_dims[lora_name] = weight.shape[0] + + # add alpha with rank + for lora_name, dim in lora_dims.items(): + new_weights_sd[f"{lora_name}.alpha"] = torch.tensor(dim) + + return new_weights_sd + + +def convert_to_diffusers(prefix, weights_sd): + # convert from default LoRA to diffusers + + # get alphas + lora_alphas = {} + for key, weight in weights_sd.items(): + if key.startswith(prefix): + lora_name = key.split(".", 1)[0] # before first dot + if lora_name not in lora_alphas and "alpha" in key: + lora_alphas[lora_name] = weight + + new_weights_sd = {} + for key, weight in weights_sd.items(): + if key.startswith(prefix): + if "alpha" in key: + continue + + lora_name = key.split(".", 1)[0] # before first dot + + module_name = lora_name[len(prefix) :] # remove "lora_unet_" + module_name = module_name.replace("_", ".") # replace "_" with "." + if ".cross.attn." in module_name or ".self.attn." 
in module_name: + # Wan2.1 lora name to module name: ugly but works + module_name = module_name.replace("cross.attn", "cross_attn") # fix cross attn + module_name = module_name.replace("self.attn", "self_attn") # fix self attn + module_name = module_name.replace("k.img", "k_img") # fix k img + module_name = module_name.replace("v.img", "v_img") # fix v img + else: + # HunyuanVideo lora name to module name: ugly but works + module_name = module_name.replace("double.blocks.", "double_blocks.") # fix double blocks + module_name = module_name.replace("single.blocks.", "single_blocks.") # fix single blocks + module_name = module_name.replace("img.", "img_") # fix img + module_name = module_name.replace("txt.", "txt_") # fix txt + module_name = module_name.replace("attn.", "attn_") # fix attn + + diffusers_prefix = "diffusion_model" + if "lora_down" in key: + new_key = f"{diffusers_prefix}.{module_name}.lora_A.weight" + dim = weight.shape[0] + elif "lora_up" in key: + new_key = f"{diffusers_prefix}.{module_name}.lora_B.weight" + dim = weight.shape[1] + else: + logger.warning(f"unexpected key: {key} in default LoRA format") + continue + + # scale weight by alpha + if lora_name in lora_alphas: + # we scale both down and up, so scale is sqrt + scale = lora_alphas[lora_name] / dim + scale = scale.sqrt() + weight = weight * scale + else: + logger.warning(f"missing alpha for {lora_name}") + + new_weights_sd[new_key] = weight + + return new_weights_sd + + +def convert(input_file, output_file, target_format): + logger.info(f"loading {input_file}") + weights_sd = load_file(input_file) + with safe_open(input_file, framework="pt") as f: + metadata = f.metadata() + + logger.info(f"converting to {target_format}") + prefix = "lora_unet_" + if target_format == "default": + new_weights_sd = convert_from_diffusers(prefix, weights_sd) + metadata = metadata or {} + model_utils.precalculate_safetensors_hashes(new_weights_sd, metadata) + elif target_format == "other": + new_weights_sd = convert_to_diffusers(prefix, weights_sd) + else: + raise ValueError(f"unknown target format: {target_format}") + + logger.info(f"saving to {output_file}") + save_file(new_weights_sd, output_file, metadata=metadata) + + logger.info("done") + + +def parse_args(): + parser = argparse.ArgumentParser(description="Convert LoRA weights between default and other formats") + parser.add_argument("--input", type=str, required=True, help="input model file") + parser.add_argument("--output", type=str, required=True, help="output model file") + parser.add_argument("--target", type=str, required=True, choices=["other", "default"], help="target format") + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + convert(args.input, args.output, args.target) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__init__.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/__init__.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a1a7735dfb857c4eb813b3da1ee10795956d1a3 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/__init__.cpython-312.pyc differ diff --git 
a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/config_utils.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/config_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b229653f671b2534f7e3b73df1c00896b6c6041e Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/config_utils.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/image_video_dataset.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/image_video_dataset.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..431d64bdeeff38d727dae1ffff6271fd6bde2b35 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/__pycache__/image_video_dataset.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/config_utils.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..90fac7fdb870f583a908bab7706a9f3ecc429c37 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/config_utils.py @@ -0,0 +1,400 @@ +import argparse +from dataclasses import ( + asdict, + dataclass, +) +import functools +import random +from textwrap import dedent, indent +import json +from pathlib import Path + +# from toolz import curry +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import toml +import voluptuous +from voluptuous import Any, ExactSequence, MultipleInvalid, Object, Schema + +from musubi_tuner.dataset.image_video_dataset import DatasetGroup, ImageDataset, VideoDataset + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +@dataclass +class BaseDatasetParams: + resolution: Tuple[int, int] = (960, 544) + enable_bucket: bool = False + bucket_no_upscale: bool = False + caption_extension: Optional[str] = None + batch_size: int = 1 + num_repeats: int = 1 + cache_directory: Optional[str] = None + debug_dataset: bool = False + architecture: str = "no_default" # short style like "hv" or "wan" + + +@dataclass +class ImageDatasetParams(BaseDatasetParams): + image_directory: Optional[str] = None + image_jsonl_file: Optional[str] = None + control_directory: Optional[str] = None + + # FramePack dependent parameters + fp_latent_window_size: Optional[int] = 9 + fp_1f_clean_indices: Optional[Sequence[int]] = None + fp_1f_target_index: Optional[int] = None + fp_1f_no_post: Optional[bool] = False + + +@dataclass +class VideoDatasetParams(BaseDatasetParams): + video_directory: Optional[str] = None + video_jsonl_file: Optional[str] = None + control_directory: Optional[str] = None + target_frames: Sequence[int] = (1,) + frame_extraction: Optional[str] = "head" + frame_stride: Optional[int] = 1 + frame_sample: Optional[int] = 1 + max_frames: Optional[int] = 129 + source_fps: Optional[float] = None + + # FramePack dependent parameters + fp_latent_window_size: Optional[int] = 9 + + +@dataclass +class DatasetBlueprint: + is_image_dataset: bool + params: Union[ImageDatasetParams, VideoDatasetParams] + + +@dataclass +class DatasetGroupBlueprint: + datasets: Sequence[DatasetBlueprint] + + +@dataclass +class Blueprint: + dataset_group: DatasetGroupBlueprint + + +class ConfigSanitizer: + # @curry + @staticmethod + def __validate_and_convert_twodim(klass, value: Sequence) 
-> Tuple: + Schema(ExactSequence([klass, klass]))(value) + return tuple(value) + + # @curry + @staticmethod + def __validate_and_convert_scalar_or_twodim(klass, value: Union[float, Sequence]) -> Tuple: + Schema(Any(klass, ExactSequence([klass, klass])))(value) + try: + Schema(klass)(value) + return (value, value) + except: + return ConfigSanitizer.__validate_and_convert_twodim(klass, value) + + # datasets schema + DATASET_ASCENDABLE_SCHEMA = { + "caption_extension": str, + "batch_size": int, + "num_repeats": int, + "resolution": functools.partial(__validate_and_convert_scalar_or_twodim.__func__, int), + "enable_bucket": bool, + "bucket_no_upscale": bool, + } + IMAGE_DATASET_DISTINCT_SCHEMA = { + "image_directory": str, + "image_jsonl_file": str, + "cache_directory": str, + "control_directory": str, + "fp_latent_window_size": int, + "fp_1f_clean_indices": [int], + "fp_1f_target_index": int, + "fp_1f_no_post": bool, + } + VIDEO_DATASET_DISTINCT_SCHEMA = { + "video_directory": str, + "video_jsonl_file": str, + "control_directory": str, + "target_frames": [int], + "frame_extraction": str, + "frame_stride": int, + "frame_sample": int, + "max_frames": int, + "cache_directory": str, + "source_fps": float, + } + + # options handled by argparse but not handled by user config + ARGPARSE_SPECIFIC_SCHEMA = { + "debug_dataset": bool, + } + + def __init__(self) -> None: + self.image_dataset_schema = self.__merge_dict( + self.DATASET_ASCENDABLE_SCHEMA, + self.IMAGE_DATASET_DISTINCT_SCHEMA, + ) + self.video_dataset_schema = self.__merge_dict( + self.DATASET_ASCENDABLE_SCHEMA, + self.VIDEO_DATASET_DISTINCT_SCHEMA, + ) + + def validate_flex_dataset(dataset_config: dict): + if "video_directory" in dataset_config or "video_jsonl_file" in dataset_config: + return Schema(self.video_dataset_schema)(dataset_config) + else: + return Schema(self.image_dataset_schema)(dataset_config) + + self.dataset_schema = validate_flex_dataset + + self.general_schema = self.__merge_dict( + self.DATASET_ASCENDABLE_SCHEMA, + ) + self.user_config_validator = Schema( + { + "general": self.general_schema, + "datasets": [self.dataset_schema], + } + ) + self.argparse_schema = self.__merge_dict( + self.ARGPARSE_SPECIFIC_SCHEMA, + ) + self.argparse_config_validator = Schema(Object(self.argparse_schema), extra=voluptuous.ALLOW_EXTRA) + + def sanitize_user_config(self, user_config: dict) -> dict: + try: + return self.user_config_validator(user_config) + except MultipleInvalid: + # TODO: clarify the error message + logger.error("Invalid user config / ユーザ設定の形式が正しくないようです") + raise + + # NOTE: In nature, argument parser result is not needed to be sanitize + # However this will help us to detect program bug + def sanitize_argparse_namespace(self, argparse_namespace: argparse.Namespace) -> argparse.Namespace: + try: + return self.argparse_config_validator(argparse_namespace) + except MultipleInvalid: + # XXX: this should be a bug + logger.error( + "Invalid cmdline parsed arguments. This should be a bug. 
/ コマンドラインのパース結果が正しくないようです。プログラムのバグの可能性が高いです。" + ) + raise + + # NOTE: value would be overwritten by latter dict if there is already the same key + @staticmethod + def __merge_dict(*dict_list: dict) -> dict: + merged = {} + for schema in dict_list: + # merged |= schema + for k, v in schema.items(): + merged[k] = v + return merged + + +class BlueprintGenerator: + BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME = {} + + def __init__(self, sanitizer: ConfigSanitizer): + self.sanitizer = sanitizer + + # runtime_params is for parameters which is only configurable on runtime, such as tokenizer + def generate(self, user_config: dict, argparse_namespace: argparse.Namespace, **runtime_params) -> Blueprint: + sanitized_user_config = self.sanitizer.sanitize_user_config(user_config) + sanitized_argparse_namespace = self.sanitizer.sanitize_argparse_namespace(argparse_namespace) + + argparse_config = {k: v for k, v in vars(sanitized_argparse_namespace).items() if v is not None} + general_config = sanitized_user_config.get("general", {}) + + dataset_blueprints = [] + for dataset_config in sanitized_user_config.get("datasets", []): + is_image_dataset = "image_directory" in dataset_config or "image_jsonl_file" in dataset_config + if is_image_dataset: + dataset_params_klass = ImageDatasetParams + else: + dataset_params_klass = VideoDatasetParams + + params = self.generate_params_by_fallbacks( + dataset_params_klass, [dataset_config, general_config, argparse_config, runtime_params] + ) + dataset_blueprints.append(DatasetBlueprint(is_image_dataset, params)) + + dataset_group_blueprint = DatasetGroupBlueprint(dataset_blueprints) + + return Blueprint(dataset_group_blueprint) + + @staticmethod + def generate_params_by_fallbacks(param_klass, fallbacks: Sequence[dict]): + name_map = BlueprintGenerator.BLUEPRINT_PARAM_NAME_TO_CONFIG_OPTNAME + search_value = BlueprintGenerator.search_value + default_params = asdict(param_klass()) + param_names = default_params.keys() + + params = {name: search_value(name_map.get(name, name), fallbacks, default_params.get(name)) for name in param_names} + + return param_klass(**params) + + @staticmethod + def search_value(key: str, fallbacks: Sequence[dict], default_value=None): + for cand in fallbacks: + value = cand.get(key) + if value is not None: + return value + + return default_value + + +# if training is True, it will return a dataset group for training, otherwise for caching +def generate_dataset_group_by_blueprint(dataset_group_blueprint: DatasetGroupBlueprint, training: bool = False) -> DatasetGroup: + datasets: List[Union[ImageDataset, VideoDataset]] = [] + + for dataset_blueprint in dataset_group_blueprint.datasets: + if dataset_blueprint.is_image_dataset: + dataset_klass = ImageDataset + else: + dataset_klass = VideoDataset + + dataset = dataset_klass(**asdict(dataset_blueprint.params)) + datasets.append(dataset) + + # assertion + cache_directories = [dataset.cache_directory for dataset in datasets] + num_of_unique_cache_directories = len(set(cache_directories)) + if num_of_unique_cache_directories != len(cache_directories): + raise ValueError( + "cache directory should be unique for each dataset (note that cache directory is image/video directory if not specified)" + + " / cache directory は各データセットごとに異なる必要があります(指定されていない場合はimage/video directoryが使われるので注意)" + ) + + # print info + info = "" + for i, dataset in enumerate(datasets): + is_image_dataset = isinstance(dataset, ImageDataset) + info += dedent( + f"""\ + [Dataset {i}] + is_image_dataset: {is_image_dataset} + resolution: 
{dataset.resolution} + batch_size: {dataset.batch_size} + num_repeats: {dataset.num_repeats} + caption_extension: "{dataset.caption_extension}" + enable_bucket: {dataset.enable_bucket} + bucket_no_upscale: {dataset.bucket_no_upscale} + cache_directory: "{dataset.cache_directory}" + debug_dataset: {dataset.debug_dataset} + """ + ) + + if is_image_dataset: + info += indent( + dedent( + f"""\ + image_directory: "{dataset.image_directory}" + image_jsonl_file: "{dataset.image_jsonl_file}" + fp_latent_window_size: {dataset.fp_latent_window_size} + fp_1f_clean_indices: {dataset.fp_1f_clean_indices} + fp_1f_target_index: {dataset.fp_1f_target_index} + fp_1f_no_post: {dataset.fp_1f_no_post} + \n""" + ), + " ", + ) + else: + info += indent( + dedent( + f"""\ + video_directory: "{dataset.video_directory}" + video_jsonl_file: "{dataset.video_jsonl_file}" + control_directory: "{dataset.control_directory}" + target_frames: {dataset.target_frames} + frame_extraction: {dataset.frame_extraction} + frame_stride: {dataset.frame_stride} + frame_sample: {dataset.frame_sample} + max_frames: {dataset.max_frames} + source_fps: {dataset.source_fps} + \n""" + ), + " ", + ) + logger.info(f"{info}") + + # make buckets first because it determines the length of dataset + # and set the same seed for all datasets + seed = random.randint(0, 2**31) # actual seed is seed + epoch_no + for i, dataset in enumerate(datasets): + # logger.info(f"[Dataset {i}]") + dataset.set_seed(seed) + if training: + dataset.prepare_for_training() + + return DatasetGroup(datasets) + + +def load_user_config(file: str) -> dict: + file: Path = Path(file) + if not file.is_file(): + raise ValueError(f"file not found / ファイルが見つかりません: {file}") + + if file.name.lower().endswith(".json"): + try: + with open(file, "r", encoding="utf-8") as f: + config = json.load(f) + except Exception: + logger.error( + f"Error on parsing JSON config file. Please check the format. / JSON 形式の設定ファイルの読み込みに失敗しました。文法が正しいか確認してください。: {file}" + ) + raise + elif file.name.lower().endswith(".toml"): + try: + config = toml.load(file) + except Exception: + logger.error( + f"Error on parsing TOML config file. Please check the format. 
/ TOML 形式の設定ファイルの読み込みに失敗しました。文法が正しいか確認してください。: {file}" + ) + raise + else: + raise ValueError(f"not supported config file format / 対応していない設定ファイルの形式です: {file}") + + return config + + +# for config test +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("dataset_config") + config_args, remain = parser.parse_known_args() + + parser = argparse.ArgumentParser() + parser.add_argument("--debug_dataset", action="store_true") + argparse_namespace = parser.parse_args(remain) + + logger.info("[argparse_namespace]") + logger.info(f"{vars(argparse_namespace)}") + + user_config = load_user_config(config_args.dataset_config) + + logger.info("") + logger.info("[user_config]") + logger.info(f"{user_config}") + + sanitizer = ConfigSanitizer() + sanitized_user_config = sanitizer.sanitize_user_config(user_config) + + logger.info("") + logger.info("[sanitized_user_config]") + logger.info(f"{sanitized_user_config}") + + blueprint = BlueprintGenerator(sanitizer).generate(user_config, argparse_namespace) + + logger.info("") + logger.info("[blueprint]") + logger.info(f"{blueprint}") + + dataset_group = generate_dataset_group_by_blueprint(blueprint.dataset_group) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/dataset_config.md b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/dataset_config.md new file mode 100644 index 0000000000000000000000000000000000000000..a6d61978aac81e34a0a3768b05f3673adea34521 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/dataset_config.md @@ -0,0 +1,538 @@ +> 📝 Click on the language section to expand / 言語をクリックして展開 + +## Dataset Configuration + +Please create a TOML file for dataset configuration. + +Image and video datasets are supported. The configuration file can include multiple datasets, either image or video datasets, with caption text files or metadata JSONL files. + +The cache directory must be different for each dataset. + +Each video is extracted frame by frame without additional processing and used for training. It is recommended to use videos with a frame rate of 24fps for HunyuanVideo, 16fps for Wan2.1 and 30fps for FramePack. You can check the videos that will be trained using `--debug_mode video` when caching latent (see [here](/README.md#latent-caching)). +
+日本語 + +データセットの設定を行うためのTOMLファイルを作成してください。 + +画像データセットと動画データセットがサポートされています。設定ファイルには、画像または動画データセットを複数含めることができます。キャプションテキストファイルまたはメタデータJSONLファイルを使用できます。 + +キャッシュディレクトリは、各データセットごとに異なるディレクトリである必要があります。 + +動画は追加のプロセスなしでフレームごとに抽出され、学習に用いられます。そのため、HunyuanVideoは24fps、Wan2.1は16fps、FramePackは30fpsのフレームレートの動画を使用することをお勧めします。latentキャッシュ時の`--debug_mode video`を使用すると、学習される動画を確認できます([こちら](/README.ja.md#latentの事前キャッシュ)を参照)。 +
+
+### Sample for Image Dataset with Caption Text Files
+
+```toml
+# resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale should be set in either general or datasets
+# otherwise, the default values will be used for each item
+
+# general configurations
+[general]
+resolution = [960, 544]
+caption_extension = ".txt"
+batch_size = 1
+enable_bucket = true
+bucket_no_upscale = false
+
+[[datasets]]
+image_directory = "/path/to/image_dir"
+cache_directory = "/path/to/cache_directory"
+num_repeats = 1 # optional, default is 1. Number of times to repeat the dataset. Useful to balance the multiple datasets with different sizes.
+
+# other datasets can be added here. each dataset can have different configurations
+```
+
+`cache_directory` is optional; the default is None, which uses the same directory as the image directory. However, we recommend setting a cache directory explicitly to avoid accidentally sharing cache files between different datasets.
+
+`num_repeats` is also optional, default is 1 (no repeat). It repeats the images (or videos) that many times to expand the dataset. For example, if `num_repeats = 2` and there are 20 images in the dataset, each image is used twice (with the same caption), for a total of 40 images. It is useful for balancing multiple datasets of different sizes.
+
+
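+As a conceptual illustration of the `num_repeats` example above (20 images, `num_repeats = 2`, 40 training samples): the repetition simply lists each item that many times per epoch. This is a minimal sketch, not the actual dataset code; file names are placeholders.
+
+```python
+# Conceptual effect of num_repeats: each item (with its caption)
+# appears num_repeats times in one epoch.
+items = [f"image_{i:02d}.jpg" for i in range(20)]
+num_repeats = 2
+epoch_items = items * num_repeats
+print(len(epoch_items))  # 40
+```
+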
+日本語 + +`cache_directory` はオプションです。デフォルトは画像ディレクトリと同じディレクトリに設定されます。ただし、異なるデータセット間でキャッシュファイルが共有されるのを防ぐために、明示的に別のキャッシュディレクトリを設定することをお勧めします。 + +`num_repeats` はオプションで、デフォルトは 1 です(繰り返しなし)。画像(や動画)を、その回数だけ単純に繰り返してデータセットを拡張します。たとえば`num_repeats = 2`としたとき、画像20枚のデータセットなら、各画像が2枚ずつ(同一のキャプションで)計40枚存在した場合と同じになります。異なるデータ数のデータセット間でバランスを取るために使用可能です。 + +resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale は general または datasets のどちらかに設定してください。省略時は各項目のデフォルト値が使用されます。 + +`[[datasets]]`以下を追加することで、他のデータセットを追加できます。各データセットには異なる設定を持てます。 +
+ +### Sample for Image Dataset with Metadata JSONL File + +```toml +# resolution, batch_size, num_repeats, enable_bucket, bucket_no_upscale should be set in either general or datasets +# caption_extension is not required for metadata jsonl file +# cache_directory is required for each dataset with metadata jsonl file + +# general configurations +[general] +resolution = [960, 544] +batch_size = 1 +enable_bucket = true +bucket_no_upscale = false + +[[datasets]] +image_jsonl_file = "/path/to/metadata.jsonl" +cache_directory = "/path/to/cache_directory" # required for metadata jsonl file +num_repeats = 1 # optional, default is 1. Same as above. + +# other datasets can be added here. each dataset can have different configurations +``` + +JSONL file format for metadata: + +```json +{"image_path": "/path/to/image1.jpg", "caption": "A caption for image1"} +{"image_path": "/path/to/image2.jpg", "caption": "A caption for image2"} +``` + +
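+If you need to create such a metadata JSONL file programmatically, one way is to write one JSON object per line. This is a minimal sketch with placeholder paths and captions, not part of the tooling in this repository.
+
+```python
+import json
+
+entries = [
+    {"image_path": "/path/to/image1.jpg", "caption": "A caption for image1"},
+    {"image_path": "/path/to/image2.jpg", "caption": "A caption for image2"},
+]
+with open("/path/to/metadata.jsonl", "w", encoding="utf-8") as f:
+    for entry in entries:
+        f.write(json.dumps(entry, ensure_ascii=False) + "\n")
+```
+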
+日本語 + +resolution, batch_size, num_repeats, enable_bucket, bucket_no_upscale は general または datasets のどちらかに設定してください。省略時は各項目のデフォルト値が使用されます。 + +metadata jsonl ファイルを使用する場合、caption_extension は必要ありません。また、cache_directory は必須です。 + +キャプションによるデータセットと同様に、複数のデータセットを追加できます。各データセットには異なる設定を持てます。 +
+ + +### Sample for Video Dataset with Caption Text Files + +```toml +# Common parameters (resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale) +# can be set in either general or datasets sections +# Video-specific parameters (target_frames, frame_extraction, frame_stride, frame_sample, max_frames, source_fps) +# must be set in each datasets section + +# general configurations +[general] +resolution = [960, 544] +caption_extension = ".txt" +batch_size = 1 +enable_bucket = true +bucket_no_upscale = false + +[[datasets]] +video_directory = "/path/to/video_dir" +cache_directory = "/path/to/cache_directory" # recommended to set cache directory +target_frames = [1, 25, 45] +frame_extraction = "head" +source_fps = 30.0 # optional, source fps for videos in the directory, decimal number + +[[datasets]] +video_directory = "/path/to/video_dir2" +cache_directory = "/path/to/cache_directory2" # recommended to set cache directory +frame_extraction = "full" +max_frames = 45 + +# other datasets can be added here. each dataset can have different configurations +``` + +__In HunyuanVideo and Wan2.1, the number of `target_frames` must be "N\*4+1" (N=0,1,2,...).__ Otherwise, it will be truncated to the nearest "N*4+1". + +In FramePack, it is recommended to set `frame_extraction` to `full` and `max_frames` to a sufficiently large value, as it can handle longer videos. However, if the video is too long, an Out of Memory error may occur during VAE encoding. The videos in FramePack are trimmed to "N * latent_window_size * 4 + 1" frames (for example, 37, 73, 109... if `latent_window_size` is 9). + +If the `source_fps` is specified, the videos in the directory are considered to be at this frame rate, and some frames will be skipped to match the model's frame rate (24 for HunyuanVideo and 16 for Wan2.1). __The value must be a decimal number, for example, `30.0` instead of `30`.__ The skipping is done automatically and does not consider the content of the images. Please check if the converted data is correct using `--debug_mode video`. + +If `source_fps` is not specified (default), all frames of the video will be used regardless of the video's frame rate. + +
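+To make the frame-count rules above concrete, the following sketch computes the "N\*4+1" truncation and the FramePack section lengths. It assumes "truncated to the nearest N\*4+1" means rounding down to the largest such value that does not exceed the requested length; the helper names are illustrative and not part of the scripts in this repository.
+
+```python
+def truncate_to_4n_plus_1(frames: int) -> int:
+    # Largest value of the form N*4+1 that does not exceed `frames`.
+    return (frames - 1) // 4 * 4 + 1
+
+def framepack_lengths(latent_window_size: int = 9, sections: int = 3) -> list[int]:
+    # "N * latent_window_size * 4 + 1" frames, as described above.
+    return [n * latent_window_size * 4 + 1 for n in range(1, sections + 1)]
+
+print(truncate_to_4n_plus_1(30))  # 29
+print(framepack_lengths(9))       # [37, 73, 109]
+```
+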
+日本語 + +共通パラメータ(resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale)は、generalまたはdatasetsのいずれかに設定できます。 +動画固有のパラメータ(target_frames, frame_extraction, frame_stride, frame_sample, max_frames, source_fps)は、各datasetsセクションに設定する必要があります。 + +__HunyuanVideoおよびWan2.1では、target_framesの数値は「N\*4+1」である必要があります。__ これ以外の値の場合は、最も近いN\*4+1の値に切り捨てられます。 + +FramePackでも同様ですが、FramePackでは動画が長くても学習可能なため、 `frame_extraction`に`full` を指定し、`max_frames`を十分に大きな値に設定することをお勧めします。ただし、あまりにも長すぎるとVAEのencodeでOut of Memoryエラーが発生する可能性があります。FramePackの動画は、「N * latent_window_size * 4 + 1」フレームにトリミングされます(latent_window_sizeが9の場合、37、73、109……)。 + +`source_fps`を指定した場合、ディレクトリ内の動画をこのフレームレートとみなして、モデルのフレームレートにあうようにいくつかのフレームをスキップします(HunyuanVideoは24、Wan2.1は16)。__小数点を含む数値で指定してください。__ 例:`30`ではなく`30.0`。スキップは機械的に行われ、画像の内容は考慮しません。変換後のデータが正しいか、`--debug_mode video`で確認してください。 + +`source_fps`を指定しない場合、動画のフレームは(動画自体のフレームレートに関係なく)すべて使用されます。 + +他の注意事項は画像データセットと同様です。 +
+ +### Sample for Video Dataset with Metadata JSONL File + +```toml +# Common parameters (resolution, caption_extension, batch_size, num_repeats, enable_bucket, bucket_no_upscale) +# can be set in either general or datasets sections +# Video-specific parameters (target_frames, frame_extraction, frame_stride, frame_sample, max_frames, source_fps) +# must be set in each datasets section + +# caption_extension is not required for metadata jsonl file +# cache_directory is required for each dataset with metadata jsonl file + +# general configurations +[general] +resolution = [960, 544] +batch_size = 1 +enable_bucket = true +bucket_no_upscale = false + +[[datasets]] +video_jsonl_file = "/path/to/metadata.jsonl" +target_frames = [1, 25, 45] +frame_extraction = "head" +cache_directory = "/path/to/cache_directory_head" +source_fps = 30.0 # optional, source fps for videos in the jsonl file +# same metadata jsonl file can be used for multiple datasets +[[datasets]] +video_jsonl_file = "/path/to/metadata.jsonl" +target_frames = [1] +frame_stride = 10 +cache_directory = "/path/to/cache_directory_stride" + +# other datasets can be added here. each dataset can have different configurations +``` + +JSONL file format for metadata: + +```json +{"video_path": "/path/to/video1.mp4", "caption": "A caption for video1"} +{"video_path": "/path/to/video2.mp4", "caption": "A caption for video2"} +``` + +`video_path` can be a directory containing multiple images. + +
+日本語 +metadata jsonl ファイルを使用する場合、caption_extension は必要ありません。また、cache_directory は必須です。 + +`video_path`は、複数の画像を含むディレクトリのパスでも構いません。 + +他の注意事項は今までのデータセットと同様です。 +
+ +### frame_extraction Options + +- `head`: Extract the first N frames from the video. +- `chunk`: Extract frames by splitting the video into chunks of N frames. +- `slide`: Extract frames from the video with a stride of `frame_stride`. +- `uniform`: Extract `frame_sample` samples uniformly from the video. +- `full`: Extract all frames from the video. + +In the case of `full`, the entire video is used, but it is trimmed to "N*4+1" frames. It is also trimmed to the `max_frames` if it exceeds that value. To avoid Out of Memory errors, please set `max_frames`. + +The frame extraction methods other than `full` are recommended when the video contains repeated actions. `full` is recommended when each video represents a single complete motion. + +For example, consider a video with 40 frames. The following diagrams illustrate each extraction: + +
+日本語 + +- `head`: 動画から最初のNフレームを抽出します。 +- `chunk`: 動画をNフレームずつに分割してフレームを抽出します。 +- `slide`: `frame_stride`に指定したフレームごとに動画からNフレームを抽出します。 +- `uniform`: 動画から一定間隔で、`frame_sample`個のNフレームを抽出します。 +- `full`: 動画から全てのフレームを抽出します。 + +`full`の場合、各動画の全体を用いますが、「N*4+1」のフレーム数にトリミングされます。また`max_frames`を超える場合もその値にトリミングされます。Out of Memoryエラーを避けるために、`max_frames`を設定してください。 + +`full`以外の抽出方法は、動画が特定の動作を繰り返している場合にお勧めします。`full`はそれぞれの動画がひとつの完結したモーションの場合にお勧めします。 + +例えば、40フレームの動画を例とした抽出について、以下の図で説明します。 +
+ +``` +Original Video, 40 frames: x = frame, o = no frame +oooooooooooooooooooooooooooooooooooooooo + +head, target_frames = [1, 13, 25] -> extract head frames: +xooooooooooooooooooooooooooooooooooooooo +xxxxxxxxxxxxxooooooooooooooooooooooooooo +xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo + +chunk, target_frames = [13, 25] -> extract frames by splitting into chunks, into 13 and 25 frames: +xxxxxxxxxxxxxooooooooooooooooooooooooooo +oooooooooooooxxxxxxxxxxxxxoooooooooooooo +ooooooooooooooooooooooooooxxxxxxxxxxxxxo +xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo + +NOTE: Please do not include 1 in target_frames if you are using the frame_extraction "chunk". It will make the all frames to be extracted. +注: frame_extraction "chunk" を使用する場合、target_frames に 1 を含めないでください。全てのフレームが抽出されてしまいます。 + +slide, target_frames = [1, 13, 25], frame_stride = 10 -> extract N frames with a stride of 10: +xooooooooooooooooooooooooooooooooooooooo +ooooooooooxooooooooooooooooooooooooooooo +ooooooooooooooooooooxooooooooooooooooooo +ooooooooooooooooooooooooooooooxooooooooo +xxxxxxxxxxxxxooooooooooooooooooooooooooo +ooooooooooxxxxxxxxxxxxxooooooooooooooooo +ooooooooooooooooooooxxxxxxxxxxxxxooooooo +xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo +ooooooooooxxxxxxxxxxxxxxxxxxxxxxxxxooooo + +uniform, target_frames =[1, 13, 25], frame_sample = 4 -> extract `frame_sample` samples uniformly, N frames each: +xooooooooooooooooooooooooooooooooooooooo +oooooooooooooxoooooooooooooooooooooooooo +oooooooooooooooooooooooooxoooooooooooooo +ooooooooooooooooooooooooooooooooooooooox +xxxxxxxxxxxxxooooooooooooooooooooooooooo +oooooooooxxxxxxxxxxxxxoooooooooooooooooo +ooooooooooooooooooxxxxxxxxxxxxxooooooooo +oooooooooooooooooooooooooooxxxxxxxxxxxxx +xxxxxxxxxxxxxxxxxxxxxxxxxooooooooooooooo +oooooxxxxxxxxxxxxxxxxxxxxxxxxxoooooooooo +ooooooooooxxxxxxxxxxxxxxxxxxxxxxxxxooooo +oooooooooooooooxxxxxxxxxxxxxxxxxxxxxxxxx + +Three Original Videos, 20, 25, 35 frames: x = frame, o = no frame + +full, max_frames = 31 -> extract all frames (trimmed to the maximum length): +video1: xxxxxxxxxxxxxxxxx (trimmed to 17 frames) +video2: xxxxxxxxxxxxxxxxxxxxxxxxx (25 frames) +video3: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx (trimmed to 31 frames) +``` + +### Sample for Image Dataset with Control Images + +The dataset with control images. This is used for training the one frame training for FramePack. + +The dataset configuration with caption text files is similar to the image dataset, but with an additional `control_directory` parameter. + +The control images are used from the `control_directory` with the same filename (or different extension) as the image, for example, `image_dir/image1.jpg` and `control_dir/image1.png`. The images in `image_directory` should be the target images (the images to be generated during inference, the changed images). The `control_directory` should contain the starting images for inference. The captions should be stored in `image_directory`. + +If multiple control images are specified, the filenames of the control images should be numbered (excluding the extension). For example, specify `image_dir/image1.jpg` and `control_dir/image1_0.png`, `control_dir/image1_1.png`. You can also specify the numbers with four digits, such as `image1_0000.png`, `image1_0001.png`. + +The metadata JSONL file format is the same as the image dataset, but with an additional `control_path` parameter. 
+
+```json
+{"image_path": "/path/to/image1.jpg", "control_path": "/path/to/control1.png", "caption": "A caption for image1"}
+{"image_path": "/path/to/image2.jpg", "control_path": "/path/to/control2.png", "caption": "A caption for image2"}
+```
+
+If multiple control images are specified, the attribute names should be `control_path_0`, `control_path_1`, etc.
+
+```json
+{"image_path": "/path/to/image1.jpg", "control_path_0": "/path/to/control1_0.png", "control_path_1": "/path/to/control1_1.png", "caption": "A caption for image1"}
+{"image_path": "/path/to/image2.jpg", "control_path_0": "/path/to/control2_0.png", "control_path_1": "/path/to/control2_1.png", "caption": "A caption for image2"}
+```
+
+The control images can also have an alpha channel. In this case, the alpha channel of the image is used as a mask for the latent.
+
+
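+The filename pairing described above can be summarized with a small matcher. This is only a sketch of the naming convention (same stem with any extension, or numbered stems when multiple control images are used); the actual dataset code may resolve files differently.
+
+```python
+from pathlib import Path
+
+def find_control_images(image_path: str, control_dir: str) -> list[Path]:
+    # Same stem as the image (any extension), or numbered stems such as
+    # image1_0.png / image1_0000.png when multiple control images are used.
+    stem = Path(image_path).stem
+    control = Path(control_dir)
+    numbered = sorted(p for p in control.glob(f"{stem}_*.*") if p.stem[len(stem) + 1 :].isdigit())
+    single = sorted(control.glob(f"{stem}.*"))
+    return numbered if numbered else single
+
+# e.g. find_control_images("image_dir/image1.jpg", "control_dir")
+```
+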
+日本語 + +制御画像を持つデータセットです。現時点ではFramePackの単一フレーム学習に使用します。 + +キャプションファイルを用いる場合は`control_directory`を追加で指定してください。制御画像は、画像と同じファイル名(または拡張子のみが異なるファイル名)の、`control_directory`にある画像が使用されます(例:`image_dir/image1.jpg`と`control_dir/image1.png`)。`image_directory`の画像は学習対象の画像(推論時に生成する画像、変化後の画像)としてください。`control_directory`には推論時の開始画像を格納してください。キャプションは`image_directory`へ格納してください。 + +複数枚の制御画像が指定可能です。この場合、制御画像のファイル名(拡張子を除く)へ数字を付与してください。例えば、`image_dir/image1.jpg`と`control_dir/image1_0.png`, `control_dir/image1_1.png`のように指定します。`image1_0000.png`, `image1_0001.png`のように数字を4桁で指定することもできます。 + +メタデータJSONLファイルを使用する場合は、`control_path`を追加してください。複数枚の制御画像を指定する場合は、`control_path_0`, `control_path_1`のように数字を付与してください。 + +制御画像はアルファチャンネルを持つこともできます。この場合、画像のアルファチャンネルはlatentへのマスクとして使用されます。 + +
+
+### Sample for Video Dataset with Control Images
+
+The dataset with control videos is used for training ControlNet models.
+
+The dataset configuration with caption text files is similar to the video dataset, but with an additional `control_directory` parameter.
+
+The control video for each video is taken from `control_directory`, using the same filename (or a different extension) as the video, for example, `video_dir/video1.mp4` and `control_dir/video1.mp4` or `control_dir/video1.mov`. The control video can also be a directory without an extension, for example, `video_dir/video1.mp4` and `control_dir/video1`.
+
+```toml
+[[datasets]]
+video_directory = "/path/to/video_dir"
+control_directory = "/path/to/control_dir" # required for dataset with control videos
+cache_directory = "/path/to/cache_directory" # recommended to set cache directory
+target_frames = [1, 25, 45]
+frame_extraction = "head"
+```
+
+The dataset configuration with a metadata JSONL file is the same as the video dataset, but the metadata JSONL file must include the control video paths. The control video path can be a directory containing multiple images.
+
+```json
+{"video_path": "/path/to/video1.mp4", "control_path": "/path/to/control1.mp4", "caption": "A caption for video1"}
+{"video_path": "/path/to/video2.mp4", "control_path": "/path/to/control2.mp4", "caption": "A caption for video2"}
+```
+
+
+日本語 + +制御動画を持つデータセットです。ControlNetモデルの学習に使用します。 + +キャプションを用いる場合のデータセット設定は動画データセットと似ていますが、`control_directory`パラメータが追加されています。上にある例を参照してください。ある動画に対する制御用動画として、動画と同じファイル名(または拡張子のみが異なるファイル名)の、`control_directory`にある動画が使用されます(例:`video_dir/video1.mp4`と`control_dir/video1.mp4`または`control_dir/video1.mov`)。また、拡張子なしのディレクトリ内の、複数枚の画像を制御用動画として使用することもできます(例:`video_dir/video1.mp4`と`control_dir/video1`)。 + +データセット設定でメタデータJSONLファイルを使用する場合は、動画と制御用動画のパスを含める必要があります。制御用動画のパスは、複数枚の画像を含むディレクトリのパスでも構いません。 + +
+ +## Architecture-specific Settings / アーキテクチャ固有の設定 + +The dataset configuration is shared across all architectures. However, some architectures may require additional settings or have specific requirements for the dataset. + +### FramePack + +For FramePack, you can set the latent window size for training. It is recommended to set it to 9 for FramePack training. The default value is 9, so you can usually omit this setting. + +```toml +[[datasets]] +fp_latent_window_size = 9 +``` + +
+日本語 + +学習時のlatent window sizeを指定できます。FramePackの学習においては、9を指定することを推奨します。省略時は9が使用されますので、通常は省略して構いません。 + +
+ +### FramePack One Frame Training + +For the default one frame training of FramePack, you need to set the following parameters in the dataset configuration: + +```toml +[[datasets]] +fp_1f_clean_indices = [0] +fp_1f_target_index = 9 +fp_1f_no_post = false +``` + +**Advanced Settings:** + +**Note that these parameters are still experimental, and the optimal values are not yet known.** The parameters may also change in the future. + +`fp_1f_clean_indices` sets the `clean_indices` value passed to the FramePack model. You can specify multiple indices. `fp_1f_target_index` sets the index of the frame to be trained (generated). `fp_1f_no_post` sets whether to add a zero value as `clean_latent_post`, default is `false` (add zero value). + +The number of control images should match the number of indices specified in `fp_1f_clean_indices`. + +The default values mean that the first image (control image) is at index `0`, and the target image (the changed image) is at index `9`. + +For training with 1f-mc, set `fp_1f_clean_indices` to `[0, 1]` and `fp_1f_target_index` to `9` (or another value). This allows you to use multiple control images to train a single generated image. The control images will be two in this case. + +```toml +[[datasets]] +fp_1f_clean_indices = [0, 1] +fp_1f_target_index = 9 +fp_1f_no_post = false +``` + +For training with kisekaeichi, set `fp_1f_clean_indices` to `[0, 10]` and `fp_1f_target_index` to `1` (or another value). This allows you to use the starting image (the image just before the generation section) and the image following the generation section (equivalent to `clean_latent_post`) to train the first image of the generated video. The control images will be two in this case. `fp_1f_no_post` should be set to `true`. + +```toml +[[datasets]] +fp_1f_clean_indices = [0, 10] +fp_1f_target_index = 1 +fp_1f_no_post = true +``` + +With `fp_1f_clean_indices` and `fp_1f_target_index`, you can specify any number of control images and any index of the target image for training. + +If you set `fp_1f_no_post` to `false`, the `clean_latent_post_index` will be `1 + fp1_latent_window_size`. + +You can also set the `no_2x` and `no_4x` options for cache scripts to disable the clean latents 2x and 4x. + +The 2x indices are `1 + fp1_latent_window_size + 1` for two indices (usually `11, 12`), and the 4x indices are `1 + fp1_latent_window_size + 1 + 2` for sixteen indices (usually `13, 14, ..., 28`), regardless of `fp_1f_no_post` and `no_2x`, `no_4x` settings. + +
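+The index arithmetic in the paragraphs above can be restated in a few lines. This is only an illustration of the described layout (the text's `fp1_latent_window_size` appears to refer to the `fp_latent_window_size` dataset option); the function and key names below are not part of the training code.
+
+```python
+def one_frame_index_layout(latent_window_size: int = 9) -> dict:
+    clean_latent_post_index = 1 + latent_window_size           # 10 when window size is 9
+    two_x_start = clean_latent_post_index + 1                  # 11
+    four_x_start = two_x_start + 2                             # 13
+    return {
+        "clean_latent_post_index": clean_latent_post_index,
+        "clean_latent_2x_indices": list(range(two_x_start, two_x_start + 2)),     # [11, 12]
+        "clean_latent_4x_indices": list(range(four_x_start, four_x_start + 16)),  # [13, ..., 28]
+    }
+
+print(one_frame_index_layout(9))
+```
+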
+日本語 + +※ **以下のパラメータは研究中で最適値はまだ不明です。** またパラメータ自体も変更される可能性があります。 + +デフォルトの1フレーム学習を行う場合、`fp_1f_clean_indices`に`[0]`を、`fp_1f_target_index`に`9`(または5から15程度の値)を、`no_post`に`false`を設定してください。(記述例は英語版ドキュメントを参照、以降同じ。) + +**より高度な設定:** + +`fp_1f_clean_indices`は、FramePackモデルに渡される `clean_indices` の値を設定します。複数指定が可能です。`fp_1f_target_index`は、学習(生成)対象のフレームのインデックスを設定します。`fp_1f_no_post`は、`clean_latent_post` をゼロ値で追加するかどうかを設定します(デフォルトは`false`で、ゼロ値で追加します)。 + +制御画像の枚数は`fp_1f_clean_indices`に指定したインデックスの数とあわせてください。 + +デフォルトの1フレーム学習では、開始画像(制御画像)1枚をインデックス`0`、生成対象の画像(変化後の画像)をインデックス`9`に設定しています。 + +1f-mcの学習を行う場合は、`fp_1f_clean_indices`に `[0, 1]`を、`fp_1f_target_index`に`9`を設定してください。これにより動画の先頭の2枚の制御画像を使用して、後続の1枚の生成画像を学習します。制御画像は2枚になります。 + +kisekaeichiの学習を行う場合は、`fp_1f_clean_indices`に `[0, 10]`を、`fp_1f_target_index`に`1`(または他の値)を設定してください。これは、開始画像(生成セクションの直前の画像)(`clean_latent_pre`に相当)と、生成セクションに続く1枚の画像(`clean_latent_post`に相当)を使用して、生成動画の先頭の画像(`target_index=1`)を学習します。制御画像は2枚になります。`f1_1f_no_post`は`true`に設定してください。 + +`fp_1f_clean_indices`と`fp_1f_target_index`を応用することで、任意の枚数の制御画像を、任意のインデックスを指定して学習することが可能です。 + +`fp_1f_no_post`を`false`に設定すると、`clean_latent_post_index`は `1 + fp1_latent_window_size` になります。 + +推論時の `no_2x`、`no_4x`に対応する設定は、キャッシュスクリプトの引数で行えます。なお、2xのindexは `1 + fp1_latent_window_size + 1` からの2個(通常は`11, 12`)、4xのindexは `1 + fp1_latent_window_size + 1 + 2` からの16個になります(通常は`13, 14, ..., 28`)です。これらの値は`fp_1f_no_post`や`no_2x`, `no_4x`の設定に関わらず、常に同じです。 + +
+ +## Specifications + +```toml +# general configurations +[general] +resolution = [960, 544] # optional, [W, H], default is [960, 544]. This is the default resolution for all datasets +caption_extension = ".txt" # optional, default is None. This is the default caption extension for all datasets +batch_size = 1 # optional, default is 1. This is the default batch size for all datasets +num_repeats = 1 # optional, default is 1. Number of times to repeat the dataset. Useful to balance the multiple datasets with different sizes. +enable_bucket = true # optional, default is false. Enable bucketing for datasets +bucket_no_upscale = false # optional, default is false. Disable upscaling for bucketing. Ignored if enable_bucket is false + +### Image Dataset + +# sample image dataset with caption text files +[[datasets]] +image_directory = "/path/to/image_dir" +caption_extension = ".txt" # required for caption text files, if general caption extension is not set +resolution = [960, 544] # required if general resolution is not set +batch_size = 4 # optional, overwrite the default batch size +num_repeats = 1 # optional, overwrite the default num_repeats +enable_bucket = false # optional, overwrite the default bucketing setting +bucket_no_upscale = true # optional, overwrite the default bucketing setting +cache_directory = "/path/to/cache_directory" # optional, default is None to use the same directory as the image directory. NOTE: caching is always enabled +control_directory = "/path/to/control_dir" # optional, required for dataset with control images + +# sample image dataset with metadata **jsonl** file +[[datasets]] +image_jsonl_file = "/path/to/metadata.jsonl" # includes pairs of image files and captions +resolution = [960, 544] # required if general resolution is not set +cache_directory = "/path/to/cache_directory" # required for metadata jsonl file +# caption_extension is not required for metadata jsonl file +# batch_size, num_repeats, enable_bucket, bucket_no_upscale are also available for metadata jsonl file + +### Video Dataset + +# sample video dataset with caption text files +[[datasets]] +video_directory = "/path/to/video_dir" +caption_extension = ".txt" # required for caption text files, if general caption extension is not set +resolution = [960, 544] # required if general resolution is not set + +control_directory = "/path/to/control_dir" # optional, required for dataset with control images + +# following configurations must be set in each [[datasets]] section for video datasets + +target_frames = [1, 25, 79] # required for video dataset. list of video lengths to extract frames. each element must be N*4+1 (N=0,1,2,...) + +# NOTE: Please do not include 1 in target_frames if you are using the frame_extraction "chunk". It will make the all frames to be extracted. + +frame_extraction = "head" # optional, "head" or "chunk", "slide", "uniform". Default is "head" +frame_stride = 1 # optional, default is 1, available for "slide" frame extraction +frame_sample = 4 # optional, default is 1 (same as "head"), available for "uniform" frame extraction +max_frames = 129 # optional, default is 129. 
Maximum number of frames to extract, available for "full" frame extraction +# batch_size, num_repeats, enable_bucket, bucket_no_upscale, cache_directory are also available for video dataset + +# sample video dataset with metadata jsonl file +[[datasets]] +video_jsonl_file = "/path/to/metadata.jsonl" # includes pairs of video files and captions + +target_frames = [1, 79] + +cache_directory = "/path/to/cache_directory" # required for metadata jsonl file +# frame_extraction, frame_stride, frame_sample, max_frames are also available for metadata jsonl file +``` + + + +The metadata with .json file will be supported in the near future. + + + diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/image_video_dataset.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/image_video_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..c0804b3b12a4536c1ba4ec08049b2828e2c0c48b --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/dataset/image_video_dataset.py @@ -0,0 +1,1894 @@ +from concurrent.futures import ThreadPoolExecutor +import glob +import json +import math +import os +import random +import time +from typing import Any, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +from safetensors.torch import save_file, load_file +from safetensors import safe_open +from PIL import Image +import cv2 +import av + +from musubi_tuner.utils import safetensors_utils +from musubi_tuner.utils.model_utils import dtype_to_str + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +IMAGE_EXTENSIONS = [".png", ".jpg", ".jpeg", ".webp", ".bmp", ".PNG", ".JPG", ".JPEG", ".WEBP", ".BMP"] + +try: + import pillow_avif + + IMAGE_EXTENSIONS.extend([".avif", ".AVIF"]) +except: + pass + +# JPEG-XL on Linux +try: + from jxlpy import JXLImagePlugin + + IMAGE_EXTENSIONS.extend([".jxl", ".JXL"]) +except: + pass + +# JPEG-XL on Windows +try: + import pillow_jxl + + IMAGE_EXTENSIONS.extend([".jxl", ".JXL"]) +except: + pass + +VIDEO_EXTENSIONS = [ + ".mp4", + ".webm", + ".avi", + ".mkv", + ".mov", + ".flv", + ".wmv", + ".m4v", + ".mpg", + ".mpeg", + ".MP4", + ".WEBM", + ".AVI", + ".MKV", + ".MOV", + ".FLV", + ".WMV", + ".M4V", + ".MPG", + ".MPEG", +] # some of them are not tested + +ARCHITECTURE_HUNYUAN_VIDEO = "hv" +ARCHITECTURE_HUNYUAN_VIDEO_FULL = "hunyuan_video" +ARCHITECTURE_WAN = "wan" +ARCHITECTURE_WAN_FULL = "wan" +ARCHITECTURE_FRAMEPACK = "fp" +ARCHITECTURE_FRAMEPACK_FULL = "framepack" + + +def glob_images(directory, base="*"): + img_paths = [] + for ext in IMAGE_EXTENSIONS: + if base == "*": + img_paths.extend(glob.glob(os.path.join(glob.escape(directory), base + ext))) + else: + img_paths.extend(glob.glob(glob.escape(os.path.join(directory, base + ext)))) + img_paths = list(set(img_paths)) # remove duplicates + img_paths.sort() + return img_paths + + +def glob_videos(directory, base="*"): + video_paths = [] + for ext in VIDEO_EXTENSIONS: + if base == "*": + video_paths.extend(glob.glob(os.path.join(glob.escape(directory), base + ext))) + else: + video_paths.extend(glob.glob(glob.escape(os.path.join(directory, base + ext)))) + video_paths = list(set(video_paths)) # remove duplicates + video_paths.sort() + return video_paths + + +def divisible_by(num: int, divisor: int) -> int: + return num - num % divisor + + +def resize_image_to_bucket(image: Union[Image.Image, np.ndarray], bucket_reso: tuple[int, int]) -> np.ndarray: + """ + Resize the image to the bucket resolution. 
+ + bucket_reso: **(width, height)** + """ + is_pil_image = isinstance(image, Image.Image) + if is_pil_image: + image_width, image_height = image.size + else: + image_height, image_width = image.shape[:2] + + if bucket_reso == (image_width, image_height): + return np.array(image) if is_pil_image else image + + bucket_width, bucket_height = bucket_reso + + # resize the image to the bucket resolution to match the short side + scale_width = bucket_width / image_width + scale_height = bucket_height / image_height + scale = max(scale_width, scale_height) + image_width = int(image_width * scale + 0.5) + image_height = int(image_height * scale + 0.5) + + if scale > 1: + image = Image.fromarray(image) if not is_pil_image else image + image = image.resize((image_width, image_height), Image.LANCZOS) + image = np.array(image) + else: + image = np.array(image) if is_pil_image else image + image = cv2.resize(image, (image_width, image_height), interpolation=cv2.INTER_AREA) + + # crop the image to the bucket resolution + crop_left = (image_width - bucket_width) // 2 + crop_top = (image_height - bucket_height) // 2 + image = image[crop_top : crop_top + bucket_height, crop_left : crop_left + bucket_width] + return image + + +class ItemInfo: + def __init__( + self, + item_key: str, + caption: str, + original_size: tuple[int, int], + bucket_size: Optional[tuple[Any]] = None, + frame_count: Optional[int] = None, + content: Optional[np.ndarray] = None, + latent_cache_path: Optional[str] = None, + ) -> None: + self.item_key = item_key + self.caption = caption + self.original_size = original_size + self.bucket_size = bucket_size + self.frame_count = frame_count + self.content = content + self.latent_cache_path = latent_cache_path + self.text_encoder_output_cache_path: Optional[str] = None + + # np.ndarray for video, list[np.ndarray] for image with multiple controls + self.control_content: Optional[Union[np.ndarray, list[np.ndarray]]] = None + + # FramePack architecture specific + self.fp_latent_window_size: Optional[int] = None + self.fp_1f_clean_indices: Optional[list[int]] = None # indices of clean latents for 1f + self.fp_1f_target_index: Optional[int] = None # target index for 1f clean latents + self.fp_1f_no_post: Optional[bool] = None # whether to add zero values as clean latent post + + def __str__(self) -> str: + return ( + f"ItemInfo(item_key={self.item_key}, caption={self.caption}, " + + f"original_size={self.original_size}, bucket_size={self.bucket_size}, " + + f"frame_count={self.frame_count}, latent_cache_path={self.latent_cache_path}, content={self.content.shape if self.content is not None else None})" + ) + + +# We use simple if-else approach to support multiple architectures. +# Maybe we can use a plugin system in the future. + +# the keys of the dict are `_FxHxW_` for latents +# and `_` for other tensors + + +def save_latent_cache(item_info: ItemInfo, latent: torch.Tensor): + """HunyuanVideo architecture only. 
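+    The latent is stored in the safetensors cache under a key of the form "latents_{F}x{H}x{W}_{dtype}".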
HunyuanVideo doesn't support I2V and control latents""" + assert latent.dim() == 4, "latent should be 4D tensor (frame, channel, height, width)" + + _, F, H, W = latent.shape + dtype_str = dtype_to_str(latent.dtype) + sd = {f"latents_{F}x{H}x{W}_{dtype_str}": latent.detach().cpu()} + + save_latent_cache_common(item_info, sd, ARCHITECTURE_HUNYUAN_VIDEO_FULL) + + +def save_latent_cache_wan( + item_info: ItemInfo, + latent: torch.Tensor, + clip_embed: Optional[torch.Tensor], + image_latent: Optional[torch.Tensor], + control_latent: Optional[torch.Tensor], + f_indices: Optional[list[int]] = None, +): + """Wan architecture only""" + assert latent.dim() == 4, "latent should be 4D tensor (frame, channel, height, width)" + + _, F, H, W = latent.shape + dtype_str = dtype_to_str(latent.dtype) + sd = {f"latents_{F}x{H}x{W}_{dtype_str}": latent.detach().cpu()} + + if clip_embed is not None: + sd[f"clip_{dtype_str}"] = clip_embed.detach().cpu() + + if image_latent is not None: + sd[f"latents_image_{F}x{H}x{W}_{dtype_str}"] = image_latent.detach().cpu() + + if control_latent is not None: + sd[f"latents_control_{F}x{H}x{W}_{dtype_str}"] = control_latent.detach().cpu() + + if f_indices is not None: + dtype_str = dtype_to_str(torch.int32) + sd[f"f_indices_{dtype_str}"] = torch.tensor(f_indices, dtype=torch.int32) + + save_latent_cache_common(item_info, sd, ARCHITECTURE_WAN_FULL) + + +def save_latent_cache_framepack( + item_info: ItemInfo, + latent: torch.Tensor, + latent_indices: torch.Tensor, + clean_latents: torch.Tensor, + clean_latent_indices: torch.Tensor, + clean_latents_2x: torch.Tensor, + clean_latent_2x_indices: torch.Tensor, + clean_latents_4x: torch.Tensor, + clean_latent_4x_indices: torch.Tensor, + image_embeddings: torch.Tensor, +): + """FramePack architecture only""" + assert latent.dim() == 4, "latent should be 4D tensor (frame, channel, height, width)" + + _, F, H, W = latent.shape + dtype_str = dtype_to_str(latent.dtype) + sd = {f"latents_{F}x{H}x{W}_{dtype_str}": latent.detach().cpu().contiguous()} + + # `latents_xxx` must have {F, H, W} suffix + indices_dtype_str = dtype_to_str(latent_indices.dtype) + sd[f"image_embeddings_{dtype_str}"] = image_embeddings.detach().cpu() # image embeddings dtype is same as latents dtype + sd[f"latent_indices_{indices_dtype_str}"] = latent_indices.detach().cpu() + sd[f"clean_latent_indices_{indices_dtype_str}"] = clean_latent_indices.detach().cpu() + sd[f"latents_clean_{F}x{H}x{W}_{dtype_str}"] = clean_latents.detach().cpu().contiguous() + if clean_latent_2x_indices is not None: + sd[f"clean_latent_2x_indices_{indices_dtype_str}"] = clean_latent_2x_indices.detach().cpu() + if clean_latents_2x is not None: + sd[f"latents_clean_2x_{F}x{H}x{W}_{dtype_str}"] = clean_latents_2x.detach().cpu().contiguous() + if clean_latent_4x_indices is not None: + sd[f"clean_latent_4x_indices_{indices_dtype_str}"] = clean_latent_4x_indices.detach().cpu() + if clean_latents_4x is not None: + sd[f"latents_clean_4x_{F}x{H}x{W}_{dtype_str}"] = clean_latents_4x.detach().cpu().contiguous() + + # for key, value in sd.items(): + # print(f"{key}: {value.shape}") + save_latent_cache_common(item_info, sd, ARCHITECTURE_FRAMEPACK_FULL) + + +def save_latent_cache_common(item_info: ItemInfo, sd: dict[str, torch.Tensor], arch_fullname: str): + metadata = { + "architecture": arch_fullname, + "width": f"{item_info.original_size[0]}", + "height": f"{item_info.original_size[1]}", + "format_version": "1.0.1", + } + if item_info.frame_count is not None: + metadata["frame_count"] = 
f"{item_info.frame_count}" + + for key, value in sd.items(): + # NaN check and show warning, replace NaN with 0 + if torch.isnan(value).any(): + logger.warning(f"{key} tensor has NaN: {item_info.item_key}, replace NaN with 0") + value[torch.isnan(value)] = 0 + + latent_dir = os.path.dirname(item_info.latent_cache_path) + os.makedirs(latent_dir, exist_ok=True) + + save_file(sd, item_info.latent_cache_path, metadata=metadata) + + +def save_text_encoder_output_cache(item_info: ItemInfo, embed: torch.Tensor, mask: Optional[torch.Tensor], is_llm: bool): + """HunyuanVideo architecture only""" + assert ( + embed.dim() == 1 or embed.dim() == 2 + ), f"embed should be 2D tensor (feature, hidden_size) or (hidden_size,), got {embed.shape}" + assert mask is None or mask.dim() == 1, f"mask should be 1D tensor (feature), got {mask.shape}" + + sd = {} + dtype_str = dtype_to_str(embed.dtype) + text_encoder_type = "llm" if is_llm else "clipL" + sd[f"{text_encoder_type}_{dtype_str}"] = embed.detach().cpu() + if mask is not None: + sd[f"{text_encoder_type}_mask"] = mask.detach().cpu() + + save_text_encoder_output_cache_common(item_info, sd, ARCHITECTURE_HUNYUAN_VIDEO_FULL) + + +def save_text_encoder_output_cache_wan(item_info: ItemInfo, embed: torch.Tensor): + """Wan architecture only. Wan2.1 only has a single text encoder""" + + sd = {} + dtype_str = dtype_to_str(embed.dtype) + text_encoder_type = "t5" + sd[f"varlen_{text_encoder_type}_{dtype_str}"] = embed.detach().cpu() + + save_text_encoder_output_cache_common(item_info, sd, ARCHITECTURE_WAN_FULL) + + +def save_text_encoder_output_cache_framepack( + item_info: ItemInfo, llama_vec: torch.Tensor, llama_attention_mask: torch.Tensor, clip_l_pooler: torch.Tensor +): + """FramePack architecture only.""" + sd = {} + dtype_str = dtype_to_str(llama_vec.dtype) + sd[f"llama_vec_{dtype_str}"] = llama_vec.detach().cpu() + sd[f"llama_attention_mask"] = llama_attention_mask.detach().cpu() + dtype_str = dtype_to_str(clip_l_pooler.dtype) + sd[f"clip_l_pooler_{dtype_str}"] = clip_l_pooler.detach().cpu() + + save_text_encoder_output_cache_common(item_info, sd, ARCHITECTURE_FRAMEPACK_FULL) + + +def save_text_encoder_output_cache_common(item_info: ItemInfo, sd: dict[str, torch.Tensor], arch_fullname: str): + for key, value in sd.items(): + # NaN check and show warning, replace NaN with 0 + if torch.isnan(value).any(): + logger.warning(f"{key} tensor has NaN: {item_info.item_key}, replace NaN with 0") + value[torch.isnan(value)] = 0 + + metadata = { + "architecture": arch_fullname, + "caption1": item_info.caption, + "format_version": "1.0.1", + } + + if os.path.exists(item_info.text_encoder_output_cache_path): + # load existing cache and update metadata + with safetensors_utils.MemoryEfficientSafeOpen(item_info.text_encoder_output_cache_path) as f: + existing_metadata = f.metadata() + for key in f.keys(): + if key not in sd: # avoid overwriting by existing cache, we keep the new one + sd[key] = f.get_tensor(key) + + assert existing_metadata["architecture"] == metadata["architecture"], "architecture mismatch" + if existing_metadata["caption1"] != metadata["caption1"]: + logger.warning(f"caption mismatch: existing={existing_metadata['caption1']}, new={metadata['caption1']}, overwrite") + # TODO verify format_version + + existing_metadata.pop("caption1", None) + existing_metadata.pop("format_version", None) + metadata.update(existing_metadata) # copy existing metadata except caption and format_version + else: + text_encoder_output_dir = 
os.path.dirname(item_info.text_encoder_output_cache_path) + os.makedirs(text_encoder_output_dir, exist_ok=True) + + safetensors_utils.mem_eff_save_file(sd, item_info.text_encoder_output_cache_path, metadata=metadata) + + +class BucketSelector: + RESOLUTION_STEPS_HUNYUAN = 16 + RESOLUTION_STEPS_WAN = 16 + RESOLUTION_STEPS_FRAMEPACK = 16 + + def __init__( + self, resolution: Tuple[int, int], enable_bucket: bool = True, no_upscale: bool = False, architecture: str = "no_default" + ): + self.resolution = resolution + self.bucket_area = resolution[0] * resolution[1] + self.architecture = architecture + + if self.architecture == ARCHITECTURE_HUNYUAN_VIDEO: + self.reso_steps = BucketSelector.RESOLUTION_STEPS_HUNYUAN + elif self.architecture == ARCHITECTURE_WAN: + self.reso_steps = BucketSelector.RESOLUTION_STEPS_WAN + elif self.architecture == ARCHITECTURE_FRAMEPACK: + self.reso_steps = BucketSelector.RESOLUTION_STEPS_FRAMEPACK + else: + raise ValueError(f"Invalid architecture: {self.architecture}") + + if not enable_bucket: + # only define one bucket + self.bucket_resolutions = [resolution] + self.no_upscale = False + else: + # prepare bucket resolution + self.no_upscale = no_upscale + sqrt_size = int(math.sqrt(self.bucket_area)) + min_size = divisible_by(sqrt_size // 2, self.reso_steps) + self.bucket_resolutions = [] + for w in range(min_size, sqrt_size + self.reso_steps, self.reso_steps): + h = divisible_by(self.bucket_area // w, self.reso_steps) + self.bucket_resolutions.append((w, h)) + self.bucket_resolutions.append((h, w)) + + self.bucket_resolutions = list(set(self.bucket_resolutions)) + self.bucket_resolutions.sort() + + # calculate aspect ratio to find the nearest resolution + self.aspect_ratios = np.array([w / h for w, h in self.bucket_resolutions]) + + def get_bucket_resolution(self, image_size: tuple[int, int]) -> tuple[int, int]: + """ + return the bucket resolution for the given image size, (width, height) + """ + area = image_size[0] * image_size[1] + if self.no_upscale and area <= self.bucket_area: + w, h = image_size + w = divisible_by(w, self.reso_steps) + h = divisible_by(h, self.reso_steps) + return w, h + + aspect_ratio = image_size[0] / image_size[1] + ar_errors = self.aspect_ratios - aspect_ratio + bucket_id = np.abs(ar_errors).argmin() + return self.bucket_resolutions[bucket_id] + + +def load_video( + video_path: str, + start_frame: Optional[int] = None, + end_frame: Optional[int] = None, + bucket_selector: Optional[BucketSelector] = None, + bucket_reso: Optional[tuple[int, int]] = None, + source_fps: Optional[float] = None, + target_fps: Optional[float] = None, +) -> list[np.ndarray]: + """ + bucket_reso: if given, resize the video to the bucket resolution, (width, height) + """ + if source_fps is None or target_fps is None: + if os.path.isfile(video_path): + container = av.open(video_path) + video = [] + for i, frame in enumerate(container.decode(video=0)): + if start_frame is not None and i < start_frame: + continue + if end_frame is not None and i >= end_frame: + break + frame = frame.to_image() + + if bucket_selector is not None and bucket_reso is None: + bucket_reso = bucket_selector.get_bucket_resolution(frame.size) # calc resolution from first frame + + if bucket_reso is not None: + frame = resize_image_to_bucket(frame, bucket_reso) + else: + frame = np.array(frame) + + video.append(frame) + container.close() + else: + # load images in the directory + image_files = glob_images(video_path) + image_files.sort() + video = [] + for i in range(len(image_files)): + if 
start_frame is not None and i < start_frame: + continue + if end_frame is not None and i >= end_frame: + break + + image_file = image_files[i] + image = Image.open(image_file).convert("RGB") + + if bucket_selector is not None and bucket_reso is None: + bucket_reso = bucket_selector.get_bucket_resolution(image.size) # calc resolution from first frame + image = np.array(image) + if bucket_reso is not None: + image = resize_image_to_bucket(image, bucket_reso) + + video.append(image) + else: + # drop frames to match the target fps TODO commonize this code with the above if this works + frame_index_delta = target_fps / source_fps # example: 16 / 30 = 0.5333 + if os.path.isfile(video_path): + container = av.open(video_path) + video = [] + frame_index_with_fraction = 0.0 + previous_frame_index = -1 + for i, frame in enumerate(container.decode(video=0)): + target_frame_index = int(frame_index_with_fraction) + frame_index_with_fraction += frame_index_delta + + if target_frame_index == previous_frame_index: # drop this frame + continue + + # accept this frame + previous_frame_index = target_frame_index + + if start_frame is not None and target_frame_index < start_frame: + continue + if end_frame is not None and target_frame_index >= end_frame: + break + frame = frame.to_image() + + if bucket_selector is not None and bucket_reso is None: + bucket_reso = bucket_selector.get_bucket_resolution(frame.size) # calc resolution from first frame + + if bucket_reso is not None: + frame = resize_image_to_bucket(frame, bucket_reso) + else: + frame = np.array(frame) + + video.append(frame) + container.close() + else: + # load images in the directory + image_files = glob_images(video_path) + image_files.sort() + video = [] + frame_index_with_fraction = 0.0 + previous_frame_index = -1 + for i in range(len(image_files)): + target_frame_index = int(frame_index_with_fraction) + frame_index_with_fraction += frame_index_delta + + if target_frame_index == previous_frame_index: # drop this frame + continue + + # accept this frame + previous_frame_index = target_frame_index + + if start_frame is not None and target_frame_index < start_frame: + continue + if end_frame is not None and target_frame_index >= end_frame: + break + + image_file = image_files[i] + image = Image.open(image_file).convert("RGB") + + if bucket_selector is not None and bucket_reso is None: + bucket_reso = bucket_selector.get_bucket_resolution(image.size) # calc resolution from first frame + image = np.array(image) + if bucket_reso is not None: + image = resize_image_to_bucket(image, bucket_reso) + + video.append(image) + + return video + + +class BucketBatchManager: + + def __init__(self, bucketed_item_info: dict[tuple[Any], list[ItemInfo]], batch_size: int): + self.batch_size = batch_size + self.buckets = bucketed_item_info + self.bucket_resos = list(self.buckets.keys()) + self.bucket_resos.sort() + + # indices for enumerating batches. each batch is reso + batch_idx. 
reso is (width, height) or (width, height, frames) + self.bucket_batch_indices: list[tuple[tuple[Any], int]] = [] + for bucket_reso in self.bucket_resos: + bucket = self.buckets[bucket_reso] + num_batches = math.ceil(len(bucket) / self.batch_size) + for i in range(num_batches): + self.bucket_batch_indices.append((bucket_reso, i)) + + # do no shuffle here to avoid multiple datasets have different order + # self.shuffle() + + def show_bucket_info(self): + for bucket_reso in self.bucket_resos: + bucket = self.buckets[bucket_reso] + logger.info(f"bucket: {bucket_reso}, count: {len(bucket)}") + + logger.info(f"total batches: {len(self)}") + + def shuffle(self): + # shuffle each bucket + for bucket in self.buckets.values(): + random.shuffle(bucket) + + # shuffle the order of batches + random.shuffle(self.bucket_batch_indices) + + def __len__(self): + return len(self.bucket_batch_indices) + + def __getitem__(self, idx): + bucket_reso, batch_idx = self.bucket_batch_indices[idx] + bucket = self.buckets[bucket_reso] + start = batch_idx * self.batch_size + end = min(start + self.batch_size, len(bucket)) + + batch_tensor_data = {} + varlen_keys = set() + for item_info in bucket[start:end]: + sd_latent = load_file(item_info.latent_cache_path) + sd_te = load_file(item_info.text_encoder_output_cache_path) + sd = {**sd_latent, **sd_te} + + # TODO refactor this + for key in sd.keys(): + is_varlen_key = key.startswith("varlen_") # varlen keys are not stacked + content_key = key + + if is_varlen_key: + content_key = content_key.replace("varlen_", "") + + if content_key.endswith("_mask"): + pass + else: + content_key = content_key.rsplit("_", 1)[0] # remove dtype + if content_key.startswith("latents_"): + content_key = content_key.rsplit("_", 1)[0] # remove FxHxW + + if content_key not in batch_tensor_data: + batch_tensor_data[content_key] = [] + batch_tensor_data[content_key].append(sd[key]) + + if is_varlen_key: + varlen_keys.add(content_key) + + for key in batch_tensor_data.keys(): + if key not in varlen_keys: + batch_tensor_data[key] = torch.stack(batch_tensor_data[key]) + + return batch_tensor_data + + +class ContentDatasource: + def __init__(self): + self.caption_only = False # set to True to only fetch caption for Text Encoder caching + self.has_control = False + + def set_caption_only(self, caption_only: bool): + self.caption_only = caption_only + + def is_indexable(self): + return False + + def get_caption(self, idx: int) -> tuple[str, str]: + """ + Returns caption. May not be called if is_indexable() returns False. + """ + raise NotImplementedError + + def __len__(self): + raise NotImplementedError + + def __iter__(self): + raise NotImplementedError + + def __next__(self): + raise NotImplementedError + + +class ImageDatasource(ContentDatasource): + def __init__(self): + super().__init__() + + def get_image_data(self, idx: int) -> tuple[str, Image.Image, str]: + """ + Returns image data as a tuple of image path, image, and caption for the given index. + Key must be unique and valid as a file name. + May not be called if is_indexable() returns False. 
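+        Concrete datasources may additionally return an optional list of control images as a fourth element.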
+ """ + raise NotImplementedError + + +class ImageDirectoryDatasource(ImageDatasource): + def __init__( + self, + image_directory: str, + caption_extension: Optional[str] = None, + control_directory: Optional[str] = None, + control_count_per_image: int = 1, + ): + super().__init__() + self.image_directory = image_directory + self.caption_extension = caption_extension + self.control_directory = control_directory + self.control_count_per_image = control_count_per_image + self.current_idx = 0 + + # glob images + logger.info(f"glob images in {self.image_directory}") + self.image_paths = glob_images(self.image_directory) + logger.info(f"found {len(self.image_paths)} images") + + # glob control images if specified + if self.control_directory is not None: + logger.info(f"glob control images in {self.control_directory}") + self.has_control = True + self.control_paths = {} + for image_path in self.image_paths: + image_basename = os.path.basename(image_path) + image_basename_no_ext = os.path.splitext(image_basename)[0] + potential_paths = glob.glob(os.path.join(self.control_directory, os.path.splitext(image_basename)[0] + "*.*")) + if potential_paths: + # sort by the digits (`_0000`) suffix, prefer the one without the suffix + def sort_key(path): + basename = os.path.basename(path) + basename_no_ext = os.path.splitext(basename)[0] + if image_basename_no_ext == basename_no_ext: # prefer the one without suffix + return 0 + digits_suffix = basename_no_ext.rsplit("_", 1)[-1] + if not digits_suffix.isdigit(): + raise ValueError(f"Invalid digits suffix in {basename_no_ext}") + return int(digits_suffix) + 1 + + potential_paths.sort(key=sort_key) + if len(potential_paths) < control_count_per_image: + logger.error( + f"Not enough control images for {image_path}: found {len(potential_paths)}, expected {control_count_per_image}" + ) + raise ValueError( + f"Not enough control images for {image_path}: found {len(potential_paths)}, expected {control_count_per_image}" + ) + + # take the first `control_count_per_image` paths + self.control_paths[image_path] = potential_paths[:control_count_per_image] + logger.info(f"found {len(self.control_paths)} matching control images") + + missing_controls = len(self.image_paths) - len(self.control_paths) + if missing_controls > 0: + missing_control_paths = set(self.image_paths) - set(self.control_paths.keys()) + logger.error(f"Could not find matching control images for {missing_controls} images: {missing_control_paths}") + raise ValueError(f"Could not find matching control images for {missing_controls} images") + + def is_indexable(self): + return True + + def __len__(self): + return len(self.image_paths) + + def get_image_data(self, idx: int) -> tuple[str, Image.Image, str, Optional[Image.Image]]: + image_path = self.image_paths[idx] + image = Image.open(image_path).convert("RGB") + + _, caption = self.get_caption(idx) + + controls = None + if self.has_control: + controls = [] + for control_path in self.control_paths[image_path]: + control = Image.open(control_path) + if control.mode != "RGB" and control.mode != "RGBA": + control = control.convert("RGB") + controls.append(control) + + return image_path, image, caption, controls + + def get_caption(self, idx: int) -> tuple[str, str]: + image_path = self.image_paths[idx] + caption_path = os.path.splitext(image_path)[0] + self.caption_extension if self.caption_extension else "" + with open(caption_path, "r", encoding="utf-8") as f: + caption = f.read().strip() + return image_path, caption + + def __iter__(self): + 
self.current_idx = 0 + return self + + def __next__(self) -> callable: + """ + Returns a fetcher function that returns image data. + """ + if self.current_idx >= len(self.image_paths): + raise StopIteration + + if self.caption_only: + + def create_caption_fetcher(index): + return lambda: self.get_caption(index) + + fetcher = create_caption_fetcher(self.current_idx) + else: + + def create_image_fetcher(index): + return lambda: self.get_image_data(index) + + fetcher = create_image_fetcher(self.current_idx) + + self.current_idx += 1 + return fetcher + + +class ImageJsonlDatasource(ImageDatasource): + def __init__(self, image_jsonl_file: str, control_count_per_image: int = 1): + super().__init__() + self.image_jsonl_file = image_jsonl_file + self.control_count_per_image = control_count_per_image + self.current_idx = 0 + + # load jsonl + logger.info(f"load image jsonl from {self.image_jsonl_file}") + self.data = [] + with open(self.image_jsonl_file, "r", encoding="utf-8") as f: + for line in f: + try: + data = json.loads(line) + except json.JSONDecodeError: + logger.error(f"failed to load json: {line} @ {self.image_jsonl_file}") + raise + self.data.append(data) + logger.info(f"loaded {len(self.data)} images") + + # Normalize control paths + for item in self.data: + if "control_path" in item: + item["control_path_0"] = item.pop("control_path") + + # Ensure control paths are named consistently, from control_path_0000 to control_path_0, control_path_1, etc. + control_path_keys = [key for key in item.keys() if key.startswith("control_path_")] + control_path_keys.sort(key=lambda x: int(x.split("_")[-1])) + for i, key in enumerate(control_path_keys): + if key != f"control_path_{i}": + item[f"control_path_{i}"] = item.pop(key) + + # Check if there are control paths in the JSONL + self.has_control = any("control_path_0" in item for item in self.data) + if self.has_control: + missing_control_images = [ + item["image_path"] + for item in self.data + if sum(f"control_path_{i}" not in item for i in range(self.control_count_per_image)) > 0 + ] + if missing_control_images: + logger.error(f"Some images do not have control paths in JSONL data: {missing_control_images}") + raise ValueError(f"Some images do not have control paths in JSONL data: {missing_control_images}") + logger.info(f"found {len(self.data)} images with {self.control_count_per_image} control images per image in JSONL data") + + def is_indexable(self): + return True + + def __len__(self): + return len(self.data) + + def get_image_data(self, idx: int) -> tuple[str, Image.Image, str, Optional[list[Image.Image]]]: + data = self.data[idx] + image_path = data["image_path"] + image = Image.open(image_path).convert("RGB") + + caption = data["caption"] + + controls = None + if self.has_control: + controls = [] + for i in range(self.control_count_per_image): + control_path = data[f"control_path_{i}"] + control = Image.open(control_path) + if control.mode != "RGB" and control.mode != "RGBA": + control = control.convert("RGB") + controls.append(control) + + return image_path, image, caption, controls + + def get_caption(self, idx: int) -> tuple[str, str]: + data = self.data[idx] + image_path = data["image_path"] + caption = data["caption"] + return image_path, caption + + def __iter__(self): + self.current_idx = 0 + return self + + def __next__(self) -> callable: + if self.current_idx >= len(self.data): + raise StopIteration + + if self.caption_only: + + def create_caption_fetcher(index): + return lambda: self.get_caption(index) + + fetcher = 
create_caption_fetcher(self.current_idx) + + else: + + def create_fetcher(index): + return lambda: self.get_image_data(index) + + fetcher = create_fetcher(self.current_idx) + + self.current_idx += 1 + return fetcher + + +class VideoDatasource(ContentDatasource): + def __init__(self): + super().__init__() + + # None means all frames + self.start_frame = None + self.end_frame = None + + self.bucket_selector = None + + self.source_fps = None + self.target_fps = None + + def __len__(self): + raise NotImplementedError + + def get_video_data_from_path( + self, + video_path: str, + start_frame: Optional[int] = None, + end_frame: Optional[int] = None, + bucket_selector: Optional[BucketSelector] = None, + ) -> tuple[str, list[Image.Image], str]: + # this method can resize the video if bucket_selector is given to reduce the memory usage + + start_frame = start_frame if start_frame is not None else self.start_frame + end_frame = end_frame if end_frame is not None else self.end_frame + bucket_selector = bucket_selector if bucket_selector is not None else self.bucket_selector + + video = load_video( + video_path, start_frame, end_frame, bucket_selector, source_fps=self.source_fps, target_fps=self.target_fps + ) + return video + + def get_control_data_from_path( + self, + control_path: str, + start_frame: Optional[int] = None, + end_frame: Optional[int] = None, + bucket_selector: Optional[BucketSelector] = None, + ) -> list[Image.Image]: + start_frame = start_frame if start_frame is not None else self.start_frame + end_frame = end_frame if end_frame is not None else self.end_frame + bucket_selector = bucket_selector if bucket_selector is not None else self.bucket_selector + + control = load_video( + control_path, start_frame, end_frame, bucket_selector, source_fps=self.source_fps, target_fps=self.target_fps + ) + return control + + def set_start_and_end_frame(self, start_frame: Optional[int], end_frame: Optional[int]): + self.start_frame = start_frame + self.end_frame = end_frame + + def set_bucket_selector(self, bucket_selector: BucketSelector): + self.bucket_selector = bucket_selector + + def set_source_and_target_fps(self, source_fps: Optional[float], target_fps: Optional[float]): + self.source_fps = source_fps + self.target_fps = target_fps + + def __iter__(self): + raise NotImplementedError + + def __next__(self): + raise NotImplementedError + + +class VideoDirectoryDatasource(VideoDatasource): + def __init__(self, video_directory: str, caption_extension: Optional[str] = None, control_directory: Optional[str] = None): + super().__init__() + self.video_directory = video_directory + self.caption_extension = caption_extension + self.control_directory = control_directory # 新しく追加: コントロール画像ディレクトリ + self.current_idx = 0 + + # glob videos + logger.info(f"glob videos in {self.video_directory}") + self.video_paths = glob_videos(self.video_directory) + logger.info(f"found {len(self.video_paths)} videos") + + # glob control images if specified + if self.control_directory is not None: + logger.info(f"glob control videos in {self.control_directory}") + self.has_control = True + self.control_paths = {} + for video_path in self.video_paths: + video_basename = os.path.basename(video_path) + # construct control path from video path + # for example: video_path = "vid/video.mp4" -> control_path = "control/video.mp4" + control_path = os.path.join(self.control_directory, video_basename) + if os.path.exists(control_path): + self.control_paths[video_path] = control_path + else: + # use the same base name for control path 
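+                    # fallback: try a directory of frame images named after the video, then the same base name with another supported video extension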
+ base_name = os.path.splitext(video_basename)[0] + + # directory with images. for example: video_path = "vid/video.mp4" -> control_path = "control/video" + potential_path = os.path.join(self.control_directory, base_name) # no extension + if os.path.isdir(potential_path): + self.control_paths[video_path] = potential_path + else: + # another extension for control path + # for example: video_path = "vid/video.mp4" -> control_path = "control/video.mov" + for ext in VIDEO_EXTENSIONS: + potential_path = os.path.join(self.control_directory, base_name + ext) + if os.path.exists(potential_path): + self.control_paths[video_path] = potential_path + break + + logger.info(f"found {len(self.control_paths)} matching control videos/images") + # check if all videos have matching control paths, if not, raise an error + missing_controls = len(self.video_paths) - len(self.control_paths) + if missing_controls > 0: + # logger.warning(f"Could not find matching control videos/images for {missing_controls} videos") + missing_controls_videos = [video_path for video_path in self.video_paths if video_path not in self.control_paths] + logger.error( + f"Could not find matching control videos/images for {missing_controls} videos: {missing_controls_videos}" + ) + raise ValueError(f"Could not find matching control videos/images for {missing_controls} videos") + + def is_indexable(self): + return True + + def __len__(self): + return len(self.video_paths) + + def get_video_data( + self, + idx: int, + start_frame: Optional[int] = None, + end_frame: Optional[int] = None, + bucket_selector: Optional[BucketSelector] = None, + ) -> tuple[str, list[Image.Image], str, Optional[list[Image.Image]]]: + video_path = self.video_paths[idx] + video = self.get_video_data_from_path(video_path, start_frame, end_frame, bucket_selector) + + _, caption = self.get_caption(idx) + + control = None + if self.control_directory is not None and video_path in self.control_paths: + control_path = self.control_paths[video_path] + control = self.get_control_data_from_path(control_path, start_frame, end_frame, bucket_selector) + + return video_path, video, caption, control + + def get_caption(self, idx: int) -> tuple[str, str]: + video_path = self.video_paths[idx] + caption_path = os.path.splitext(video_path)[0] + self.caption_extension if self.caption_extension else "" + with open(caption_path, "r", encoding="utf-8") as f: + caption = f.read().strip() + return video_path, caption + + def __iter__(self): + self.current_idx = 0 + return self + + def __next__(self): + if self.current_idx >= len(self.video_paths): + raise StopIteration + + if self.caption_only: + + def create_caption_fetcher(index): + return lambda: self.get_caption(index) + + fetcher = create_caption_fetcher(self.current_idx) + + else: + + def create_fetcher(index): + return lambda: self.get_video_data(index) + + fetcher = create_fetcher(self.current_idx) + + self.current_idx += 1 + return fetcher + + +class VideoJsonlDatasource(VideoDatasource): + def __init__(self, video_jsonl_file: str): + super().__init__() + self.video_jsonl_file = video_jsonl_file + self.current_idx = 0 + + # load jsonl + logger.info(f"load video jsonl from {self.video_jsonl_file}") + self.data = [] + with open(self.video_jsonl_file, "r", encoding="utf-8") as f: + for line in f: + data = json.loads(line) + self.data.append(data) + logger.info(f"loaded {len(self.data)} videos") + + # Check if there are control paths in the JSONL + self.has_control = any("control_path" in item for item in self.data) + if 
self.has_control: + control_count = sum(1 for item in self.data if "control_path" in item) + if control_count < len(self.data): + missing_control_videos = [item["video_path"] for item in self.data if "control_path" not in item] + logger.error(f"Some videos do not have control paths in JSONL data: {missing_control_videos}") + raise ValueError(f"Some videos do not have control paths in JSONL data: {missing_control_videos}") + logger.info(f"found {control_count} control videos/images in JSONL data") + + def is_indexable(self): + return True + + def __len__(self): + return len(self.data) + + def get_video_data( + self, + idx: int, + start_frame: Optional[int] = None, + end_frame: Optional[int] = None, + bucket_selector: Optional[BucketSelector] = None, + ) -> tuple[str, list[Image.Image], str, Optional[list[Image.Image]]]: + data = self.data[idx] + video_path = data["video_path"] + video = self.get_video_data_from_path(video_path, start_frame, end_frame, bucket_selector) + + caption = data["caption"] + + control = None + if "control_path" in data and data["control_path"]: + control_path = data["control_path"] + control = self.get_control_data_from_path(control_path, start_frame, end_frame, bucket_selector) + + return video_path, video, caption, control + + def get_caption(self, idx: int) -> tuple[str, str]: + data = self.data[idx] + video_path = data["video_path"] + caption = data["caption"] + return video_path, caption + + def __iter__(self): + self.current_idx = 0 + return self + + def __next__(self): + if self.current_idx >= len(self.data): + raise StopIteration + + if self.caption_only: + + def create_caption_fetcher(index): + return lambda: self.get_caption(index) + + fetcher = create_caption_fetcher(self.current_idx) + + else: + + def create_fetcher(index): + return lambda: self.get_video_data(index) + + fetcher = create_fetcher(self.current_idx) + + self.current_idx += 1 + return fetcher + + +class BaseDataset(torch.utils.data.Dataset): + def __init__( + self, + resolution: Tuple[int, int] = (960, 544), + caption_extension: Optional[str] = None, + batch_size: int = 1, + num_repeats: int = 1, + enable_bucket: bool = False, + bucket_no_upscale: bool = False, + cache_directory: Optional[str] = None, + debug_dataset: bool = False, + architecture: str = "no_default", + ): + self.resolution = resolution + self.caption_extension = caption_extension + self.batch_size = batch_size + self.num_repeats = num_repeats + self.enable_bucket = enable_bucket + self.bucket_no_upscale = bucket_no_upscale + self.cache_directory = cache_directory + self.debug_dataset = debug_dataset + self.architecture = architecture + self.seed = None + self.current_epoch = 0 + + if not self.enable_bucket: + self.bucket_no_upscale = False + + def get_metadata(self) -> dict: + metadata = { + "resolution": self.resolution, + "caption_extension": self.caption_extension, + "batch_size_per_device": self.batch_size, + "num_repeats": self.num_repeats, + "enable_bucket": bool(self.enable_bucket), + "bucket_no_upscale": bool(self.bucket_no_upscale), + } + return metadata + + def get_all_latent_cache_files(self): + return glob.glob(os.path.join(self.cache_directory, f"*_{self.architecture}.safetensors")) + + def get_all_text_encoder_output_cache_files(self): + return glob.glob(os.path.join(self.cache_directory, f"*_{self.architecture}_te.safetensors")) + + def get_latent_cache_path(self, item_info: ItemInfo) -> str: + """ + Returns the cache path for the latent tensor. 
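+        The file name follows the pattern f"{basename}_{width:04d}x{height:04d}_{architecture}.safetensors" under cache_directory, e.g. "img_0001_0960x0544_wan.safetensors" (the basename here is only an example).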
+ + item_info: ItemInfo object + + Returns: + str: cache path + + cache_path is based on the item_key and the resolution. + """ + w, h = item_info.original_size + basename = os.path.splitext(os.path.basename(item_info.item_key))[0] + assert self.cache_directory is not None, "cache_directory is required / cache_directoryは必須です" + return os.path.join(self.cache_directory, f"{basename}_{w:04d}x{h:04d}_{self.architecture}.safetensors") + + def get_text_encoder_output_cache_path(self, item_info: ItemInfo) -> str: + basename = os.path.splitext(os.path.basename(item_info.item_key))[0] + assert self.cache_directory is not None, "cache_directory is required / cache_directoryは必須です" + return os.path.join(self.cache_directory, f"{basename}_{self.architecture}_te.safetensors") + + def retrieve_latent_cache_batches(self, num_workers: int): + raise NotImplementedError + + def retrieve_text_encoder_output_cache_batches(self, num_workers: int): + raise NotImplementedError + + def prepare_for_training(self): + pass + + def set_seed(self, seed: int): + self.seed = seed + + def set_current_epoch(self, epoch): + if not self.current_epoch == epoch: # shuffle buckets when epoch is incremented + if epoch > self.current_epoch: + logger.info("epoch is incremented. current_epoch: {}, epoch: {}".format(self.current_epoch, epoch)) + num_epochs = epoch - self.current_epoch + for _ in range(num_epochs): + self.current_epoch += 1 + self.shuffle_buckets() + # self.current_epoch seem to be set to 0 again in the next epoch. it may be caused by skipped_dataloader? + else: + logger.warning("epoch is not incremented. current_epoch: {}, epoch: {}".format(self.current_epoch, epoch)) + self.current_epoch = epoch + + def set_current_step(self, step): + self.current_step = step + + def set_max_train_steps(self, max_train_steps): + self.max_train_steps = max_train_steps + + def shuffle_buckets(self): + raise NotImplementedError + + def __len__(self): + return NotImplementedError + + def __getitem__(self, idx): + raise NotImplementedError + + def _default_retrieve_text_encoder_output_cache_batches(self, datasource: ContentDatasource, batch_size: int, num_workers: int): + datasource.set_caption_only(True) + executor = ThreadPoolExecutor(max_workers=num_workers) + + data: list[ItemInfo] = [] + futures = [] + + def aggregate_future(consume_all: bool = False): + while len(futures) >= num_workers or (consume_all and len(futures) > 0): + completed_futures = [future for future in futures if future.done()] + if len(completed_futures) == 0: + if len(futures) >= num_workers or consume_all: # to avoid adding too many futures + time.sleep(0.1) + continue + else: + break # submit batch if possible + + for future in completed_futures: + item_key, caption = future.result() + item_info = ItemInfo(item_key, caption, (0, 0), (0, 0)) + item_info.text_encoder_output_cache_path = self.get_text_encoder_output_cache_path(item_info) + data.append(item_info) + + futures.remove(future) + + def submit_batch(flush: bool = False): + nonlocal data + if len(data) >= batch_size or (len(data) > 0 and flush): + batch = data[0:batch_size] + if len(data) > batch_size: + data = data[batch_size:] + else: + data = [] + return batch + return None + + for fetch_op in datasource: + future = executor.submit(fetch_op) + futures.append(future) + aggregate_future() + while True: + batch = submit_batch() + if batch is None: + break + yield batch + + aggregate_future(consume_all=True) + while True: + batch = submit_batch(flush=True) + if batch is None: + break + yield batch + + 
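+        # all caption batches have been yielded; release the worker threads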
executor.shutdown() + + +class ImageDataset(BaseDataset): + def __init__( + self, + resolution: Tuple[int, int], + caption_extension: Optional[str], + batch_size: int, + num_repeats: int, + enable_bucket: bool, + bucket_no_upscale: bool, + image_directory: Optional[str] = None, + image_jsonl_file: Optional[str] = None, + control_directory: Optional[str] = None, + cache_directory: Optional[str] = None, + fp_latent_window_size: Optional[int] = 9, + fp_1f_clean_indices: Optional[list[int]] = None, + fp_1f_target_index: Optional[int] = None, + fp_1f_no_post: Optional[bool] = False, + debug_dataset: bool = False, + architecture: str = "no_default", + ): + super(ImageDataset, self).__init__( + resolution, + caption_extension, + batch_size, + num_repeats, + enable_bucket, + bucket_no_upscale, + cache_directory, + debug_dataset, + architecture, + ) + self.image_directory = image_directory + self.image_jsonl_file = image_jsonl_file + self.control_directory = control_directory + self.fp_latent_window_size = fp_latent_window_size + self.fp_1f_clean_indices = fp_1f_clean_indices + self.fp_1f_target_index = fp_1f_target_index + self.fp_1f_no_post = fp_1f_no_post + + control_count_per_image = 1 + if fp_1f_clean_indices is not None: + control_count_per_image = len(fp_1f_clean_indices) + + if image_directory is not None: + self.datasource = ImageDirectoryDatasource( + image_directory, caption_extension, control_directory, control_count_per_image + ) + elif image_jsonl_file is not None: + self.datasource = ImageJsonlDatasource(image_jsonl_file, control_count_per_image) + else: + raise ValueError("image_directory or image_jsonl_file must be specified") + + if self.cache_directory is None: + self.cache_directory = self.image_directory + + self.batch_manager = None + self.num_train_items = 0 + self.has_control = self.datasource.has_control + + def get_metadata(self): + metadata = super().get_metadata() + if self.image_directory is not None: + metadata["image_directory"] = os.path.basename(self.image_directory) + if self.image_jsonl_file is not None: + metadata["image_jsonl_file"] = os.path.basename(self.image_jsonl_file) + if self.control_directory is not None: + metadata["control_directory"] = os.path.basename(self.control_directory) + metadata["has_control"] = self.has_control + return metadata + + def get_total_image_count(self): + return len(self.datasource) if self.datasource.is_indexable() else None + + def retrieve_latent_cache_batches(self, num_workers: int): + buckset_selector = BucketSelector(self.resolution, self.enable_bucket, self.bucket_no_upscale, self.architecture) + executor = ThreadPoolExecutor(max_workers=num_workers) + + batches: dict[tuple[int, int], list[ItemInfo]] = {} # (width, height) -> [ItemInfo] + futures = [] + + # aggregate futures and sort by bucket resolution + def aggregate_future(consume_all: bool = False): + while len(futures) >= num_workers or (consume_all and len(futures) > 0): + completed_futures = [future for future in futures if future.done()] + if len(completed_futures) == 0: + if len(futures) >= num_workers or consume_all: # to avoid adding too many futures + time.sleep(0.1) + continue + else: + break # submit batch if possible + + for future in completed_futures: + original_size, item_key, image, caption, controls = future.result() + bucket_height, bucket_width = image.shape[:2] + bucket_reso = (bucket_width, bucket_height) + + item_info = ItemInfo(item_key, caption, original_size, bucket_reso, content=image) + item_info.latent_cache_path = 
self.get_latent_cache_path(item_info) + item_info.fp_latent_window_size = self.fp_latent_window_size + item_info.fp_1f_clean_indices = self.fp_1f_clean_indices + item_info.fp_1f_target_index = self.fp_1f_target_index + item_info.fp_1f_no_post = self.fp_1f_no_post + + if self.architecture == ARCHITECTURE_FRAMEPACK or self.architecture == ARCHITECTURE_WAN: + # we need to split the bucket with latent window size and optional 1f clean indices, zero post + bucket_reso = list(bucket_reso) + [self.fp_latent_window_size] + if self.fp_1f_clean_indices is not None: + bucket_reso.append(len(self.fp_1f_clean_indices)) + bucket_reso.append(self.fp_1f_no_post) + bucket_reso = tuple(bucket_reso) + + if controls is not None: + item_info.control_content = controls + + if bucket_reso not in batches: + batches[bucket_reso] = [] + batches[bucket_reso].append(item_info) + + futures.remove(future) + + # submit batch if some bucket has enough items + def submit_batch(flush: bool = False): + for key in batches: + if len(batches[key]) >= self.batch_size or flush: + batch = batches[key][0 : self.batch_size] + if len(batches[key]) > self.batch_size: + batches[key] = batches[key][self.batch_size :] + else: + del batches[key] + return key, batch + return None, None + + for fetch_op in self.datasource: + + # fetch and resize image in a separate thread + def fetch_and_resize(op: callable) -> tuple[tuple[int, int], str, Image.Image, str, Optional[Image.Image]]: + image_key, image, caption, controls = op() + image: Image.Image + image_size = image.size + + bucket_reso = buckset_selector.get_bucket_resolution(image_size) + image = resize_image_to_bucket(image, bucket_reso) # returns np.ndarray + resized_controls = None + if controls is not None: + resized_controls = [] + for control in controls: + resized_control = resize_image_to_bucket(control, bucket_reso) # returns np.ndarray + resized_controls.append(resized_control) + + return image_size, image_key, image, caption, resized_controls + + future = executor.submit(fetch_and_resize, fetch_op) + futures.append(future) + aggregate_future() + while True: + key, batch = submit_batch() + if key is None: + break + yield key, batch + + aggregate_future(consume_all=True) + while True: + key, batch = submit_batch(flush=True) + if key is None: + break + yield key, batch + + executor.shutdown() + + def retrieve_text_encoder_output_cache_batches(self, num_workers: int): + return self._default_retrieve_text_encoder_output_cache_batches(self.datasource, self.batch_size, num_workers) + + def prepare_for_training(self): + bucket_selector = BucketSelector(self.resolution, self.enable_bucket, self.bucket_no_upscale, self.architecture) + + # glob cache files + latent_cache_files = glob.glob(os.path.join(self.cache_directory, f"*_{self.architecture}.safetensors")) + + # assign cache files to item info + bucketed_item_info: dict[tuple[int, int], list[ItemInfo]] = {} # (width, height) -> [ItemInfo] + for cache_file in latent_cache_files: + tokens = os.path.basename(cache_file).split("_") + + image_size = tokens[-2] # 0000x0000 + image_width, image_height = map(int, image_size.split("x")) + image_size = (image_width, image_height) + + item_key = "_".join(tokens[:-2]) + text_encoder_output_cache_file = os.path.join(self.cache_directory, f"{item_key}_{self.architecture}_te.safetensors") + if not os.path.exists(text_encoder_output_cache_file): + logger.warning(f"Text encoder output cache file not found: {text_encoder_output_cache_file}") + continue + + bucket_reso = 
bucket_selector.get_bucket_resolution(image_size) + + if self.architecture == ARCHITECTURE_FRAMEPACK or self.architecture == ARCHITECTURE_WAN: + # we need to split the bucket with latent window size and optional 1f clean indices, zero post + bucket_reso = list(bucket_reso) + [self.fp_latent_window_size] + if self.fp_1f_clean_indices is not None: + bucket_reso.append(len(self.fp_1f_clean_indices)) + bucket_reso.append(self.fp_1f_no_post) + bucket_reso = tuple(bucket_reso) + + item_info = ItemInfo(item_key, "", image_size, bucket_reso, latent_cache_path=cache_file) + item_info.text_encoder_output_cache_path = text_encoder_output_cache_file + + bucket = bucketed_item_info.get(bucket_reso, []) + for _ in range(self.num_repeats): + bucket.append(item_info) + bucketed_item_info[bucket_reso] = bucket + + # prepare batch manager + self.batch_manager = BucketBatchManager(bucketed_item_info, self.batch_size) + self.batch_manager.show_bucket_info() + + self.num_train_items = sum([len(bucket) for bucket in bucketed_item_info.values()]) + + def shuffle_buckets(self): + # set random seed for this epoch + random.seed(self.seed + self.current_epoch) + self.batch_manager.shuffle() + + def __len__(self): + if self.batch_manager is None: + return 100 # dummy value + return len(self.batch_manager) + + def __getitem__(self, idx): + return self.batch_manager[idx] + + +class VideoDataset(BaseDataset): + TARGET_FPS_HUNYUAN = 24.0 + TARGET_FPS_WAN = 16.0 + TARGET_FPS_FRAMEPACK = 30.0 + + def __init__( + self, + resolution: Tuple[int, int], + caption_extension: Optional[str], + batch_size: int, + num_repeats: int, + enable_bucket: bool, + bucket_no_upscale: bool, + frame_extraction: Optional[str] = "head", + frame_stride: Optional[int] = 1, + frame_sample: Optional[int] = 1, + target_frames: Optional[list[int]] = None, + max_frames: Optional[int] = None, + source_fps: Optional[float] = None, + video_directory: Optional[str] = None, + video_jsonl_file: Optional[str] = None, + control_directory: Optional[str] = None, + cache_directory: Optional[str] = None, + fp_latent_window_size: Optional[int] = 9, + debug_dataset: bool = False, + architecture: str = "no_default", + ): + super(VideoDataset, self).__init__( + resolution, + caption_extension, + batch_size, + num_repeats, + enable_bucket, + bucket_no_upscale, + cache_directory, + debug_dataset, + architecture, + ) + self.video_directory = video_directory + self.video_jsonl_file = video_jsonl_file + self.control_directory = control_directory + self.frame_extraction = frame_extraction + self.frame_stride = frame_stride + self.frame_sample = frame_sample + self.max_frames = max_frames + self.source_fps = source_fps + self.fp_latent_window_size = fp_latent_window_size + + if self.architecture == ARCHITECTURE_HUNYUAN_VIDEO: + self.target_fps = VideoDataset.TARGET_FPS_HUNYUAN + elif self.architecture == ARCHITECTURE_WAN: + self.target_fps = VideoDataset.TARGET_FPS_WAN + elif self.architecture == ARCHITECTURE_FRAMEPACK: + self.target_fps = VideoDataset.TARGET_FPS_FRAMEPACK + else: + raise ValueError(f"Unsupported architecture: {self.architecture}") + + if target_frames is not None: + target_frames = list(set(target_frames)) + target_frames.sort() + + # round each value to N*4+1 + rounded_target_frames = [(f - 1) // 4 * 4 + 1 for f in target_frames] + rouneded_target_frames = list(set(rounded_target_frames)) + rouneded_target_frames.sort() + + # if value is changed, warn + if target_frames != rounded_target_frames: + logger.warning(f"target_frames are rounded to 
{rounded_target_frames}") + + target_frames = tuple(rounded_target_frames) + + self.target_frames = target_frames + + if video_directory is not None: + self.datasource = VideoDirectoryDatasource(video_directory, caption_extension, control_directory) + elif video_jsonl_file is not None: + self.datasource = VideoJsonlDatasource(video_jsonl_file) + + if self.frame_extraction == "uniform" and self.frame_sample == 1: + self.frame_extraction = "head" + logger.warning("frame_sample is set to 1 for frame_extraction=uniform. frame_extraction is changed to head.") + if self.frame_extraction == "head": + # head extraction. we can limit the number of frames to be extracted + self.datasource.set_start_and_end_frame(0, max(self.target_frames)) + + if self.cache_directory is None: + self.cache_directory = self.video_directory + + self.batch_manager = None + self.num_train_items = 0 + self.has_control = self.datasource.has_control + + def get_metadata(self): + metadata = super().get_metadata() + if self.video_directory is not None: + metadata["video_directory"] = os.path.basename(self.video_directory) + if self.video_jsonl_file is not None: + metadata["video_jsonl_file"] = os.path.basename(self.video_jsonl_file) + if self.control_directory is not None: + metadata["control_directory"] = os.path.basename(self.control_directory) + metadata["frame_extraction"] = self.frame_extraction + metadata["frame_stride"] = self.frame_stride + metadata["frame_sample"] = self.frame_sample + metadata["target_frames"] = self.target_frames + metadata["max_frames"] = self.max_frames + metadata["source_fps"] = self.source_fps + metadata["has_control"] = self.has_control + return metadata + + def retrieve_latent_cache_batches(self, num_workers: int): + buckset_selector = BucketSelector(self.resolution, architecture=self.architecture) + self.datasource.set_bucket_selector(buckset_selector) + if self.source_fps is not None: + self.datasource.set_source_and_target_fps(self.source_fps, self.target_fps) + else: + self.datasource.set_source_and_target_fps(None, None) # no conversion + + executor = ThreadPoolExecutor(max_workers=num_workers) + + # key: (width, height, frame_count) and optional latent_window_size, value: [ItemInfo] + batches: dict[tuple[Any], list[ItemInfo]] = {} + futures = [] + + def aggregate_future(consume_all: bool = False): + while len(futures) >= num_workers or (consume_all and len(futures) > 0): + completed_futures = [future for future in futures if future.done()] + if len(completed_futures) == 0: + if len(futures) >= num_workers or consume_all: # to avoid adding too many futures + time.sleep(0.1) + continue + else: + break # submit batch if possible + + for future in completed_futures: + original_frame_size, video_key, video, caption, control = future.result() + + frame_count = len(video) + video = np.stack(video, axis=0) + height, width = video.shape[1:3] + bucket_reso = (width, height) # already resized + + # process control images if available + control_video = None + if control is not None: + # set frame count to the same as video + if len(control) > frame_count: + control = control[:frame_count] + elif len(control) < frame_count: + # if control is shorter than video, repeat the last frame + last_frame = control[-1] + control.extend([last_frame] * (frame_count - len(control))) + control_video = np.stack(control, axis=0) + + crop_pos_and_frames = [] + if self.frame_extraction == "head": + for target_frame in self.target_frames: + if frame_count >= target_frame: + crop_pos_and_frames.append((0, 
target_frame)) + elif self.frame_extraction == "chunk": + # split by target_frames + for target_frame in self.target_frames: + for i in range(0, frame_count, target_frame): + if i + target_frame <= frame_count: + crop_pos_and_frames.append((i, target_frame)) + elif self.frame_extraction == "slide": + # slide window + for target_frame in self.target_frames: + if frame_count >= target_frame: + for i in range(0, frame_count - target_frame + 1, self.frame_stride): + crop_pos_and_frames.append((i, target_frame)) + elif self.frame_extraction == "uniform": + # select N frames uniformly + for target_frame in self.target_frames: + if frame_count >= target_frame: + frame_indices = np.linspace(0, frame_count - target_frame, self.frame_sample, dtype=int) + for i in frame_indices: + crop_pos_and_frames.append((i, target_frame)) + elif self.frame_extraction == "full": + # select all frames + target_frame = min(frame_count, self.max_frames) + target_frame = (target_frame - 1) // 4 * 4 + 1 # round to N*4+1 + crop_pos_and_frames.append((0, target_frame)) + else: + raise ValueError(f"frame_extraction {self.frame_extraction} is not supported") + + for crop_pos, target_frame in crop_pos_and_frames: + cropped_video = video[crop_pos : crop_pos + target_frame] + body, ext = os.path.splitext(video_key) + item_key = f"{body}_{crop_pos:05d}-{target_frame:03d}{ext}" + batch_key = (*bucket_reso, target_frame) # bucket_reso with frame_count + + if self.architecture == ARCHITECTURE_FRAMEPACK: + # add latent window size to bucket resolution + batch_key = (*batch_key, self.fp_latent_window_size) + + # crop control video if available + cropped_control = None + if control_video is not None: + cropped_control = control_video[crop_pos : crop_pos + target_frame] + + item_info = ItemInfo( + item_key, caption, original_frame_size, batch_key, frame_count=target_frame, content=cropped_video + ) + item_info.latent_cache_path = self.get_latent_cache_path(item_info) + item_info.control_content = cropped_control # None is allowed + item_info.fp_latent_window_size = self.fp_latent_window_size + + batch = batches.get(batch_key, []) + batch.append(item_info) + batches[batch_key] = batch + + futures.remove(future) + + def submit_batch(flush: bool = False): + for key in batches: + if len(batches[key]) >= self.batch_size or flush: + batch = batches[key][0 : self.batch_size] + if len(batches[key]) > self.batch_size: + batches[key] = batches[key][self.batch_size :] + else: + del batches[key] + return key, batch + return None, None + + for operator in self.datasource: + + def fetch_and_resize(op: callable) -> tuple[tuple[int, int], str, list[np.ndarray], str, Optional[list[np.ndarray]]]: + result = op() + + if len(result) == 3: # for backward compatibility TODO remove this in the future + video_key, video, caption = result + control = None + else: + video_key, video, caption, control = result + + video: list[np.ndarray] + frame_size = (video[0].shape[1], video[0].shape[0]) + + # resize if necessary + bucket_reso = buckset_selector.get_bucket_resolution(frame_size) + video = [resize_image_to_bucket(frame, bucket_reso) for frame in video] + + # resize control if necessary + if control is not None: + control = [resize_image_to_bucket(frame, bucket_reso) for frame in control] + + return frame_size, video_key, video, caption, control + + future = executor.submit(fetch_and_resize, operator) + futures.append(future) + aggregate_future() + while True: + key, batch = submit_batch() + if key is None: + break + yield key, batch + + 
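+        # the datasource is exhausted: wait for the remaining futures, then flush any partially filled buckets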
aggregate_future(consume_all=True) + while True: + key, batch = submit_batch(flush=True) + if key is None: + break + yield key, batch + + executor.shutdown() + + def retrieve_text_encoder_output_cache_batches(self, num_workers: int): + return self._default_retrieve_text_encoder_output_cache_batches(self.datasource, self.batch_size, num_workers) + + def prepare_for_training(self): + bucket_selector = BucketSelector(self.resolution, self.enable_bucket, self.bucket_no_upscale, self.architecture) + + # glob cache files + latent_cache_files = glob.glob(os.path.join(self.cache_directory, f"*_{self.architecture}.safetensors")) + + # assign cache files to item info + bucketed_item_info: dict[tuple[int, int, int], list[ItemInfo]] = {} # (width, height, frame_count) -> [ItemInfo] + for cache_file in latent_cache_files: + tokens = os.path.basename(cache_file).split("_") + + image_size = tokens[-2] # 0000x0000 + image_width, image_height = map(int, image_size.split("x")) + image_size = (image_width, image_height) + + frame_pos, frame_count = tokens[-3].split("-")[:2] # "00000-000", or optional section index "00000-000-00" + frame_pos, frame_count = int(frame_pos), int(frame_count) + + item_key = "_".join(tokens[:-3]) + text_encoder_output_cache_file = os.path.join(self.cache_directory, f"{item_key}_{self.architecture}_te.safetensors") + if not os.path.exists(text_encoder_output_cache_file): + logger.warning(f"Text encoder output cache file not found: {text_encoder_output_cache_file}") + continue + + bucket_reso = bucket_selector.get_bucket_resolution(image_size) + bucket_reso = (*bucket_reso, frame_count) + item_info = ItemInfo(item_key, "", image_size, bucket_reso, frame_count=frame_count, latent_cache_path=cache_file) + item_info.text_encoder_output_cache_path = text_encoder_output_cache_file + + bucket = bucketed_item_info.get(bucket_reso, []) + for _ in range(self.num_repeats): + bucket.append(item_info) + bucketed_item_info[bucket_reso] = bucket + + # prepare batch manager + self.batch_manager = BucketBatchManager(bucketed_item_info, self.batch_size) + self.batch_manager.show_bucket_info() + + self.num_train_items = sum([len(bucket) for bucket in bucketed_item_info.values()]) + + def shuffle_buckets(self): + # set random seed for this epoch + random.seed(self.seed + self.current_epoch) + self.batch_manager.shuffle() + + def __len__(self): + if self.batch_manager is None: + return 100 # dummy value + return len(self.batch_manager) + + def __getitem__(self, idx): + return self.batch_manager[idx] + + +class DatasetGroup(torch.utils.data.ConcatDataset): + def __init__(self, datasets: Sequence[Union[ImageDataset, VideoDataset]]): + super().__init__(datasets) + self.datasets: list[Union[ImageDataset, VideoDataset]] = datasets + self.num_train_items = 0 + for dataset in self.datasets: + self.num_train_items += dataset.num_train_items + + def set_current_epoch(self, epoch): + for dataset in self.datasets: + dataset.set_current_epoch(epoch) + + def set_current_step(self, step): + for dataset in self.datasets: + dataset.set_current_step(step) + + def set_max_train_steps(self, max_train_steps): + for dataset in self.datasets: + dataset.set_max_train_steps(max_train_steps) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_latents.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..760e0c3cee5072700f477c6d9648f83e146c464c --- /dev/null +++ 
b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_latents.py @@ -0,0 +1,490 @@ +import argparse +import logging +import math +import os +from typing import List, Optional + +import numpy as np +import torch +import torch.nn.functional as F +from tqdm import tqdm +from transformers import SiglipImageProcessor, SiglipVisionModel +from PIL import Image + +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from musubi_tuner.dataset.image_video_dataset import BaseDataset, ItemInfo, save_latent_cache_framepack, ARCHITECTURE_FRAMEPACK +from musubi_tuner.frame_pack import hunyuan +from musubi_tuner.frame_pack.framepack_utils import load_image_encoders, load_vae +from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D +from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode +import musubi_tuner.cache_latents as cache_latents +from musubi_tuner.cache_latents import preprocess_contents + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def encode_and_save_batch( + vae: AutoencoderKLCausal3D, + feature_extractor: SiglipImageProcessor, + image_encoder: SiglipVisionModel, + batch: List[ItemInfo], + vanilla_sampling: bool = False, + one_frame: bool = False, + one_frame_no_2x: bool = False, + one_frame_no_4x: bool = False, +): + """Encode a batch of original RGB videos and save FramePack section caches.""" + if one_frame: + encode_and_save_batch_one_frame( + vae, feature_extractor, image_encoder, batch, vanilla_sampling, one_frame_no_2x, one_frame_no_4x + ) + return + + latent_window_size = batch[0].fp_latent_window_size # all items should have the same window size + + # Stack batch into tensor (B,C,F,H,W) in RGB order + contents = torch.stack([torch.from_numpy(item.content) for item in batch]) + if len(contents.shape) == 4: + contents = contents.unsqueeze(1) # B, H, W, C -> B, F, H, W, C + + contents = contents.permute(0, 4, 1, 2, 3).contiguous() # B, C, F, H, W + contents = contents.to(vae.device, dtype=vae.dtype) + contents = contents / 127.5 - 1.0 # normalize to [-1, 1] + + height, width = contents.shape[3], contents.shape[4] + if height < 8 or width < 8: + item = batch[0] # other items should have the same size + raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}") + + # calculate latent frame count from original frame count (4n+1) + latent_f = (batch[0].frame_count - 1) // 4 + 1 + + # calculate the total number of sections (excluding the first frame, divided by window size) + total_latent_sections = math.floor((latent_f - 1) / latent_window_size) + if total_latent_sections < 1: + min_frames_needed = latent_window_size * 4 + 1 + raise ValueError( + f"Not enough frames for FramePack: {batch[0].frame_count} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size+1} latent frames)" + ) + + # actual latent frame count (aligned to section boundaries) + latent_f_aligned = total_latent_sections * latent_window_size + 1 if not one_frame else 1 + + # actual video frame count + frame_count_aligned = (latent_f_aligned - 1) * 4 + 1 + if frame_count_aligned != batch[0].frame_count: + logger.info( + f"Frame count mismatch: required={frame_count_aligned} != actual={batch[0].frame_count}, trimming to {frame_count_aligned}" + ) + contents = contents[:, :, :frame_count_aligned, :, :] + + latent_f = latent_f_aligned # Update to the 
aligned value + + # VAE encode (list of tensor -> stack) + latents = hunyuan.vae_encode(contents, vae) # include scaling factor + latents = latents.to("cpu") # (B, C, latent_f, H/8, W/8) + + # Vision encoding per‑item (once) + images = np.stack([item.content[0] for item in batch], axis=0) # B, H, W, C + + # encode image with image encoder + image_embeddings = [] + with torch.no_grad(): + for image in images: + if image.shape[-1] == 4: + image = image[..., :3] + image_encoder_output = hf_clip_vision_encode(image, feature_extractor, image_encoder) + image_embeddings.append(image_encoder_output.last_hidden_state) + image_embeddings = torch.cat(image_embeddings, dim=0) # B, LEN, 1152 + image_embeddings = image_embeddings.to("cpu") # Save memory + + if not vanilla_sampling: + # padding is reversed for inference (future to past) + latent_paddings = list(reversed(range(total_latent_sections))) + # Note: The padding trick for inference. See the paper for details. + if total_latent_sections > 4: + latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0] + + for b, item in enumerate(batch): + original_latent_cache_path = item.latent_cache_path + video_lat = latents[b : b + 1] # keep batch dim, 1, C, F, H, W + + # emulate inference step (history latents) + # Note: In inference, history_latents stores *generated* future latents. + # Here, for caching, we just need its shape and type for clean_* tensors. + # The actual content doesn't matter much as clean_* will be overwritten. + history_latents = torch.zeros( + (1, video_lat.shape[1], 1 + 2 + 16, video_lat.shape[3], video_lat.shape[4]), dtype=video_lat.dtype + ) # C=16 for HY + + latent_f_index = latent_f - latent_window_size # Start from the last section + section_index = total_latent_sections - 1 + + for latent_padding in latent_paddings: + is_last_section = section_index == 0 # the last section in inference order == the first section in time + latent_padding_size = latent_padding * latent_window_size + if is_last_section: + assert latent_f_index == 1, "Last section should be starting from frame 1" + + # indices generation (same as inference) + indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0) + ( + clean_latent_indices_pre, # Index for start_latent + blank_indices, # Indices for padding (future context in inference) + latent_indices, # Indices for the target latents to predict + clean_latent_indices_post, # Index for the most recent history frame + clean_latent_2x_indices, # Indices for the next 2 history frames + clean_latent_4x_indices, # Indices for the next 16 history frames + ) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1) + + # Indices for clean_latents (start + recent history) + clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1) + + # clean latents preparation (emulating inference) + clean_latents_pre = video_lat[:, :, 0:1, :, :] # Always the first frame (start_latent) + clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split( + [1, 2, 16], dim=2 + ) + clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2) # Combine start frame + placeholder + + # Target latents for this section (ground truth) + target_latents = video_lat[:, :, latent_f_index : latent_f_index + latent_window_size, :, :] + + # save cache (file path is inside item.latent_cache_path pattern), remove batch dim + item.latent_cache_path = 
append_section_idx_to_latent_cache_path(original_latent_cache_path, section_index) + save_latent_cache_framepack( + item_info=item, + latent=target_latents.squeeze(0), # Ground truth for this section + latent_indices=latent_indices.squeeze(0), # Indices for the ground truth section + clean_latents=clean_latents.squeeze(0), # Start frame + history placeholder + clean_latent_indices=clean_latent_indices.squeeze(0), # Indices for start frame + history placeholder + clean_latents_2x=clean_latents_2x.squeeze(0), # History placeholder + clean_latent_2x_indices=clean_latent_2x_indices.squeeze(0), # Indices for history placeholder + clean_latents_4x=clean_latents_4x.squeeze(0), # History placeholder + clean_latent_4x_indices=clean_latent_4x_indices.squeeze(0), # Indices for history placeholder + image_embeddings=image_embeddings[b], + ) + + if is_last_section: # If this was the first section generated in inference (time=0) + # History gets the start frame + the generated first section + generated_latents_for_history = video_lat[:, :, : latent_window_size + 1, :, :] + else: + # History gets the generated current section + generated_latents_for_history = target_latents # Use true latents as stand-in for generated + + history_latents = torch.cat([generated_latents_for_history, history_latents], dim=2) + + section_index -= 1 + latent_f_index -= latent_window_size + + else: + # Vanilla Sampling Logic + for b, item in enumerate(batch): + original_latent_cache_path = item.latent_cache_path + video_lat = latents[b : b + 1] # Keep batch dim: 1, C, F_aligned, H, W + img_emb = image_embeddings[b] # LEN, 1152 + + for section_index in range(total_latent_sections): + target_start_f = section_index * latent_window_size + 1 + target_end_f = target_start_f + latent_window_size + target_latents = video_lat[:, :, target_start_f:target_end_f, :, :] + start_latent = video_lat[:, :, 0:1, :, :] + + # Clean latents preparation (Vanilla) + clean_latents_total_count = 1 + 2 + 16 + history_latents = torch.zeros( + size=(1, 16, clean_latents_total_count, video_lat.shape[-2], video_lat.shape[-1]), + device=video_lat.device, + dtype=video_lat.dtype, + ) + + history_start_f = 0 + video_start_f = target_start_f - clean_latents_total_count + copy_count = clean_latents_total_count + if video_start_f < 0: + history_start_f = -video_start_f + copy_count = clean_latents_total_count - history_start_f + video_start_f = 0 + if copy_count > 0: + history_latents[:, :, history_start_f:] = video_lat[:, :, video_start_f : video_start_f + copy_count, :, :] + + # indices generation (Vanilla): copy from FramePack-F1 + indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0) + ( + clean_latent_indices_start, + clean_latent_4x_indices, + clean_latent_2x_indices, + clean_latent_1x_indices, + latent_indices, + ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1) + clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1) + + clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2) + clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2) + + # Save cache + item.latent_cache_path = append_section_idx_to_latent_cache_path(original_latent_cache_path, section_index) + save_latent_cache_framepack( + item_info=item, + latent=target_latents.squeeze(0), + latent_indices=latent_indices.squeeze(0), # Indices for target section i + clean_latents=clean_latents.squeeze(0), # Past clean frames + clean_latent_indices=clean_latent_indices.squeeze(0), 
# Indices for clean_latents_pre/post + clean_latents_2x=clean_latents_2x.squeeze(0), # Past clean frames (2x) + clean_latent_2x_indices=clean_latent_2x_indices.squeeze(0), # Indices for clean_latents_2x + clean_latents_4x=clean_latents_4x.squeeze(0), # Past clean frames (4x) + clean_latent_4x_indices=clean_latent_4x_indices.squeeze(0), # Indices for clean_latents_4x + image_embeddings=img_emb, + # Note: We don't explicitly save past_offset_indices, + # but its size influences the absolute values in other indices. + ) + + +def encode_and_save_batch_one_frame( + vae: AutoencoderKLCausal3D, + feature_extractor: SiglipImageProcessor, + image_encoder: SiglipVisionModel, + batch: List[ItemInfo], + vanilla_sampling: bool = False, + one_frame_no_2x: bool = False, + one_frame_no_4x: bool = False, +): + # item.content: target image (H, W, C) + # item.control_content: list of images (H, W, C) + _, _, contents, content_masks = preprocess_contents(batch) + contents = contents.to(vae.device, dtype=vae.dtype) # B, C, F, H, W + + # VAE encode: we need to encode one frame at a time because VAE encoder has stride=4 for the time dimension except for the first frame. + latents = [hunyuan.vae_encode(contents[:, :, idx : idx + 1], vae).to("cpu") for idx in range(contents.shape[2])] + latents = torch.cat(latents, dim=2) # B, C, F, H/8, W/8 + + # apply alphas to latents + for b, item in enumerate(batch): + for i, content_mask in enumerate(content_masks[b]): + if content_mask is not None: + # apply mask to the latents + # print(f"Applying content mask for item {item.item_key}, frame {i}") + latents[b : b + 1, :, i : i + 1] *= content_mask + + # Vision encoding per‑item (once): use control content because it is the start image + images = [item.control_content[0] for item in batch] # list of [H, W, C] + + # encode image with image encoder + image_embeddings = [] + with torch.no_grad(): + for image in images: + if image.shape[-1] == 4: + image = image[..., :3] + image_encoder_output = hf_clip_vision_encode(image, feature_extractor, image_encoder) + image_embeddings.append(image_encoder_output.last_hidden_state) + image_embeddings = torch.cat(image_embeddings, dim=0) # B, LEN, 1152 + image_embeddings = image_embeddings.to("cpu") # Save memory + + # save cache for each item in the batch + for b, item in enumerate(batch): + # indices generation (same as inference): each item may have different clean_latent_indices, so we generate them per item + clean_latent_indices = item.fp_1f_clean_indices # list of indices for clean latents + if clean_latent_indices is None or len(clean_latent_indices) == 0: + logger.warning( + f"Item {item.item_key} has no clean_latent_indices defined, using default indices for one frame training." 
+ ) + clean_latent_indices = [0] + + if not item.fp_1f_no_post: + clean_latent_indices = clean_latent_indices + [1 + item.fp_latent_window_size] + clean_latent_indices = torch.Tensor(clean_latent_indices).long() # N + + latent_index = torch.Tensor([item.fp_1f_target_index]).long() # 1 + + # zero values is not needed to cache even if one_frame_no_2x or 4x is False + clean_latents_2x = None + clean_latents_4x = None + + if one_frame_no_2x: + clean_latent_2x_indices = None + else: + index = 1 + item.fp_latent_window_size + 1 + clean_latent_2x_indices = torch.arange(index, index + 2) # 2 + + if one_frame_no_4x: + clean_latent_4x_indices = None + else: + index = 1 + item.fp_latent_window_size + 1 + 2 + clean_latent_4x_indices = torch.arange(index, index + 16) # 16 + + # clean latents preparation (emulating inference) + clean_latents = latents[b, :, :-1] # C, F, H, W + if not item.fp_1f_no_post: + # If zero post is enabled, we need to add a zero frame at the end + clean_latents = F.pad(clean_latents, (0, 0, 0, 0, 0, 1), value=0.0) # C, F+1, H, W + + # Target latents for this section (ground truth) + target_latents = latents[b, :, -1:] # C, 1, H, W + + print(f"Saving cache for item {item.item_key} at {item.latent_cache_path}. no_post: {item.fp_1f_no_post}") + print(f" Clean latent indices: {clean_latent_indices}, latent index: {latent_index}") + print(f" Clean latents: {clean_latents.shape}, target latents: {target_latents.shape}") + print(f" Clean latents 2x indices: {clean_latent_2x_indices}, clean latents 4x indices: {clean_latent_4x_indices}") + print( + f" Clean latents 2x: {clean_latents_2x.shape if clean_latents_2x is not None else 'None'}, " + f"Clean latents 4x: {clean_latents_4x.shape if clean_latents_4x is not None else 'None'}" + ) + print(f" Image embeddings: {image_embeddings[b].shape}") + + # save cache (file path is inside item.latent_cache_path pattern), remove batch dim + save_latent_cache_framepack( + item_info=item, + latent=target_latents, # Ground truth for this section + latent_indices=latent_index, # Indices for the ground truth section + clean_latents=clean_latents, # Start frame + history placeholder + clean_latent_indices=clean_latent_indices, # Indices for start frame + history placeholder + clean_latents_2x=clean_latents_2x, # History placeholder + clean_latent_2x_indices=clean_latent_2x_indices, # Indices for history placeholder + clean_latents_4x=clean_latents_4x, # History placeholder + clean_latent_4x_indices=clean_latent_4x_indices, # Indices for history placeholder + image_embeddings=image_embeddings[b], + ) + + +def framepack_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument("--image_encoder", type=str, required=True, help="Image encoder (CLIP) checkpoint path or directory") + parser.add_argument( + "--f1", + action="store_true", + help="Generate cache for F1 model (vanilla (autoregressive) sampling) instead of Inverted anti-drifting (plain FramePack)", + ) + parser.add_argument( + "--one_frame", + action="store_true", + help="Generate cache for one frame training (single frame, single section). 
latent_window_size is used as the index of the target frame.", + ) + parser.add_argument( + "--one_frame_no_2x", + action="store_true", + help="Do not use clean_latents_2x and clean_latent_2x_indices for one frame training.", + ) + parser.add_argument( + "--one_frame_no_4x", + action="store_true", + help="Do not use clean_latents_4x and clean_latent_4x_indices for one frame training.", + ) + return parser + + +def main(): + parser = cache_latents.setup_parser_common() + parser = cache_latents.hv_setup_parser(parser) # VAE + parser = framepack_setup_parser(parser) + + args = parser.parse_args() + + if args.vae_dtype is not None: + raise ValueError("VAE dtype is not supported in FramePack") + # if args.batch_size != 1: + # args.batch_size = 1 + # logger.info("Batch size is set to 1 for FramePack.") + + device = args.device if hasattr(args, "device") and args.device else ("cuda" if torch.cuda.is_available() else "cpu") + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FRAMEPACK) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + if args.debug_mode is not None: + cache_latents.show_datasets( + datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images, fps=16 + ) + return + + assert args.vae is not None, "vae checkpoint is required" + + logger.info(f"Loading VAE model from {args.vae}") + vae = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, device=device) + vae.to(device) + + logger.info(f"Loading image encoder from {args.image_encoder}") + feature_extractor, image_encoder = load_image_encoders(args) + image_encoder.eval() + image_encoder.to(device) + + logger.info(f"Cache generation mode: {'Vanilla Sampling' if args.f1 else 'Inference Emulation'}") + + # encoding closure + def encode(batch: List[ItemInfo]): + encode_and_save_batch( + vae, feature_extractor, image_encoder, batch, args.f1, args.one_frame, args.one_frame_no_2x, args.one_frame_no_4x + ) + + # reuse core loop from cache_latents with no change + encode_datasets_framepack(datasets, encode, args) + + +def append_section_idx_to_latent_cache_path(latent_cache_path: str, section_idx: int) -> str: + tokens = latent_cache_path.split("_") + tokens[-3] = f"{tokens[-3]}-{section_idx:04d}" # append section index to "frame_pos-count" + return "_".join(tokens) + + +def encode_datasets_framepack(datasets: list[BaseDataset], encode: callable, args: argparse.Namespace): + num_workers = args.num_workers if args.num_workers is not None else max(1, os.cpu_count() - 1) + for i, dataset in enumerate(datasets): + logger.info(f"Encoding dataset [{i}]") + all_latent_cache_paths = [] + for _, batch in tqdm(dataset.retrieve_latent_cache_batches(num_workers)): + batch: list[ItemInfo] = batch # type: ignore + + # latent_cache_path is "{basename}_{w:04d}x{h:04d}_{self.architecture}.safetensors" + # For video dataset,we expand it to "{basename}_{section_idx:04d}_{w:04d}x{h:04d}_{self.architecture}.safetensors" + filtered_batch = [] + for item in batch: + if item.frame_count is None: + # image dataset + all_latent_cache_paths.append(item.latent_cache_path) + all_existing = os.path.exists(item.latent_cache_path) + else: + 
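# --- Editorial note: hedged illustration, not part of the original patch. ---
# Worked example for the per-section bookkeeping below, using assumed values:
# a video item with frame_count=73 and fp_latent_window_size=9 gives
#   latent_f     = (73 - 1) // 4 + 1 = 19
#   num_sections = max(1, floor((19 - 1) / 9)) = 2
# so two section caches are expected. Assuming a cache path such as
# "clip_00000-073_0480x0832_framepack.safetensors", append_section_idx_to_latent_cache_path
# yields "clip_00000-073-0000_0480x0832_framepack.safetensors" and
# "clip_00000-073-0001_0480x0832_framepack.safetensors"; the item is kept for
# re-encoding if any of these files is missing.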
latent_f = (item.frame_count - 1) // 4 + 1 + num_sections = max(1, math.floor((latent_f - 1) / item.fp_latent_window_size)) # min 1 section + all_existing = True + for sec in range(num_sections): + p = append_section_idx_to_latent_cache_path(item.latent_cache_path, sec) + all_latent_cache_paths.append(p) + all_existing = all_existing and os.path.exists(p) + + if not all_existing: # if any section cache is missing + filtered_batch.append(item) + + if args.skip_existing: + if len(filtered_batch) == 0: # all sections exist + logger.info(f"All sections exist for {batch[0].item_key}, skipping") + continue + batch = filtered_batch # update batch to only missing sections + + bs = args.batch_size if args.batch_size is not None else len(batch) + for i in range(0, len(batch), bs): + encode(batch[i : i + bs]) + + # normalize paths + all_latent_cache_paths = [os.path.normpath(p) for p in all_latent_cache_paths] + all_latent_cache_paths = set(all_latent_cache_paths) + + # remove old cache files not in the dataset + all_cache_files = dataset.get_all_latent_cache_files() + for cache_file in all_cache_files: + if os.path.normpath(cache_file) not in all_latent_cache_paths: + if args.keep_cache: + logger.info(f"Keep cache file not in the dataset: {cache_file}") + else: + os.remove(cache_file) + logger.info(f"Removed old cache file: {cache_file}") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_latents_ysh.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_latents_ysh.py new file mode 100644 index 0000000000000000000000000000000000000000..f40e1be3a8f484c35b5ae1eed34b53b857d0641e --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_latents_ysh.py @@ -0,0 +1,441 @@ +import argparse +import logging +import math +import os +from typing import List, Optional + +import numpy as np +import torch +import torch.nn.functional as F +from tqdm import tqdm +from transformers import SiglipImageProcessor, SiglipVisionModel +from PIL import Image + +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from musubi_tuner.dataset.image_video_dataset import BaseDataset, ItemInfo, save_latent_cache_framepack, ARCHITECTURE_FRAMEPACK +from musubi_tuner.frame_pack import hunyuan +from musubi_tuner.frame_pack.framepack_utils import load_image_encoders, load_vae +from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D +from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode +import musubi_tuner.cache_latents as cache_latents +from musubi_tuner.cache_latents import preprocess_contents + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def encode_and_save_batch( + vae: AutoencoderKLCausal3D, + feature_extractor: SiglipImageProcessor, + image_encoder: SiglipVisionModel, + batch: List[ItemInfo], + vanilla_sampling: bool = False, +): + """Encode a batch of original RGB videos and save FramePack section caches.""" + latent_window_size = batch[0].fp_latent_window_size # all items should have the same window size, 9 + + # Stack batch into tensor (B,C,F,H,W) in RGB order + contents = torch.stack([torch.from_numpy(item.content) for item in batch]) # torch.Size([1, 73, 480, 832, 3]) + if len(contents.shape) == 4: + contents = contents.unsqueeze(1) # B, H, W, C -> B, F, H, W, C + + contents = contents.permute(0, 4, 1, 2, 3).contiguous() # B, C, F, H, W # torch.Size([1, 3, 73, 
480, 832]) + contents = contents.to(vae.device, dtype=vae.dtype) + contents = contents / 127.5 - 1.0 # normalize to [-1, 1] + + height, width = contents.shape[3], contents.shape[4] + if height < 8 or width < 8: + item = batch[0] # other items should have the same size + raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}") + + # calculate latent frame count from original frame count (4n+1) + latent_f = (batch[0].frame_count - 1) // 4 + 1 # 19 + + # calculate the total number of sections (excluding the first frame, divided by window size) + total_latent_sections = math.floor((latent_f - 1) / latent_window_size) # 2.0 + if total_latent_sections < 1: + min_frames_needed = latent_window_size * 4 + 1 + raise ValueError( + f"Not enough frames for FramePack: {batch[0].frame_count} frames ({latent_f} latent frames), minimum required: {min_frames_needed} frames ({latent_window_size+1} latent frames)" + ) + + # actual latent frame count (aligned to section boundaries) + latent_f_aligned = total_latent_sections * latent_window_size + 1 # 19 + + # actual video frame count + frame_count_aligned = (latent_f_aligned - 1) * 4 + 1 # 73 + if frame_count_aligned != batch[0].frame_count: # 73 != 89 + logger.info( + f"Frame count mismatch: required={frame_count_aligned} != actual={batch[0].frame_count}, trimming to {frame_count_aligned}" + ) + contents = contents[:, :, :frame_count_aligned, :, :] # torch.Size([1, 3, 89, 480, 832]) -> torch.Size([1, 3, 73, 480, 832]) + + latent_f = latent_f_aligned # Update to the aligned value + + # VAE encode (list of tensor -> stack) + latents = hunyuan.vae_encode(contents, vae) # include scaling factor # torch.Size([1, 16, 19, 60, 104]) + latents = latents.to("cpu") # (B, C, latent_f, H/8, W/8) + + # Vision encoding per‑item (once) + images = np.stack([item.content[0] for item in batch], axis=0) # B, H, W, C # (1, 480, 832, 3) + + # encode image with image encoder + image_embeddings = [] + with torch.no_grad(): + for image in images: + if image.shape[-1] == 4: + image = image[..., :3] + image_encoder_output = hf_clip_vision_encode(image, feature_extractor, image_encoder) + image_embeddings.append(image_encoder_output.last_hidden_state) + image_embeddings = torch.cat(image_embeddings, dim=0) # B, LEN, 1152 # torch.Size([1, 729, 1152]) + image_embeddings = image_embeddings.to("cpu") # Save memory + + if not vanilla_sampling: + # padding is reversed for inference (future to past) + latent_paddings = list(reversed(range(total_latent_sections))) # [1, 0] + # Note: The padding trick for inference. See the paper for details. + if total_latent_sections > 4: + latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0] + + # all_target_latents = [] + # all_target_latent_indices = [] + # all_clean_latents = [] + # all_clean_latent_indices = [] + # all_clean_latents_2x = [] + # all_clean_latent_2x_indices = [] + # all_clean_latents_4x = [] + # all_clean_latent_4x_indices = [] + + for b, item in enumerate(batch): + original_latent_cache_path = item.latent_cache_path + video_lat = latents[b : b + 1] # keep batch dim, 1, C, F, H, W # torch.Size([1, 16, 19, 60, 104]) + + # emulate inference step (history latents) + # Note: In inference, history_latents stores *generated* future latents. + # Here, for caching, we just need its shape and type for clean_* tensors. + # The actual content doesn't matter much as clean_* will be overwritten. 
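# --- Editorial note: hedged illustration, not part of the original patch. ---
# Worked example of the index layout built in the loop below, assuming
# latent_window_size=9 and total_latent_sections=2 (hence latent_paddings=[1, 0],
# matching the shape comments in this file):
#   latent_padding=1 -> 1 + 9 + 9 + 1 + 2 + 16 = 38 positions:
#     clean_latent_indices_pre=[0], blank_indices=[1..9], latent_indices=[10..18],
#     clean_latent_indices_post=[19], clean_latent_2x_indices=[20, 21],
#     clean_latent_4x_indices=[22..37], clean_latent_indices=[0, 19]
#   latent_padding=0 -> 29 positions:
#     latent_indices=[1..9], clean_latent_indices=[0, 10],
#     clean_latent_2x_indices=[11, 12], clean_latent_4x_indices=[13..28]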
+ history_latents = torch.zeros( + (1, video_lat.shape[1], 1 + 2 + 16, video_lat.shape[3], video_lat.shape[4]), dtype=video_lat.dtype + ) # C=16 for HY # torch.Size([1, 16, 19, 60, 104]) + + latent_f_index = latent_f - latent_window_size # Start from the last section # 19 - 9 = 10 + section_index = total_latent_sections - 1 # 2 - 1 = 1 + + for latent_padding in latent_paddings: + is_last_section = section_index == 0 # the last section in inference order == the first section in time + latent_padding_size = latent_padding * latent_window_size + if is_last_section: + assert latent_f_index == 1, "Last section should be starting from frame 1" + + # indices generation (same as inference) + indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0) + ( + clean_latent_indices_pre, # Index for start_latent + blank_indices, # Indices for padding (future context in inference) + latent_indices, # Indices for the target latents to predict + clean_latent_indices_post, # Index for the most recent history frame + clean_latent_2x_indices, # Indices for the next 2 history frames + clean_latent_4x_indices, # Indices for the next 16 history frames + ) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1) + + # Indices for clean_latents (start + recent history) + clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1) + + # clean latents preparation (emulating inference) + clean_latents_pre = video_lat[:, :, 0:1, :, :] # Always the first frame (start_latent) + clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split( + [1, 2, 16], dim=2 + ) + clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2) # Combine start frame + placeholder + + # Target latents for this section (ground truth) + target_latents = video_lat[:, :, latent_f_index : latent_f_index + latent_window_size, :, :] + + # save cache (file path is inside item.latent_cache_path pattern), remove batch dim + item.latent_cache_path = append_section_idx_to_latent_cache_path(original_latent_cache_path, section_index) + save_latent_cache_framepack( + item_info=item, + latent=target_latents.squeeze(0), # Ground truth for this section + latent_indices=latent_indices.squeeze(0), # Indices for the ground truth section + clean_latents=clean_latents.squeeze(0), # Start frame + history placeholder + clean_latent_indices=clean_latent_indices.squeeze(0), # Indices for start frame + history placeholder + clean_latents_2x=clean_latents_2x.squeeze(0), # History placeholder + clean_latent_2x_indices=clean_latent_2x_indices.squeeze(0), # Indices for history placeholder + clean_latents_4x=clean_latents_4x.squeeze(0), # History placeholder + clean_latent_4x_indices=clean_latent_4x_indices.squeeze(0), # Indices for history placeholder + image_embeddings=image_embeddings[b], + ) + + # Append for batching, remove batch dim + # all_target_latents.append(target_latents) # Ground truth for this section + # all_target_latent_indices.append(latent_indices) # Indices for the ground truth section + # all_clean_latents.append(clean_latents) # Start frame + history placeholder + # all_clean_latent_indices.append(clean_latent_indices) # Indices for start frame + history placeholder + # all_clean_latents_2x.append(clean_latents_2x) # History placeholder + # all_clean_latent_2x_indices.append(clean_latent_2x_indices) # Indices for history placeholder + # all_clean_latents_4x.append(clean_latents_4x) # History 
placeholder + # all_clean_latent_4x_indices.append(clean_latent_4x_indices) # Indices for history placeholder + + print("\nLatent shapes:") + print(f"target_latents shape: {target_latents.squeeze(0).shape}") + print(f"clean_latents shape: {clean_latents.squeeze(0).shape}") + print(f"clean_latents_2x shape: {clean_latents_2x.squeeze(0).shape}") + print(f"clean_latents_4x shape: {clean_latents_4x.squeeze(0).shape}") + print(f"image_embeddings shape: {image_embeddings[b].shape}") + + print("\nIndices:") + print(f"latent_indices: {latent_indices.squeeze(0)}") + print(f"clean_latent_indices: {clean_latent_indices.squeeze(0)}") + print(f"clean_latent_2x_indices: {clean_latent_2x_indices.squeeze(0)}") + print(f"clean_latent_4x_indices: {clean_latent_4x_indices.squeeze(0)}") + + print("\nHistory_latents shapes:") + print(f"history_latents shape: {history_latents.squeeze(0).shape}\n\n\n") + + if is_last_section: # If this was the first section generated in inference (time=0) + # History gets the start frame + the generated first section + generated_latents_for_history = video_lat[:, :, : latent_window_size + 1, :, :] + else: + # History gets the generated current section + generated_latents_for_history = target_latents # Use true latents as stand-in for generated + + history_latents = torch.cat([generated_latents_for_history, history_latents], dim=2) + + section_index -= 1 + latent_f_index -= latent_window_size + + else: + # Vanilla Sampling Logic + for b, item in enumerate(batch): + original_latent_cache_path = item.latent_cache_path + video_lat = latents[b : b + 1] # Keep batch dim: 1, C, F_aligned, H, W + img_emb = image_embeddings[b] # LEN, 1152 + + for section_index in range(total_latent_sections): + target_start_f = section_index * latent_window_size + 1 + target_end_f = target_start_f + latent_window_size + target_latents = video_lat[:, :, target_start_f:target_end_f, :, :] + start_latent = video_lat[:, :, 0:1, :, :] + + # Clean latents preparation (Vanilla) + clean_latents_total_count = 1 + 2 + 16 + history_latents = torch.zeros( + size=(1, 16, clean_latents_total_count, video_lat.shape[-2], video_lat.shape[-1]), + device=video_lat.device, + dtype=video_lat.dtype, + ) + + history_start_f = 0 + video_start_f = target_start_f - clean_latents_total_count + copy_count = clean_latents_total_count + if video_start_f < 0: + history_start_f = -video_start_f + copy_count = clean_latents_total_count - history_start_f + video_start_f = 0 + if copy_count > 0: + history_latents[:, :, history_start_f:] = video_lat[:, :, video_start_f : video_start_f + copy_count, :, :] + + # indices generation (Vanilla): copy from FramePack-F1 + indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0) + ( + clean_latent_indices_start, + clean_latent_4x_indices, + clean_latent_2x_indices, + clean_latent_1x_indices, + latent_indices, + ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1) + clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1) + + clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents.split([16, 2, 1], dim=2) + clean_latents = torch.cat([start_latent, clean_latents_1x], dim=2) + + # Save cache + item.latent_cache_path = append_section_idx_to_latent_cache_path(original_latent_cache_path, section_index) + save_latent_cache_framepack( + item_info=item, + latent=target_latents.squeeze(0), + latent_indices=latent_indices.squeeze(0), # Indices for target section i + clean_latents=clean_latents.squeeze(0), # Past clean frames + 
clean_latent_indices=clean_latent_indices.squeeze(0), # Indices for clean_latents_pre/post + clean_latents_2x=clean_latents_2x.squeeze(0), # Past clean frames (2x) + clean_latent_2x_indices=clean_latent_2x_indices.squeeze(0), # Indices for clean_latents_2x + clean_latents_4x=clean_latents_4x.squeeze(0), # Past clean frames (4x) + clean_latent_4x_indices=clean_latent_4x_indices.squeeze(0), # Indices for clean_latents_4x + image_embeddings=img_emb, + # Note: We don't explicitly save past_offset_indices, + # but its size influences the absolute values in other indices. + ) + + # Stack all sections into batches + # batched_target_latents = torch.cat(all_target_latents, dim=0) + # batched_target_latent_indices = torch.cat(all_target_latent_indices, dim=0) + # batched_clean_latents = torch.cat(all_clean_latents, dim=0) + # batched_clean_latent_indices = torch.cat(all_clean_latent_indices, dim=0) + # batched_clean_latents_2x = torch.cat(all_clean_latents_2x, dim=0) + # batched_clean_latent_2x_indices = torch.cat(all_clean_latent_2x_indices, dim=0) + # batched_clean_latents_4x = torch.cat(all_clean_latents_4x, dim=0) + # batched_clean_latent_4x_indices = torch.cat(all_clean_latent_4x_indices, dim=0) + + +def append_section_idx_to_latent_cache_path(latent_cache_path: str, section_idx: int) -> str: + tokens = latent_cache_path.split("_") + tokens[-3] = f"{tokens[-3]}-{section_idx:04d}" # append section index to "frame_pos-count" + return "_".join(tokens) + + +def encode_datasets_framepack(datasets: list[BaseDataset], encode: callable, args: argparse.Namespace): + num_workers = args.num_workers if args.num_workers is not None else max(1, os.cpu_count() - 1) + for i, dataset in enumerate(datasets): + logger.info(f"Encoding dataset [{i}]") + all_latent_cache_paths = [] + for _, batch in tqdm(dataset.retrieve_latent_cache_batches(num_workers)): + batch: list[ItemInfo] = batch # type: ignore + + # latent_cache_path is "{basename}_{w:04d}x{h:04d}_{self.architecture}.safetensors" + # For video dataset,we expand it to "{basename}_{section_idx:04d}_{w:04d}x{h:04d}_{self.architecture}.safetensors" + filtered_batch = [] + for item in batch: + if item.frame_count is None: + # image dataset + all_latent_cache_paths.append(item.latent_cache_path) + all_existing = os.path.exists(item.latent_cache_path) + else: + latent_f = (item.frame_count - 1) // 4 + 1 + num_sections = max(1, math.floor((latent_f - 1) / item.fp_latent_window_size)) # min 1 section + all_existing = True + for sec in range(num_sections): + p = append_section_idx_to_latent_cache_path(item.latent_cache_path, sec) + all_latent_cache_paths.append(p) + all_existing = all_existing and os.path.exists(p) + + if not all_existing: # if any section cache is missing + filtered_batch.append(item) + + if args.skip_existing: + if len(filtered_batch) == 0: # all sections exist + logger.info(f"All sections exist for {batch[0].item_key}, skipping") + continue + batch = filtered_batch # update batch to only missing sections + + bs = args.batch_size if args.batch_size is not None else len(batch) + + for i in range(0, len(batch), bs): + encode(batch[i : i + bs]) + + # normalize paths + all_latent_cache_paths = [os.path.normpath(p) for p in all_latent_cache_paths] + all_latent_cache_paths = set(all_latent_cache_paths) + + # remove old cache files not in the dataset + all_cache_files = dataset.get_all_latent_cache_files() + for cache_file in all_cache_files: + if os.path.normpath(cache_file) not in all_latent_cache_paths: + if args.keep_cache: + logger.info(f"Keep cache 
file not in the dataset: {cache_file}") + else: + os.remove(cache_file) + logger.info(f"Removed old cache file: {cache_file}") + + +def framepack_setup_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser() + + # setup_parser_common + parser.add_argument("--dataset_config", type=str, required=True, help="path to dataset config .toml file") + parser.add_argument("--vae", type=str, required=False, default=None, help="path to vae checkpoint") + parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is float16") + parser.add_argument("--device", type=str, default=None, help="device to use, default is cuda if available") + parser.add_argument( + "--batch_size", type=int, default=None, help="batch size, override dataset config if dataset batch size > this" + ) + parser.add_argument("--num_workers", type=int, default=None, help="number of workers for dataset. default is cpu count-1") + parser.add_argument("--skip_existing", action="store_true", help="skip existing cache files") + parser.add_argument("--keep_cache", action="store_true", help="keep cache files not in dataset") + parser.add_argument("--debug_mode", type=str, default=None, choices=["image", "console", "video"], help="debug mode") + parser.add_argument("--console_width", type=int, default=80, help="debug mode: console width") + parser.add_argument( + "--console_back", type=str, default=None, help="debug mode: console background color, one of ascii_magic.Back" + ) + parser.add_argument( + "--console_num_images", + type=int, + default=None, + help="debug mode: not interactive, number of images to show for each dataset", + ) + + # hv_setup_parser + parser.add_argument( + "--vae_tiling", + action="store_true", + help="enable spatial tiling for VAE, default is False. 
If vae_spatial_tile_sample_min_size is set, this is automatically enabled", + ) + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + + + # framepack_setup_parser + parser.add_argument("--image_encoder", type=str, required=True, help="Image encoder (CLIP) checkpoint path or directory") + parser.add_argument( + "--f1", + action="store_true", + help="Generate cache for F1 model (vanilla (autoregressive) sampling) instead of Inverted anti-drifting (plain FramePack)", + ) + return parser + + +def main(): + parser = framepack_setup_parser() + args = parser.parse_args() + + if args.vae_dtype is not None: + raise ValueError("VAE dtype is not supported in FramePack") + # if args.batch_size != 1: + # args.batch_size = 1 + # logger.info("Batch size is set to 1 for FramePack.") + + device = args.device if hasattr(args, "device") and args.device else ("cuda" if torch.cuda.is_available() else "cpu") + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FRAMEPACK) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + if args.debug_mode is not None: + cache_latents.show_datasets( + datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images, fps=16 + ) + return + + assert args.vae is not None, "vae checkpoint is required" + + logger.info(f"Loading VAE model from {args.vae}") + vae = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, device=device) + vae.to(device) + + logger.info(f"Loading image encoder from {args.image_encoder}") + feature_extractor, image_encoder = load_image_encoders(args) + image_encoder.eval() + image_encoder.to(device) + + logger.info(f"Cache generation mode: {'Vanilla Sampling' if args.f1 else 'Inference Emulation'}") + + # encoding closure + def encode(batch: List[ItemInfo]): + encode_and_save_batch( + vae, feature_extractor, image_encoder, batch, args.f1 + ) + + # reuse core loop from cache_latents with no change + encode_datasets_framepack(datasets, encode, args) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_text_encoder_outputs.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_text_encoder_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..7b43290a0a3db6b9fad08a77f59a94f9487bffaf --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_cache_text_encoder_outputs.py @@ -0,0 +1,111 @@ +import argparse +import os +from typing import Optional, Union + +import numpy as np +import torch +from tqdm import tqdm +from transformers import LlamaTokenizerFast, LlamaModel, CLIPTokenizer, CLIPTextModel +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_FRAMEPACK, ItemInfo, save_text_encoder_output_cache_framepack +import musubi_tuner.cache_text_encoder_outputs as 
cache_text_encoder_outputs +from musubi_tuner.frame_pack import hunyuan +from musubi_tuner.frame_pack.framepack_utils import load_text_encoder1, load_text_encoder2 + +import logging + +from musubi_tuner.frame_pack.utils import crop_or_pad_yield_mask + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def encode_and_save_batch( + tokenizer1: LlamaTokenizerFast, + text_encoder1: LlamaModel, + tokenizer2: CLIPTokenizer, + text_encoder2: CLIPTextModel, + batch: list[ItemInfo], + device: torch.device, +): + prompts = [item.caption for item in batch] + + # encode prompt + # FramePack's encode_prompt_conds only supports single prompt, so we need to encode each prompt separately + list_of_llama_vec = [] + list_of_llama_attention_mask = [] + list_of_clip_l_pooler = [] + for prompt in prompts: + with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad(): + # llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds(prompts, text_encoder1, text_encoder2, tokenizer1, tokenizer2) + llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds(prompt, text_encoder1, text_encoder2, tokenizer1, tokenizer2) + llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512) + + list_of_llama_vec.append(llama_vec.squeeze(0)) + list_of_llama_attention_mask.append(llama_attention_mask.squeeze(0)) + list_of_clip_l_pooler.append(clip_l_pooler.squeeze(0)) + + # save prompt cache + for item, llama_vec, llama_attention_mask, clip_l_pooler in zip( + batch, list_of_llama_vec, list_of_llama_attention_mask, list_of_clip_l_pooler + ): + # save llama_vec and clip_l_pooler to cache + save_text_encoder_output_cache_framepack(item, llama_vec, llama_attention_mask, clip_l_pooler) + + +def main(): + parser = cache_text_encoder_outputs.setup_parser_common() + parser = framepack_setup_parser(parser) + + args = parser.parse_args() + + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_FRAMEPACK) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + # prepare cache files and paths: all_cache_files_for_dataset = exisiting cache files, all_cache_paths_for_dataset = all cache paths in the dataset + all_cache_files_for_dataset, all_cache_paths_for_dataset = cache_text_encoder_outputs.prepare_cache_files_and_paths(datasets) + + # load text encoder + tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, device) + tokenizer2, text_encoder2 = load_text_encoder2(args) + text_encoder2.to(device) + + # Encode with Text Encoders + logger.info("Encoding with Text Encoders") + + def encode_for_text_encoder(batch: list[ItemInfo]): + encode_and_save_batch(tokenizer1, text_encoder1, tokenizer2, text_encoder2, batch, device) + + cache_text_encoder_outputs.process_text_encoder_batches( + args.num_workers, + args.skip_existing, + args.batch_size, + datasets, + all_cache_files_for_dataset, + all_cache_paths_for_dataset, + encode_for_text_encoder, + ) + + # remove cache files not in dataset + cache_text_encoder_outputs.post_process_cache_files(datasets, all_cache_files_for_dataset, 
all_cache_paths_for_dataset, args.keep_cache) + + +def framepack_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory") + parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory") + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)") + return parser + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_generate_video.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_generate_video.py new file mode 100644 index 0000000000000000000000000000000000000000..2aa95b112165c9498063f611e65825d7fb7199c4 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_generate_video.py @@ -0,0 +1,2159 @@ +import argparse +from datetime import datetime +import gc +import json +import random +import os +import re +import time +import math +import copy +from typing import Tuple, Optional, List, Union, Any, Dict + +import torch +from safetensors.torch import load_file, save_file +from safetensors import safe_open +from PIL import Image +import cv2 +import numpy as np +import torchvision.transforms.functional as TF +from transformers import LlamaModel +from tqdm import tqdm + +from musubi_tuner.networks import lora_framepack +from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D +from musubi_tuner.frame_pack import hunyuan +from musubi_tuner.frame_pack.hunyuan_video_packed import load_packed_model +from musubi_tuner.frame_pack.hunyuan_video_packed_inference import HunyuanVideoTransformer3DModelPackedInference +from musubi_tuner.frame_pack.utils import crop_or_pad_yield_mask, resize_and_center_crop, soft_append_bcthw +from musubi_tuner.frame_pack.bucket_tools import find_nearest_bucket +from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode +from musubi_tuner.frame_pack.k_diffusion_hunyuan import sample_hunyuan +from musubi_tuner.dataset import image_video_dataset + +try: + from lycoris.kohya import create_network_from_weights +except: + pass + +from musubi_tuner.utils.device_utils import clean_memory_on_device +from musubi_tuner.hv_generate_video import save_images_grid, save_videos_grid, synchronize_device +from musubi_tuner.wan_generate_video import merge_lora_weights +from musubi_tuner.frame_pack.framepack_utils import load_vae, load_text_encoder1, load_text_encoder2, load_image_encoders +from musubi_tuner.dataset.image_video_dataset import load_video + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def parse_section_strings(input_string: str) -> dict[int, str]: + section_strings = {} + if input_string is None: # handle None input for image_path etc. 
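# --- Editorial note: hedged illustration, not part of the original patch. ---
# Expected behaviour of this parser, worked through by hand on assumed inputs:
#   >>> parse_section_strings("0:a cat;;;1-2:a dog;;;-1:a bird")
#   {0: 'a cat', 1: 'a dog', 2: 'a dog', -1: 'a bird'}
#   >>> parse_section_strings("a single prompt")
#   {0: 'a single prompt'}
#   >>> parse_section_strings("2:x;;;-1:y")   # no explicit index 0
#   {2: 'x', -1: 'y', 0: 'x'}   # index 0 falls back to the smallest non-negative index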
+ return section_strings + if ";;;" in input_string: + split_section_strings = input_string.split(";;;") + for section_str in split_section_strings: + if ":" not in section_str: + start = end = 0 + section_str = section_str.strip() + else: + index_str, section_str = section_str.split(":", 1) + index_str = index_str.strip() + section_str = section_str.strip() + + m = re.match(r"^(-?\d+)(-\d+)?$", index_str) + if m: + start = int(m.group(1)) + end = int(m.group(2)[1:]) if m.group(2) is not None else start + else: + start = end = 0 + section_str = section_str.strip() + for i in range(start, end + 1): + section_strings[i] = section_str + else: + section_strings[0] = input_string + + if not section_strings: # If input_string was empty or only separators + return section_strings + + if 0 not in section_strings: + indices = list(section_strings.keys()) + if all(i < 0 for i in indices): + section_index = min(indices) + else: + section_index = min(i for i in indices if i >= 0) + section_strings[0] = section_strings[section_index] + return section_strings + + +class GenerationSettings: + def __init__(self, device: torch.device, dit_weight_dtype: Optional[torch.dtype] = None): + self.device = device + self.dit_weight_dtype = dit_weight_dtype # not used currently because model may be optimized + + +def parse_args() -> argparse.Namespace: + """parse command line arguments""" + parser = argparse.ArgumentParser(description="Wan 2.1 inference script") + + # WAN arguments + # parser.add_argument("--ckpt_dir", type=str, default=None, help="The path to the checkpoint directory (Wan 2.1 official).") + parser.add_argument( + "--sample_solver", type=str, default="unipc", choices=["unipc", "dpm++", "vanilla"], help="The solver used to sample." + ) + + parser.add_argument("--dit", type=str, default=None, help="DiT directory or path") + parser.add_argument("--vae", type=str, default=None, help="VAE directory or path") + parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory or path") + parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory or path") + parser.add_argument("--image_encoder", type=str, required=True, help="Image Encoder directory or path") + parser.add_argument("--f1", action="store_true", help="Use F1 sampling method") + + # LoRA + parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path") + parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier") + parser.add_argument("--include_patterns", type=str, nargs="*", default=None, help="LoRA module include patterns") + parser.add_argument("--exclude_patterns", type=str, nargs="*", default=None, help="LoRA module exclude patterns") + parser.add_argument( + "--save_merged_model", + type=str, + default=None, + help="Save merged model to path. If specified, no inference will be performed.", + ) + + # inference + parser.add_argument( + "--prompt", + type=str, + default=None, + help="prompt for generation. If `;;;` is used, it will be split into sections. Example: `section_index:prompt` or " + "`section_index:prompt;;;section_index:prompt;;;...`, section_index can be `0` or `-1` or `0-2`, `-1` means last section, `0-2` means from 0 to 2 (inclusive).", + ) + parser.add_argument( + "--negative_prompt", + type=str, + default=None, + help="negative prompt for generation, default is empty string. 
should not change.", + ) + parser.add_argument( + "--custom_system_prompt", + type=str, + default=None, + help="Custom system prompt for LLM. If specified, it will override the default system prompt. See hunyuan_model/text_encoder.py for the default system prompt.", + ) + parser.add_argument("--video_size", type=int, nargs=2, default=[256, 256], help="video size, height and width") + parser.add_argument("--video_seconds", type=float, default=5.0, help="video length, default is 5.0 seconds") + parser.add_argument( + "--video_sections", + type=int, + default=None, + help="number of video sections, default is None (auto-calculated from video seconds)", + ) + parser.add_argument( + "--one_frame_inference", + type=str, + default=None, + help="one frame inference, default is None, comma separated values from 'no_2x', 'no_4x', 'no_post', 'control_indices' and 'target_index'.", + ) + parser.add_argument( + "--control_image_path", type=str, default=None, nargs="*", help="path to control (reference) image for one frame inference." + ) + parser.add_argument( + "--control_image_mask_path", + type=str, + default=None, + nargs="*", + help="path to control (reference) image mask for one frame inference.", + )
+ parser.add_argument("--fps", type=int, default=30, help="video fps, default is 30") + parser.add_argument("--infer_steps", type=int, default=25, help="number of inference steps, default is 25") + parser.add_argument("--save_path", type=str, required=True, help="path to save generated video") + parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.") + # parser.add_argument( + # "--cpu_noise", action="store_true", help="Use CPU to generate noise (compatible with ComfyUI). Default is False." + # ) + parser.add_argument("--latent_window_size", type=int, default=9, help="latent window size, default is 9. should not change.") + parser.add_argument( + "--embedded_cfg_scale", type=float, default=10.0, help="Embedded CFG scale (distilled CFG scale), default is 10.0" + ) + parser.add_argument( + "--guidance_scale", + type=float, + default=1.0, + help="Guidance scale for classifier-free guidance. Default is 1.0 (no guidance), should not change.", + ) + parser.add_argument("--guidance_rescale", type=float, default=0.0, help="CFG Re-scale, default is 0.0. Should not change.") + # parser.add_argument("--video_path", type=str, default=None, help="path to video for video2video inference") + parser.add_argument( + "--image_path", + type=str, + default=None, + help="path to image for image2video inference. If `;;;` is used, it will be used as section images. The notation is the same as `--prompt`.", + ) + parser.add_argument("--end_image_path", type=str, default=None, help="path to end image for image2video inference") + parser.add_argument( + "--latent_paddings", + type=str, + default=None, + help="latent paddings for each section, comma separated values. default is None (FramePack default paddings)", + ) + # parser.add_argument( + # "--control_path", + # type=str, + # default=None, + # help="path to control video for inference with controlnet. video file or directory with images", + # ) + # parser.add_argument("--trim_tail_frames", type=int, default=0, help="trim tail N frames from the video before saving")
+ + # Flow Matching + parser.add_argument( + "--flow_shift", + type=float, + default=None, + help="Shift factor for flow matching schedulers. Default is None (FramePack default).", + )
+ + parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model") + parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT, only for fp8") + # parser.add_argument("--fp8_fast", action="store_true", help="Enable fast FP8 arithmetic (RTX 4XXX+), only for fp8_scaled") + parser.add_argument( + "--rope_scaling_factor", type=float, default=0.5, help="RoPE scaling factor for high resolution (H/W), default is 0.5" + ) + parser.add_argument( + "--rope_scaling_timestep_threshold", + type=int, + default=None, + help="RoPE scaling timestep threshold, default is None (disabled); if set, RoPE scaling will be applied only for timesteps >= threshold; around 800 is a good starting point", + ) + + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)") + parser.add_argument( + "--device", type=str, default=None, help="device to use for inference. If None, use CUDA if available, otherwise use CPU" + ) + parser.add_argument( + "--attn_mode", + type=str, + default="torch", + choices=["flash", "torch", "sageattn", "xformers", "sdpa"], # "flash2", "flash3", + help="attention mode", + ) + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + parser.add_argument("--bulk_decode", action="store_true", help="decode all frames at once") + parser.add_argument("--blocks_to_swap", type=int, default=0, help="number of blocks to swap in the model") + parser.add_argument( + "--output_type", + type=str, + default="video", + choices=["video", "images", "latent", "both", "latent_images"], + help="output type", + ) + parser.add_argument("--no_metadata", action="store_true", help="do not save metadata") + parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. no inference") + parser.add_argument("--lycoris", action="store_true", help="use lycoris for inference") + # parser.add_argument("--compile", action="store_true", help="Enable torch.compile") + # parser.add_argument( + # "--compile_args", + # nargs=4, + # metavar=("BACKEND", "MODE", "DYNAMIC", "FULLGRAPH"), + # default=["inductor", "max-autotune-no-cudagraphs", "False", "False"], + # help="Torch.compile settings", + # )
+ + # MagCache + parser.add_argument( + "--magcache_mag_ratios", + type=str, + default=None, + help="Enable MagCache for inference with specified ratios, comma separated values. Example: `1.0,1.06971,1.29073,...`. " + + "It is recommended to use the same count of ratios as inference steps."
+ + "Default is None (disabled), if `0` is specified, it will use default ratios for 50 steps.", + ) + parser.add_argument("--magcache_retention_ratio", type=float, default=0.2, help="MagCache retention ratio, default is 0.2") + parser.add_argument("--magcache_threshold", type=float, default=0.24, help="MagCache threshold, default is 0.24") + parser.add_argument("--magcache_k", type=int, default=6, help="MagCache k value, default is 6") + parser.add_argument("--magcache_calibration", action="store_true", help="Enable MagCache calibration") + + # New arguments for batch and interactive modes + parser.add_argument("--from_file", type=str, default=None, help="Read prompts from a file") + parser.add_argument("--interactive", action="store_true", help="Interactive mode: read prompts from console") + + args = parser.parse_args() + + # Validate arguments + if args.from_file and args.interactive: + raise ValueError("Cannot use both --from_file and --interactive at the same time") + + if args.latent_path is None or len(args.latent_path) == 0: + if args.prompt is None and not args.from_file and not args.interactive: + raise ValueError("Either --prompt, --from_file or --interactive must be specified") + + return args + + +def parse_prompt_line(line: str) -> Dict[str, Any]: + """Parse a prompt line into a dictionary of argument overrides + + Args: + line: Prompt line with options + + Returns: + Dict[str, Any]: Dictionary of argument overrides + """ + # TODO common function with hv_train_network.line_to_prompt_dict + parts = line.split(" --") + prompt = parts[0].strip() + + # Create dictionary of overrides + overrides = {"prompt": prompt} + # Initialize control_image_path and control_image_mask_path as a list to accommodate multiple paths + overrides["control_image_path"] = [] + overrides["control_image_mask_path"] = [] + + for part in parts[1:]: + if not part.strip(): + continue + option_parts = part.split(" ", 1) + option = option_parts[0].strip() + value = option_parts[1].strip() if len(option_parts) > 1 else "" + + # Map options to argument names + if option == "w": + overrides["video_size_width"] = int(value) + elif option == "h": + overrides["video_size_height"] = int(value) + elif option == "f": + overrides["video_seconds"] = float(value) + elif option == "d": + overrides["seed"] = int(value) + elif option == "s": + overrides["infer_steps"] = int(value) + elif option == "g" or option == "l": + overrides["guidance_scale"] = float(value) + elif option == "fs": + overrides["flow_shift"] = float(value) + elif option == "i": + overrides["image_path"] = value + # elif option == "im": + # overrides["image_mask_path"] = value + # elif option == "cn": + # overrides["control_path"] = value + elif option == "n": + overrides["negative_prompt"] = value + elif option == "vs": # video_sections + overrides["video_sections"] = int(value) + elif option == "ei": # end_image_path + overrides["end_image_path"] = value + elif option == "ci": # control_image_path + overrides["control_image_path"].append(value) + elif option == "cim": # control_image_mask_path + overrides["control_image_mask_path"].append(value) + elif option == "of": # one_frame_inference + overrides["one_frame_inference"] = value + # magcache + elif option == "mcrr": # magcache retention ratio + overrides["magcache_retention_ratio"] = float(value) + elif option == "mct": # magcache threshold + overrides["magcache_threshold"] = float(value) + elif option == "mck": # magcache k + overrides["magcache_k"] = int(value) + + # If no control_image_path was 
provided, remove the empty list + if not overrides["control_image_path"]: + del overrides["control_image_path"] + if not overrides["control_image_mask_path"]: + del overrides["control_image_mask_path"] + + return overrides + + +def apply_overrides(args: argparse.Namespace, overrides: Dict[str, Any]) -> argparse.Namespace: + """Apply overrides to args + + Args: + args: Original arguments + overrides: Dictionary of overrides + + Returns: + argparse.Namespace: New arguments with overrides applied + """ + args_copy = copy.deepcopy(args) + + for key, value in overrides.items(): + if key == "video_size_width": + args_copy.video_size[1] = value + elif key == "video_size_height": + args_copy.video_size[0] = value + else: + setattr(args_copy, key, value) + + return args_copy + + +def check_inputs(args: argparse.Namespace) -> Tuple[int, int, int]: + """Validate video size and length + + Args: + args: command line arguments + + Returns: + Tuple[int, int, float]: (height, width, video_seconds) + """ + height = args.video_size[0] + width = args.video_size[1] + + video_seconds = args.video_seconds + if args.video_sections is not None: + video_seconds = (args.video_sections * (args.latent_window_size * 4) + 1) / args.fps + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + return height, width, video_seconds + + +# region DiT model + + +def load_dit_model(args: argparse.Namespace, device: torch.device) -> HunyuanVideoTransformer3DModelPackedInference: + """load DiT model + + Args: + args: command line arguments + device: device to use + dit_dtype: data type for the model + dit_weight_dtype: data type for the model weights. None for as-is + + Returns: + HunyuanVideoTransformer3DModelPackedInference: DiT model + """ + loading_device = "cpu" + if args.blocks_to_swap == 0 and not args.fp8_scaled and args.lora_weight is None: + loading_device = device + + # do not fp8 optimize because we will merge LoRA weights + model = load_packed_model(device, args.dit, args.attn_mode, loading_device, for_inference=True) + + # apply RoPE scaling factor + if args.rope_scaling_timestep_threshold is not None: + logger.info( + f"Applying RoPE scaling factor {args.rope_scaling_factor} for timesteps >= {args.rope_scaling_timestep_threshold}" + ) + model.enable_rope_scaling(args.rope_scaling_timestep_threshold, args.rope_scaling_factor) + + # magcache + initialize_magcache(args, model) + + return model + + +def optimize_model(model: HunyuanVideoTransformer3DModelPackedInference, args: argparse.Namespace, device: torch.device) -> None: + """optimize the model (FP8 conversion, device move etc.) + + Args: + model: dit model + args: command line arguments + device: device to use + """ + if args.fp8_scaled: + # load state dict as-is and optimize to fp8 + state_dict = model.state_dict() + + # if no blocks to swap, we can move the weights to GPU after optimization on GPU (omit redundant CPU->GPU copy) + move_to_device = args.blocks_to_swap == 0 # if blocks_to_swap > 0, we will keep the model on CPU + state_dict = model.fp8_optimization(state_dict, device, move_to_device, use_scaled_mm=False) # args.fp8_fast) + + info = model.load_state_dict(state_dict, strict=True, assign=True) + logger.info(f"Loaded FP8 optimized weights: {info}") + + if args.blocks_to_swap == 0: + model.to(device) # make sure all parameters are on the right device (e.g. RoPE etc.) 
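+ # (without --fp8_scaled: the else branch below casts the DiT weights directly to torch.float8_e4m3fn when --fp8 is set, instead of the scaled fp8 optimization above)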
+ else: + # simple cast to dit_dtype + target_dtype = None # load as-is (dit_weight_dtype == dtype of the weights in state_dict) + target_device = None + + if args.fp8: + target_dtype = torch.float8_e4m3fn + + if args.blocks_to_swap == 0: + logger.info(f"Move model to device: {device}") + target_device = device + + if target_device is not None and target_dtype is not None: + model.to(target_device, target_dtype) # move and cast at the same time. this reduces redundant copy operations
+ + # if args.compile: + # compile_backend, compile_mode, compile_dynamic, compile_fullgraph = args.compile_args + # logger.info( + # f"Torch Compiling[Backend: {compile_backend}; Mode: {compile_mode}; Dynamic: {compile_dynamic}; Fullgraph: {compile_fullgraph}]" + # ) + # torch._dynamo.config.cache_size_limit = 32 + # for i in range(len(model.blocks)): + # model.blocks[i] = torch.compile( + # model.blocks[i], + # backend=compile_backend, + # mode=compile_mode, + # dynamic=compile_dynamic.lower() in "true", + # fullgraph=compile_fullgraph.lower() in "true", + # )
+ + if args.blocks_to_swap > 0: + logger.info(f"Enable swap {args.blocks_to_swap} blocks to CPU from device: {device}") + model.enable_block_swap(args.blocks_to_swap, device, supports_backward=False) + model.move_to_device_except_swap_blocks(device) + model.prepare_block_swap_before_forward() + else: + # make sure the model is on the right device + model.to(device) + + model.eval().requires_grad_(False) + clean_memory_on_device(device)
+ + +# endregion + + +def decode_latent( + latent_window_size: int, + total_latent_sections: int, + bulk_decode: bool, + vae: AutoencoderKLCausal3D, + latent: torch.Tensor, + device: torch.device, + one_frame_inference_mode: bool = False, +) -> torch.Tensor: + logger.info(f"Decoding video...") + if latent.ndim == 4: + latent = latent.unsqueeze(0) # add batch dimension + + vae.to(device) + if not bulk_decode and not one_frame_inference_mode: + latent_window_size = latent_window_size # default is 9 + # total_latent_sections = (args.video_seconds * 30) / (latent_window_size * 4) + # total_latent_sections = int(max(round(total_latent_sections), 1)) + num_frames = latent_window_size * 4 - 3
+ + latents_to_decode = [] + latent_frame_index = 0 + for i in range(total_latent_sections - 1, -1, -1): + is_last_section = i == total_latent_sections - 1 + generated_latent_frames = (num_frames + 3) // 4 + (1 if is_last_section else 0) + section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2) + + section_latent = latent[:, :, latent_frame_index : latent_frame_index + section_latent_frames, :, :] + if section_latent.shape[2] > 0: + latents_to_decode.append(section_latent) + + latent_frame_index += generated_latent_frames + + latents_to_decode = latents_to_decode[::-1] # reverse the order of latents to decode + + history_pixels = None + for latent in tqdm(latents_to_decode): + if history_pixels is None: + history_pixels = hunyuan.vae_decode(latent, vae).cpu() + else: + overlapped_frames = latent_window_size * 4 - 3 + current_pixels = hunyuan.vae_decode(latent, vae).cpu() + history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames) + clean_memory_on_device(device) + else: + # bulk decode + logger.info(f"Bulk decoding or one frame inference") + if not one_frame_inference_mode: + history_pixels = hunyuan.vae_decode(latent, vae).cpu() # normal + else: + # one frame inference + history_pixels = [hunyuan.vae_decode(latent[:, :, i : i + 1, :, :], vae).cpu() for i in 
range(latent.shape[2])] + history_pixels = torch.cat(history_pixels, dim=2) + + vae.to("cpu") + + logger.info(f"Decoded. Pixel shape {history_pixels.shape}") + return history_pixels[0] # remove batch dimension + + +def prepare_image_inputs( + args: argparse.Namespace, + device: torch.device, + vae: AutoencoderKLCausal3D, + shared_models: Optional[Dict] = None, +) -> Dict[str, Any]: + """Prepare image-related inputs for I2V: VAE encoding and image encoder features.""" + height, width, video_seconds = check_inputs(args) + + # prepare image + def preprocess_image(image_path: str): + image = Image.open(image_path) + if image.mode == "RGBA": + alpha = image.split()[-1] + else: + alpha = None + image = image.convert("RGB") + + image_np = np.array(image) # PIL to numpy, HWC + + image_np = image_video_dataset.resize_image_to_bucket(image_np, (width, height)) + image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0 # -1 to 1.0, HWC + image_tensor = image_tensor.permute(2, 0, 1)[None, :, None] # HWC -> CHW -> NCFHW, N=1, C=3, F=1 + return image_tensor, image_np, alpha + + section_image_paths = parse_section_strings(args.image_path) + + section_images = {} + if section_image_paths: + for index, image_path in section_image_paths.items(): + img_tensor, img_np, _ = preprocess_image(image_path) + section_images[index] = (img_tensor, img_np) + else: + # image_path should be given, if not, we create a placeholder image (black image) + placeholder_img_np = np.zeros((height, width, 3), dtype=np.uint8) # Placeholder + placeholder_img_tensor = torch.zeros(1, 3, 1, height, width) + section_images[0] = (placeholder_img_tensor, placeholder_img_np) + section_image_paths[0] = "placeholder_image" + + # check end image + if args.end_image_path is not None: + end_image_tensor, _, _ = preprocess_image(args.end_image_path) + else: + end_image_tensor = None + + # check control images + if args.control_image_path is not None and len(args.control_image_path) > 0: + control_image_tensors = [] + control_mask_images = [] + for ctrl_image_path in args.control_image_path: + control_image_tensor, _, control_mask = preprocess_image(ctrl_image_path) + control_image_tensors.append(control_image_tensor) + control_mask_images.append(control_mask) + else: + control_image_tensors = None # Keep as None if not provided + control_mask_images = None + + # load image encoder + # VAE is passed as an argument, assume it's on the correct device or handled by caller + if shared_models is not None and "feature_extractor" in shared_models and "image_encoder" in shared_models: + feature_extractor, image_encoder = shared_models["feature_extractor"], shared_models["image_encoder"] + else: + feature_extractor, image_encoder = load_image_encoders(args) + + image_encoder_original_device = image_encoder.device + image_encoder.to(device) + + section_image_encoder_last_hidden_states = {} + for index, (img_tensor, img_np) in section_images.items(): + with torch.no_grad(): + image_encoder_output = hf_clip_vision_encode(img_np, feature_extractor, image_encoder) + image_encoder_last_hidden_state = image_encoder_output.last_hidden_state.cpu() + section_image_encoder_last_hidden_states[index] = image_encoder_last_hidden_state + + if not (shared_models and "image_encoder" in shared_models): # if loaded locally + del image_encoder, feature_extractor + else: # if shared, move back to original device (likely CPU) + image_encoder.to(image_encoder_original_device) + + clean_memory_on_device(device) + + # VAE encoding + logger.info(f"Encoding image to latent 
space with VAE") + vae_original_device = vae.device + vae.to(device) + + section_start_latents = {} + for index, (img_tensor, img_np) in section_images.items(): + start_latent = hunyuan.vae_encode(img_tensor.to(device), vae).cpu() # ensure tensor is on device + section_start_latents[index] = start_latent + + end_latent = hunyuan.vae_encode(end_image_tensor.to(device), vae).cpu() if end_image_tensor is not None else None + + control_latents = None + if control_image_tensors is not None: + control_latents = [] + for ctrl_image_tensor in control_image_tensors: + control_latent = hunyuan.vae_encode(ctrl_image_tensor.to(device), vae).cpu() + control_latents.append(control_latent) + + vae.to(vae_original_device) # Move VAE back to its original device + clean_memory_on_device(device) + + arg_c_img = {} + for index in section_images.keys(): + image_encoder_last_hidden_state = section_image_encoder_last_hidden_states[index] + start_latent = section_start_latents[index] + arg_c_img_i = { + "image_encoder_last_hidden_state": image_encoder_last_hidden_state, + "start_latent": start_latent, + "image_path": section_image_paths.get(index, "placeholder_image"), + } + arg_c_img[index] = arg_c_img_i + + return { + "height": height, + "width": width, + "video_seconds": video_seconds, + "context_img": arg_c_img, + "end_latent": end_latent, + "control_latents": control_latents, + "control_mask_images": control_mask_images, + } + + +def prepare_text_inputs( + args: argparse.Namespace, + device: torch.device, + shared_models: Optional[Dict] = None, +) -> Dict[str, Any]: + """Prepare text-related inputs for I2V: LLM and TextEncoder encoding.""" + + n_prompt = args.negative_prompt if args.negative_prompt else "" + section_prompts = parse_section_strings(args.prompt if args.prompt else " ") # Ensure prompt is not None + + # load text encoder: conds_cache holds cached encodings for prompts without padding + conds_cache = {} + if shared_models is not None: + tokenizer1, text_encoder1 = shared_models.get("tokenizer1"), shared_models.get("text_encoder1") + tokenizer2, text_encoder2 = shared_models.get("tokenizer2"), shared_models.get("text_encoder2") + if "conds_cache" in shared_models: # Use shared cache if available + conds_cache = shared_models["conds_cache"] + # text_encoder1 and text_encoder2 are on device (batched inference) or CPU (interactive inference) + else: # Load if not in shared_models + tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, device) # Load to GPU + tokenizer2, text_encoder2 = load_text_encoder2(args) # Load to CPU + text_encoder2.to(device) # Move text_encoder2 to the same device as text_encoder1 + + # Store original devices to move back later if they were shared. 
This does nothing if shared_models is None + text_encoder1_original_device = text_encoder1.device if text_encoder1 else None + text_encoder2_original_device = text_encoder2.device if text_encoder2 else None + + logger.info(f"Encoding prompt with Text Encoders") + llama_vecs = {} + llama_attention_masks = {} + clip_l_poolers = {} + + # Ensure text_encoder1 and text_encoder2 are not None before proceeding + if not text_encoder1 or not text_encoder2 or not tokenizer1 or not tokenizer2: + raise ValueError("Text encoders or tokenizers are not loaded properly.") + + # Define a function to move models to device if needed + # This is to avoid moving models if not needed, especially in interactive mode + model_is_moved = False + + def move_models_to_device_if_needed(): + nonlocal model_is_moved + nonlocal shared_models + + if model_is_moved: + return + model_is_moved = True + + logger.info(f"Moving DiT and Text Encoders to appropriate device: {device} or CPU") + if shared_models and "model" in shared_models: # DiT model is shared + if args.blocks_to_swap > 0: + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + model = shared_models["model"] + model.to("cpu") + clean_memory_on_device(device) # clean memory on device before moving models + + text_encoder1.to(device) + text_encoder2.to(device) + + with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad(): + for index, prompt in section_prompts.items(): + if prompt in conds_cache: + llama_vec, clip_l_pooler = conds_cache[prompt] + else: + move_models_to_device_if_needed() + llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds( + prompt, text_encoder1, text_encoder2, tokenizer1, tokenizer2, custom_system_prompt=args.custom_system_prompt + ) + llama_vec = llama_vec.cpu() + clip_l_pooler = clip_l_pooler.cpu() + conds_cache[prompt] = (llama_vec, clip_l_pooler) + + llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512) + llama_vecs[index] = llama_vec + llama_attention_masks[index] = llama_attention_mask + clip_l_poolers[index] = clip_l_pooler + + if args.guidance_scale == 1.0: + # llama_vecs[0] should always exist because prompt is guaranteed to be non-empty + first_llama_vec = llama_vecs.get(0) # this is cropped or padded, but it's okay for null context + first_clip_l_pooler = clip_l_poolers.get(0) + llama_vec_n, clip_l_pooler_n = torch.zeros_like(first_llama_vec), torch.zeros_like(first_clip_l_pooler) + + else: + with torch.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad(): + if n_prompt in conds_cache: + llama_vec_n, clip_l_pooler_n = conds_cache[n_prompt] + else: + move_models_to_device_if_needed() + llama_vec_n, clip_l_pooler_n = hunyuan.encode_prompt_conds( + n_prompt, text_encoder1, text_encoder2, tokenizer1, tokenizer2, custom_system_prompt=args.custom_system_prompt + ) + llama_vec_n = llama_vec_n.cpu() + clip_l_pooler_n = clip_l_pooler_n.cpu() + conds_cache[n_prompt] = (llama_vec_n, clip_l_pooler_n) + + llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512) + + if not (shared_models and "text_encoder1" in shared_models): # if loaded locally + del tokenizer1, text_encoder1, tokenizer2, text_encoder2 + else: # if shared, move back to original device (likely CPU) + if text_encoder1: + text_encoder1.to(text_encoder1_original_device) + if text_encoder2: + text_encoder2.to(text_encoder2_original_device) + + clean_memory_on_device(device) + + arg_c = {} + for index in llama_vecs.keys(): + llama_vec = 
llama_vecs[index] + llama_attention_mask = llama_attention_masks[index] + clip_l_pooler = clip_l_poolers[index] + arg_c_i = { + "llama_vec": llama_vec, + "llama_attention_mask": llama_attention_mask, + "clip_l_pooler": clip_l_pooler, + "prompt": section_prompts[index], + } + arg_c[index] = arg_c_i + + arg_null = { + "llama_vec": llama_vec_n, + "llama_attention_mask": llama_attention_mask_n, + "clip_l_pooler": clip_l_pooler_n, + } + + return { + "context": arg_c, + "context_null": arg_null, + } + + +def prepare_i2v_inputs( + args: argparse.Namespace, + device: torch.device, + vae: AutoencoderKLCausal3D, # VAE is now explicitly passed + shared_models: Optional[Dict] = None, +) -> Tuple[int, int, float, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]: + """Prepare inputs for I2V by calling image and text preparation functions.""" + + image_data = prepare_image_inputs(args, device, vae, shared_models) + text_data = prepare_text_inputs(args, device, shared_models) + + return ( + image_data["height"], + image_data["width"], + image_data["video_seconds"], + text_data["context"], + text_data["context_null"], + image_data["context_img"], + image_data["end_latent"], + image_data["control_latents"], + image_data["control_mask_images"], + ) + + +# def setup_scheduler(args: argparse.Namespace, config, device: torch.device) -> Tuple[Any, torch.Tensor]: +# """setup scheduler for sampling + +# Args: +# args: command line arguments +# config: model configuration +# device: device to use + +# Returns: +# Tuple[Any, torch.Tensor]: (scheduler, timesteps) +# """ +# if args.sample_solver == "unipc": +# scheduler = FlowUniPCMultistepScheduler(num_train_timesteps=config.num_train_timesteps, shift=1, use_dynamic_shifting=False) +# scheduler.set_timesteps(args.infer_steps, device=device, shift=args.flow_shift) +# timesteps = scheduler.timesteps +# elif args.sample_solver == "dpm++": +# scheduler = FlowDPMSolverMultistepScheduler( +# num_train_timesteps=config.num_train_timesteps, shift=1, use_dynamic_shifting=False +# ) +# sampling_sigmas = get_sampling_sigmas(args.infer_steps, args.flow_shift) +# timesteps, _ = retrieve_timesteps(scheduler, device=device, sigmas=sampling_sigmas) +# elif args.sample_solver == "vanilla": +# scheduler = FlowMatchDiscreteScheduler(num_train_timesteps=config.num_train_timesteps, shift=args.flow_shift) +# scheduler.set_timesteps(args.infer_steps, device=device) +# timesteps = scheduler.timesteps + +# # FlowMatchDiscreteScheduler does not support generator argument in step method +# org_step = scheduler.step + +# def step_wrapper( +# model_output: torch.Tensor, +# timestep: Union[int, torch.Tensor], +# sample: torch.Tensor, +# return_dict: bool = True, +# generator=None, +# ): +# return org_step(model_output, timestep, sample, return_dict=return_dict) + +# scheduler.step = step_wrapper +# else: +# raise NotImplementedError("Unsupported solver.") + +# return scheduler, timesteps + + +def convert_lora_for_framepack(lora_sd: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]: + # Check the format of the LoRA file + keys = list(lora_sd.keys()) + if keys[0].startswith("lora_unet_"): + # logging.info(f"Musubi Tuner LoRA detected") + pass + + else: + transformer_prefixes = ["diffusion_model", "transformer"] # to ignore Text Encoder modules + lora_suffix = None + prefix = None + for key in keys: + if lora_suffix is None and "lora_A" in key: + lora_suffix = "lora_A" + if prefix is None: + pfx = key.split(".")[0] + if pfx in transformer_prefixes: + prefix = pfx + if 
lora_suffix is not None and prefix is not None: + break + + if lora_suffix == "lora_A" and prefix is not None: + logging.info(f"Diffusion-pipe (?) LoRA detected, converting to the default LoRA format") + lora_sd = convert_lora_from_diffusion_pipe_or_something(lora_sd, "lora_unet_") + + else: + logging.info(f"LoRA file format not recognized. Using it as-is.") + + # Check LoRA is for FramePack or for HunyuanVideo + is_hunyuan = False + for key in lora_sd.keys(): + if "double_blocks" in key or "single_blocks" in key: + is_hunyuan = True + break + if is_hunyuan: + logging.info("HunyuanVideo LoRA detected, converting to FramePack format") + lora_sd = convert_hunyuan_to_framepack(lora_sd) + + return lora_sd + + +def convert_lora_from_diffusion_pipe_or_something(lora_sd: dict[str, torch.Tensor], prefix: str) -> dict[str, torch.Tensor]: + """ + Convert LoRA weights to the format used by the diffusion pipeline to Musubi Tuner. + Copy from Musubi Tuner repo. + """ + # convert from diffusers(?) to default LoRA + # Diffusers format: {"diffusion_model.module.name.lora_A.weight": weight, "diffusion_model.module.name.lora_B.weight": weight, ...} + # default LoRA format: {"prefix_module_name.lora_down.weight": weight, "prefix_module_name.lora_up.weight": weight, ...} + + # note: Diffusers has no alpha, so alpha is set to rank + new_weights_sd = {} + lora_dims = {} + for key, weight in lora_sd.items(): + diffusers_prefix, key_body = key.split(".", 1) + if diffusers_prefix != "diffusion_model" and diffusers_prefix != "transformer": + print(f"unexpected key: {key} in diffusers format") + continue + + new_key = f"{prefix}{key_body}".replace(".", "_").replace("_lora_A_", ".lora_down.").replace("_lora_B_", ".lora_up.") + new_weights_sd[new_key] = weight + + lora_name = new_key.split(".")[0] # before first dot + if lora_name not in lora_dims and "lora_down" in new_key: + lora_dims[lora_name] = weight.shape[0] + + # add alpha with rank + for lora_name, dim in lora_dims.items(): + new_weights_sd[f"{lora_name}.alpha"] = torch.tensor(dim) + + return new_weights_sd + + +def convert_hunyuan_to_framepack(lora_sd: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]: + """ + Convert HunyuanVideo LoRA weights to FramePack format. 
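+ For example (illustrative): a fused "img_attn_qkv" LoRA weight becomes separate "attn_to_q"/"attn_to_k"/"attn_to_v" entries (up weights split along dim 0, 3072 rows each), and "double_blocks"/"single_blocks" are renamed to "transformer_blocks"/"single_transformer_blocks"; see the mapping below.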
+ """ + new_lora_sd = {} + for key, weight in lora_sd.items(): + if "double_blocks" in key: + key = key.replace("double_blocks", "transformer_blocks") + key = key.replace("img_mod_linear", "norm1_linear") + key = key.replace("img_attn_qkv", "attn_to_QKV") # split later + key = key.replace("img_attn_proj", "attn_to_out_0") + key = key.replace("img_mlp_fc1", "ff_net_0_proj") + key = key.replace("img_mlp_fc2", "ff_net_2") + key = key.replace("txt_mod_linear", "norm1_context_linear") + key = key.replace("txt_attn_qkv", "attn_add_QKV_proj") # split later + key = key.replace("txt_attn_proj", "attn_to_add_out") + key = key.replace("txt_mlp_fc1", "ff_context_net_0_proj") + key = key.replace("txt_mlp_fc2", "ff_context_net_2") + elif "single_blocks" in key: + key = key.replace("single_blocks", "single_transformer_blocks") + key = key.replace("linear1", "attn_to_QKVM") # split later + key = key.replace("linear2", "proj_out") + key = key.replace("modulation_linear", "norm_linear") + else: + print(f"Unsupported module name: {key}, only double_blocks and single_blocks are supported") + continue + + if "QKVM" in key: + # split QKVM into Q, K, V, M + key_q = key.replace("QKVM", "q") + key_k = key.replace("QKVM", "k") + key_v = key.replace("QKVM", "v") + key_m = key.replace("attn_to_QKVM", "proj_mlp") + if "_down" in key or "alpha" in key: + # copy QKVM weight or alpha to Q, K, V, M + assert "alpha" in key or weight.size(1) == 3072, f"QKVM weight size mismatch: {key}. {weight.size()}" + new_lora_sd[key_q] = weight + new_lora_sd[key_k] = weight + new_lora_sd[key_v] = weight + new_lora_sd[key_m] = weight + elif "_up" in key: + # split QKVM weight into Q, K, V, M + assert weight.size(0) == 21504, f"QKVM weight size mismatch: {key}. {weight.size()}" + new_lora_sd[key_q] = weight[:3072] + new_lora_sd[key_k] = weight[3072 : 3072 * 2] + new_lora_sd[key_v] = weight[3072 * 2 : 3072 * 3] + new_lora_sd[key_m] = weight[3072 * 3 :] # 21504 - 3072 * 3 = 12288 + else: + print(f"Unsupported module name: {key}") + continue + elif "QKV" in key: + # split QKV into Q, K, V + key_q = key.replace("QKV", "q") + key_k = key.replace("QKV", "k") + key_v = key.replace("QKV", "v") + if "_down" in key or "alpha" in key: + # copy QKV weight or alpha to Q, K, V + assert "alpha" in key or weight.size(1) == 3072, f"QKV weight size mismatch: {key}. {weight.size()}" + new_lora_sd[key_q] = weight + new_lora_sd[key_k] = weight + new_lora_sd[key_v] = weight + elif "_up" in key: + # split QKV weight into Q, K, V + assert weight.size(0) == 3072 * 3, f"QKV weight size mismatch: {key}. 
{weight.size()}" + new_lora_sd[key_q] = weight[:3072] + new_lora_sd[key_k] = weight[3072 : 3072 * 2] + new_lora_sd[key_v] = weight[3072 * 2 :] + else: + print(f"Unsupported module name: {key}") + continue + else: + # no split needed + new_lora_sd[key] = weight + + return new_lora_sd + + +def initialize_magcache(args: argparse.Namespace, model: HunyuanVideoTransformer3DModelPackedInference) -> None: + if args.magcache_mag_ratios is None and not args.magcache_calibration: + return + + # parse mag_ratios + mag_ratios = None # calibration mode + if args.magcache_mag_ratios is not None: + mag_ratios = [float(ratio) for ratio in args.magcache_mag_ratios.split(",")] + if len(mag_ratios) == 1 and mag_ratios[0] == 0: + # use default mag_ratios + mag_ratios = None + + logger.info( + f"Initializing MagCache with mag_ratios: {mag_ratios}, retention_ratio: {args.magcache_retention_ratio}, " + f"magcache_thresh: {args.magcache_threshold}, K: {args.magcache_k}, calibration: {args.magcache_calibration}" + ) + model.initialize_magcache( + enable=True, + retention_ratio=args.magcache_retention_ratio, + mag_ratios=mag_ratios, + magcache_thresh=args.magcache_threshold, + K=args.magcache_k, + calibration=args.magcache_calibration, + ) + + +def preprocess_magcache(args: argparse.Namespace, model: HunyuanVideoTransformer3DModelPackedInference) -> None: + if args.magcache_mag_ratios is None and not args.magcache_calibration: + return + + model.reset_magcache(args.infer_steps) + + +def postprocess_magcache(args: argparse.Namespace, model: HunyuanVideoTransformer3DModelPackedInference) -> None: + if args.magcache_mag_ratios is None and not args.magcache_calibration: + return + if not args.magcache_calibration: + return + + # print mag ratios + norm_ratio, norm_std, cos_dis = model.get_calibration_data() + logger.info(f"MagCache calibration data:") + logger.info(f" - norm_ratio: {norm_ratio}") + logger.info(f" - norm_std: {norm_std}") + logger.info(f" - cos_dis: {cos_dis}") + logger.info(f"Copy and paste following values to --magcache_mag_ratios argument to use them:") + print(",".join([f"{ratio:.5f}" for ratio in [1] + norm_ratio])) + + +def generate( + args: argparse.Namespace, + gen_settings: GenerationSettings, + shared_models: Optional[Dict] = None, + precomputed_image_data: Optional[Dict] = None, + precomputed_text_data: Optional[Dict] = None, +) -> tuple[Optional[AutoencoderKLCausal3D], torch.Tensor]: # VAE can be Optional + """main function for generation + + Args: + args: command line arguments + shared_models: dictionary containing pre-loaded models (mainly for DiT) + precomputed_image_data: Optional dictionary with precomputed image data + precomputed_text_data: Optional dictionary with precomputed text data + + Returns: + tuple: (AutoencoderKLCausal3D model (vae) or None, torch.Tensor generated latent) + """ + device, dit_weight_dtype = (gen_settings.device, gen_settings.dit_weight_dtype) + vae_instance_for_return = None + + # prepare seed + seed = args.seed if args.seed is not None else random.randint(0, 2**32 - 1) + args.seed = seed # set seed to args for saving + + if precomputed_image_data is not None and precomputed_text_data is not None: + logger.info("Using precomputed image and text data.") + height = precomputed_image_data["height"] + width = precomputed_image_data["width"] + video_seconds = precomputed_image_data["video_seconds"] + context_img = precomputed_image_data["context_img"] + end_latent = precomputed_image_data["end_latent"] + control_latents = precomputed_image_data["control_latents"] 
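+ # control_latents: list of VAE-encoded control (reference) image latents for one frame inference, or None when no control images were given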
+ control_mask_images = precomputed_image_data["control_mask_images"] + + context = precomputed_text_data["context"] + context_null = precomputed_text_data["context_null"] + # VAE is not loaded here if data is precomputed; decoding VAE is handled by caller (e.g., process_batch_prompts) + # vae_instance_for_return remains None + else: + # Load VAE if not precomputed (for single/interactive mode) + # shared_models for single/interactive might contain text/image encoders, but not VAE after `load_shared_models` change. + # So, VAE will be loaded here for single/interactive. + logger.info("No precomputed data. Preparing image and text inputs.") + if shared_models and "vae" in shared_models: # Should not happen with new load_shared_models + vae_instance_for_return = shared_models["vae"] + else: + vae_instance_for_return = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, device) + + height, width, video_seconds, context, context_null, context_img, end_latent, control_latents, control_mask_images = ( + prepare_i2v_inputs(args, device, vae_instance_for_return, shared_models) # Pass VAE + ) + + if shared_models is None or "model" not in shared_models: + # load DiT model + model = load_dit_model(args, device) + + # merge LoRA weights + if args.lora_weight is not None and len(args.lora_weight) > 0: + # ugly hack to common merge_lora_weights function + merge_lora_weights(lora_framepack, model, args, device, convert_lora_for_framepack) + + # if we only want to save the model, we can skip the rest + if args.save_merged_model: + return None, None + + # optimize model: fp8 conversion, block swap etc. + optimize_model(model, args, device) + + if shared_models is not None: + shared_models["model"] = model + else: + # use shared model + model: HunyuanVideoTransformer3DModelPackedInference = shared_models["model"] + model.move_to_device_except_swap_blocks(device) # Handles block swap correctly + model.prepare_block_swap_before_forward() + + # sampling + latent_window_size = args.latent_window_size # default is 9 + # ex: (5s * 30fps) / (9 * 4) = 4.16 -> 4 sections, 60s -> 1800 / 36 = 50 sections + total_latent_sections = (video_seconds * 30) / (latent_window_size * 4) + total_latent_sections = int(max(round(total_latent_sections), 1)) + + # set random generator + seed_g = torch.Generator(device="cpu") + seed_g.manual_seed(seed) + num_frames = latent_window_size * 4 - 3 + + logger.info( + f"Video size: {height}x{width}@{video_seconds} (HxW@seconds), fps: {args.fps}, num sections: {total_latent_sections}, " + f"infer_steps: {args.infer_steps}, frames per generation: {num_frames}" + ) + + # video generation ###### + f1_mode = args.f1 + one_frame_inference = None + if args.one_frame_inference is not None: + one_frame_inference = set() + for mode in args.one_frame_inference.split(","): + one_frame_inference.add(mode.strip()) + + if one_frame_inference is not None: + real_history_latents = generate_with_one_frame_inference( + args, + model, + context, + context_null, + context_img, + control_latents, + control_mask_images, + latent_window_size, + height, + width, + device, + seed_g, + one_frame_inference, + ) + else: + # prepare history latents + history_latents = torch.zeros((1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32) + if end_latent is not None and not f1_mode: + logger.info(f"Use end image(s): {args.end_image_path}") + history_latents[:, :, :1] = end_latent.to(history_latents) + + # prepare clean latents and indices + if not f1_mode: + # Inverted Anti-drifting + 
total_generated_latent_frames = 0 + latent_paddings = reversed(range(total_latent_sections)) + + if total_latent_sections > 4 and one_frame_inference is None: + # In theory the latent_paddings should follow the above sequence, but it seems that duplicating some + # items looks better than expanding it when total_latent_sections > 4 + # One can try to remove below trick and just + # use `latent_paddings = list(reversed(range(total_latent_sections)))` to compare + # 4 sections: 3, 2, 1, 0. 50 sections: 3, 2, 2, ... 2, 1, 0 + latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0] + + if args.latent_paddings is not None: + # parse user defined latent paddings + user_latent_paddings = [int(x) for x in args.latent_paddings.split(",")] + if len(user_latent_paddings) < total_latent_sections: + print( + f"User defined latent paddings length {len(user_latent_paddings)} does not match total sections {total_latent_sections}." + ) + print(f"Use default paddings instead for unspecified sections.") + latent_paddings[: len(user_latent_paddings)] = user_latent_paddings + elif len(user_latent_paddings) > total_latent_sections: + print( + f"User defined latent paddings length {len(user_latent_paddings)} is greater than total sections {total_latent_sections}." + ) + print(f"Use only first {total_latent_sections} paddings instead.") + latent_paddings = user_latent_paddings[:total_latent_sections] + else: + latent_paddings = user_latent_paddings + else: + start_latent = context_img[0]["start_latent"] + history_latents = torch.cat([history_latents, start_latent], dim=2) + total_generated_latent_frames = 1 # a bit hacky, but we employ the same logic as in official code + latent_paddings = [0] * total_latent_sections # dummy paddings for F1 mode + + latent_paddings = list(latent_paddings) # make sure it's a list + for loop_index in range(total_latent_sections): + latent_padding = latent_paddings[loop_index] + + if not f1_mode: + # Inverted Anti-drifting + section_index_reverse = loop_index # 0, 1, 2, 3 + section_index = total_latent_sections - 1 - section_index_reverse # 3, 2, 1, 0 + section_index_from_last = -(section_index_reverse + 1) # -1, -2, -3, -4 + + is_last_section = section_index == 0 + is_first_section = section_index_reverse == 0 + latent_padding_size = latent_padding * latent_window_size + + logger.info(f"latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}") + else: + section_index = loop_index # 0, 1, 2, 3 + section_index_from_last = section_index - total_latent_sections # -4, -3, -2, -1 + is_last_section = loop_index == total_latent_sections - 1 + is_first_section = loop_index == 0 + latent_padding_size = 0 # dummy padding for F1 mode + + # select start latent + if section_index_from_last in context_img: + image_index = section_index_from_last + elif section_index in context_img: + image_index = section_index + else: + image_index = 0 + + start_latent = context_img[image_index]["start_latent"] + image_path = context_img[image_index]["image_path"] + if image_index != 0: # use section image other than section 0 + logger.info( + f"Apply experimental section image, latent_padding_size = {latent_padding_size}, image_path = {image_path}" + ) + + if not f1_mode: + # Inverted Anti-drifting + indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0) + ( + clean_latent_indices_pre, + blank_indices, + latent_indices, + clean_latent_indices_post, + clean_latent_2x_indices, + clean_latent_4x_indices, + ) = indices.split([1, 
latent_padding_size, latent_window_size, 1, 2, 16], dim=1) + + clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1) + + clean_latents_pre = start_latent.to(history_latents) + clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split( + [1, 2, 16], dim=2 + ) + clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2) + + else: + # F1 mode + indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0) + ( + clean_latent_indices_start, + clean_latent_4x_indices, + clean_latent_2x_indices, + clean_latent_1x_indices, + latent_indices, + ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1) + clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1) + + clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]) :, :, :].split( + [16, 2, 1], dim=2 + ) + clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2) + + # if use_teacache: + # transformer.initialize_teacache(enable_teacache=True, num_steps=steps) + # else: + # transformer.initialize_teacache(enable_teacache=False) + + # prepare conditioning inputs + if section_index_from_last in context: + prompt_index = section_index_from_last + elif section_index in context: + prompt_index = section_index + else: + prompt_index = 0 + + context_for_index = context[prompt_index] + # if args.section_prompts is not None: + logger.info(f"Section {section_index}: {context_for_index['prompt']}") + + llama_vec = context_for_index["llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask = context_for_index["llama_attention_mask"].to(device) + clip_l_pooler = context_for_index["clip_l_pooler"].to(device, dtype=torch.bfloat16) + + image_encoder_last_hidden_state = context_img[image_index]["image_encoder_last_hidden_state"].to( + device, dtype=torch.bfloat16 + ) + + llama_vec_n = context_null["llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask_n = context_null["llama_attention_mask"].to(device) + clip_l_pooler_n = context_null["clip_l_pooler"].to(device, dtype=torch.bfloat16) + + preprocess_magcache(args, model) + + generated_latents = sample_hunyuan( + transformer=model, + sampler=args.sample_solver, + width=width, + height=height, + frames=num_frames, + real_guidance_scale=args.guidance_scale, + distilled_guidance_scale=args.embedded_cfg_scale, + guidance_rescale=args.guidance_rescale, + shift=args.flow_shift, + num_inference_steps=args.infer_steps, + generator=seed_g, + prompt_embeds=llama_vec, + prompt_embeds_mask=llama_attention_mask, + prompt_poolers=clip_l_pooler, + negative_prompt_embeds=llama_vec_n, + negative_prompt_embeds_mask=llama_attention_mask_n, + negative_prompt_poolers=clip_l_pooler_n, + device=device, + dtype=torch.bfloat16, + image_embeddings=image_encoder_last_hidden_state, + latent_indices=latent_indices, + clean_latents=clean_latents, + clean_latent_indices=clean_latent_indices, + clean_latents_2x=clean_latents_2x, + clean_latent_2x_indices=clean_latent_2x_indices, + clean_latents_4x=clean_latents_4x, + clean_latent_4x_indices=clean_latent_4x_indices, + ) + postprocess_magcache(args, model) + + # concatenate generated latents + total_generated_latent_frames += int(generated_latents.shape[2]) + if not f1_mode: + # Inverted Anti-drifting: prepend generated latents to history latents + if is_last_section: + generated_latents = torch.cat([start_latent.to(generated_latents), 
generated_latents], dim=2) + total_generated_latent_frames += 1 + + history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2) + real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :] + else: + # F1 mode: append generated latents to history latents + history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2) + real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :] + + logger.info(f"Generated. Latent shape {real_history_latents.shape}") + + # # TODO support saving intermediate video + # clean_memory_on_device(device) + # vae.to(device) + # if history_pixels is None: + # history_pixels = hunyuan.vae_decode(real_history_latents, vae).cpu() + # else: + # section_latent_frames = (latent_window_size * 2 + 1) if is_last_section else (latent_window_size * 2) + # overlapped_frames = latent_window_size * 4 - 3 + # current_pixels = hunyuan.vae_decode(real_history_latents[:, :, :section_latent_frames], vae).cpu() + # history_pixels = soft_append_bcthw(current_pixels, history_pixels, overlapped_frames) + # vae.to("cpu") + # # if not is_last_section: + # # # save intermediate video + # # save_video(history_pixels[0], args, total_generated_latent_frames) + # print(f"Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}") + + # Only clean up shared models if they were created within this function + wait_for_clean_memory = False + if not (shared_models and "model" in shared_models) and "model" in locals(): # if model was loaded locally + del model + synchronize_device(device) + wait_for_clean_memory = True + + # wait for 5 seconds until block swap is done + if wait_for_clean_memory and args.blocks_to_swap > 0: + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + + gc.collect() + clean_memory_on_device(device) + + return vae_instance_for_return, real_history_latents + + +def generate_with_one_frame_inference( + args: argparse.Namespace, + model: HunyuanVideoTransformer3DModelPackedInference, + context: Dict[int, Dict[str, torch.Tensor]], + context_null: Dict[str, torch.Tensor], + context_img: Dict[int, Dict[str, torch.Tensor]], + control_latents: Optional[List[torch.Tensor]], + control_mask_images: Optional[List[Optional[Image.Image]]], + latent_window_size: int, + height: int, + width: int, + device: torch.device, + seed_g: torch.Generator, + one_frame_inference: set[str], +) -> torch.Tensor: + # one frame inference + sample_num_frames = 1 + latent_indices = torch.zeros((1, 1), dtype=torch.int64) # 1x1 latent index for target image + latent_indices[:, 0] = latent_window_size # last of latent_window + + def get_latent_mask(mask_image: Image.Image) -> torch.Tensor: + if mask_image.mode != "L": + mask_image = mask_image.convert("L") + mask_image = mask_image.resize((width // 8, height // 8), Image.LANCZOS) + mask_image = np.array(mask_image) # PIL to numpy, HWC + mask_image = torch.from_numpy(mask_image).float() / 255.0 # 0 to 1.0, HWC + mask_image = mask_image.squeeze(-1) # HWC -> HW + mask_image = mask_image.unsqueeze(0).unsqueeze(0).unsqueeze(0) # HW -> 111HW (BCFHW) + mask_image = mask_image.to(torch.float32) + return mask_image + + if control_latents is None or len(control_latents) == 0: + logger.info(f"No control images provided for one frame inference. 
Use zero latents for control images.") + control_latents = [torch.zeros(1, 16, 1, height // 8, width // 8, dtype=torch.float32)] + + if "no_post" not in one_frame_inference: + # add zero latents as clean latents post + control_latents.append(torch.zeros((1, 16, 1, height // 8, width // 8), dtype=torch.float32)) + logger.info(f"Add zero latents as clean latents post for one frame inference.") + + # kisekaeichi and 1f-mc: both are using control images, but indices are different + clean_latents = torch.cat(control_latents, dim=2) # (1, 16, num_control_images, H//8, W//8) + clean_latent_indices = torch.zeros((1, len(control_latents)), dtype=torch.int64) + if "no_post" not in one_frame_inference: + clean_latent_indices[:, -1] = 1 + latent_window_size # default index for clean latents post + + for i in range(len(control_latents)): + mask_image = None + if args.control_image_mask_path is not None and i < len(args.control_image_mask_path): + mask_image = get_latent_mask(Image.open(args.control_image_mask_path[i])) + logger.info( + f"Apply mask for clean latents 1x for {i + 1}: {args.control_image_mask_path[i]}, shape: {mask_image.shape}" + ) + elif control_mask_images is not None and i < len(control_mask_images) and control_mask_images[i] is not None: + mask_image = get_latent_mask(control_mask_images[i]) + logger.info(f"Apply mask for clean latents 1x for {i + 1} with alpha channel: {mask_image.shape}") + if mask_image is not None: + clean_latents[:, :, i : i + 1, :, :] = clean_latents[:, :, i : i + 1, :, :] * mask_image + + for one_frame_param in one_frame_inference: + if one_frame_param.startswith("target_index="): + target_index = int(one_frame_param.split("=")[1]) + latent_indices[:, 0] = target_index + logger.info(f"Set index for target: {target_index}") + elif one_frame_param.startswith("control_index="): + control_indices = one_frame_param.split("=")[1].split(";") + i = 0 + while i < len(control_indices) and i < clean_latent_indices.shape[1]: + control_index = int(control_indices[i]) + clean_latent_indices[:, i] = control_index + i += 1 + logger.info(f"Set index for clean latent 1x: {control_indices}") + + # "default" option does nothing, so we can skip it + if "default" in one_frame_inference: + pass + + if "no_2x" in one_frame_inference: + clean_latents_2x = None + clean_latent_2x_indices = None + logger.info(f"No clean_latents_2x") + else: + clean_latents_2x = torch.zeros((1, 16, 2, height // 8, width // 8), dtype=torch.float32) + index = 1 + latent_window_size + 1 + clean_latent_2x_indices = torch.arange(index, index + 2).unsqueeze(0) # 2 + + if "no_4x" in one_frame_inference: + clean_latents_4x = None + clean_latent_4x_indices = None + logger.info(f"No clean_latents_4x") + else: + clean_latents_4x = torch.zeros((1, 16, 16, height // 8, width // 8), dtype=torch.float32) + index = 1 + latent_window_size + 1 + 2 + clean_latent_4x_indices = torch.arange(index, index + 16).unsqueeze(0) # 16 + + logger.info( + f"One frame inference. 
clean_latent: {clean_latents.shape} latent_indices: {latent_indices}, clean_latent_indices: {clean_latent_indices}, num_frames: {sample_num_frames}" + ) + + # prepare conditioning inputs + prompt_index = 0 + image_index = 0 + + context_for_index = context[prompt_index] + logger.info(f"Prompt: {context_for_index['prompt']}") + + llama_vec = context_for_index["llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask = context_for_index["llama_attention_mask"].to(device) + clip_l_pooler = context_for_index["clip_l_pooler"].to(device, dtype=torch.bfloat16) + + image_encoder_last_hidden_state = context_img[image_index]["image_encoder_last_hidden_state"].to(device, dtype=torch.bfloat16) + + llama_vec_n = context_null["llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask_n = context_null["llama_attention_mask"].to(device) + clip_l_pooler_n = context_null["clip_l_pooler"].to(device, dtype=torch.bfloat16) + + preprocess_magcache(args, model) + + generated_latents = sample_hunyuan( + transformer=model, + sampler=args.sample_solver, + width=width, + height=height, + frames=1, + real_guidance_scale=args.guidance_scale, + distilled_guidance_scale=args.embedded_cfg_scale, + guidance_rescale=args.guidance_rescale, + shift=args.flow_shift, + num_inference_steps=args.infer_steps, + generator=seed_g, + prompt_embeds=llama_vec, + prompt_embeds_mask=llama_attention_mask, + prompt_poolers=clip_l_pooler, + negative_prompt_embeds=llama_vec_n, + negative_prompt_embeds_mask=llama_attention_mask_n, + negative_prompt_poolers=clip_l_pooler_n, + device=device, + dtype=torch.bfloat16, + image_embeddings=image_encoder_last_hidden_state, + latent_indices=latent_indices, + clean_latents=clean_latents, + clean_latent_indices=clean_latent_indices, + clean_latents_2x=clean_latents_2x, + clean_latent_2x_indices=clean_latent_2x_indices, + clean_latents_4x=clean_latents_4x, + clean_latent_4x_indices=clean_latent_4x_indices, + ) + + postprocess_magcache(args, model) + + real_history_latents = generated_latents.to(clean_latents) + return real_history_latents + + +def save_latent(latent: torch.Tensor, args: argparse.Namespace, height: int, width: int) -> str: + """Save latent to file + + Args: + latent: Latent tensor + args: command line arguments + height: height of frame + width: width of frame + + Returns: + str: Path to saved latent file + """ + save_path = args.save_path + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + seed = args.seed + video_seconds = args.video_seconds + + latent_path = f"{save_path}/{time_flag}_{seed}_latent.safetensors" + + if args.no_metadata: + metadata = None + else: + metadata = { + "seeds": f"{seed}", + "prompt": f"{args.prompt}", + "height": f"{height}", + "width": f"{width}", + "video_seconds": f"{video_seconds}", + "infer_steps": f"{args.infer_steps}", + "guidance_scale": f"{args.guidance_scale}", + "latent_window_size": f"{args.latent_window_size}", + "embedded_cfg_scale": f"{args.embedded_cfg_scale}", + "guidance_rescale": f"{args.guidance_rescale}", + "sample_solver": f"{args.sample_solver}", + "latent_window_size": f"{args.latent_window_size}", + "fps": f"{args.fps}", + } + if args.negative_prompt is not None: + metadata["negative_prompt"] = f"{args.negative_prompt}" + + sd = {"latent": latent.contiguous()} + save_file(sd, latent_path, metadata=metadata) + logger.info(f"Latent saved to: {latent_path}") + + return latent_path + + +def save_video( + video: torch.Tensor, args: argparse.Namespace, 
original_base_name: Optional[str] = None, latent_frames: Optional[int] = None +) -> str: + """Save video to file + + Args: + video: Video tensor + args: command line arguments + original_base_name: Original base name (if latents are loaded from files) + + Returns: + str: Path to saved video file + """ + save_path = args.save_path + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + seed = args.seed + original_name = "" if original_base_name is None else f"_{original_base_name}" + latent_frames = "" if latent_frames is None else f"_{latent_frames}" + video_path = f"{save_path}/{time_flag}_{seed}{original_name}{latent_frames}.mp4" + + video = video.unsqueeze(0) + save_videos_grid(video, video_path, fps=args.fps, rescale=True) + logger.info(f"Video saved to: {video_path}") + + return video_path + + +def save_images(sample: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None) -> str: + """Save images to directory + + Args: + sample: Video tensor + args: command line arguments + original_base_name: Original base name (if latents are loaded from files) + + Returns: + str: Path to saved images directory + """ + save_path = args.save_path + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + seed = args.seed + original_name = "" if original_base_name is None else f"_{original_base_name}" + image_name = f"{time_flag}_{seed}{original_name}" + sample = sample.unsqueeze(0) + one_frame_mode = args.one_frame_inference is not None + save_images_grid(sample, save_path, image_name, rescale=True, create_subdir=not one_frame_mode) + logger.info(f"Sample images saved to: {save_path}/{image_name}") + + return f"{save_path}/{image_name}" + + +def save_output( + args: argparse.Namespace, + vae: AutoencoderKLCausal3D, # Expect a VAE instance for decoding + latent: torch.Tensor, + device: torch.device, + original_base_names: Optional[List[str]] = None, +) -> None: + """save output + + Args: + args: command line arguments + vae: VAE model + latent: latent tensor + device: device to use + original_base_names: original base names (if latents are loaded from files) + """ + height, width = latent.shape[-2], latent.shape[-1] # BCTHW + height *= 8 + width *= 8 + # print(f"Saving output. 
Latent shape {latent.shape}; pixel shape {height}x{width}") + if args.output_type == "latent" or args.output_type == "both" or args.output_type == "latent_images": + # save latent + save_latent(latent, args, height, width) + if args.output_type == "latent": + return + + if vae is None: + logger.error("VAE is None, cannot decode latents for saving video/images.") + return + + total_latent_sections = (args.video_seconds * 30) / (args.latent_window_size * 4) + total_latent_sections = int(max(round(total_latent_sections), 1)) + video = decode_latent( + args.latent_window_size, total_latent_sections, args.bulk_decode, vae, latent, device, args.one_frame_inference is not None + ) + + if args.output_type == "video" or args.output_type == "both": + # save video + original_name = "" if original_base_names is None else f"_{original_base_names[0]}" + save_video(video, args, original_name) + + elif args.output_type == "images" or args.output_type == "latent_images": + # save images + original_name = "" if original_base_names is None else f"_{original_base_names[0]}" + save_images(video, args, original_name) + + +def preprocess_prompts_for_batch(prompt_lines: List[str], base_args: argparse.Namespace) -> List[Dict]: + """Process multiple prompts for batch mode + + Args: + prompt_lines: List of prompt lines + base_args: Base command line arguments + + Returns: + List[Dict]: List of prompt data dictionaries + """ + prompts_data = [] + + for line in prompt_lines: + line = line.strip() + if not line or line.startswith("#"): # Skip empty lines and comments + continue + + # Parse prompt line and create override dictionary + prompt_data = parse_prompt_line(line) + logger.info(f"Parsed prompt data: {prompt_data}") + prompts_data.append(prompt_data) + + return prompts_data + + +def load_shared_models(args: argparse.Namespace) -> Dict: + """Load shared models for batch processing or interactive mode. + Models are loaded to CPU to save memory. VAE is NOT loaded here. + DiT model is also NOT loaded here, handled by process_batch_prompts or generate. + + Args: + args: Base command line arguments + + Returns: + Dict: Dictionary of shared models (text/image encoders) + """ + shared_models = {} + # Load text encoders to CPU + tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, "cpu") + tokenizer2, text_encoder2 = load_text_encoder2(args) # Assumes it loads to CPU or handles device internally + # Load image encoders to CPU + feature_extractor, image_encoder = load_image_encoders(args) # Assumes it loads to CPU or handles device internally + + shared_models["tokenizer1"] = tokenizer1 + shared_models["text_encoder1"] = text_encoder1 + shared_models["tokenizer2"] = tokenizer2 + shared_models["text_encoder2"] = text_encoder2 + shared_models["feature_extractor"] = feature_extractor + shared_models["image_encoder"] = image_encoder + + return shared_models + + +def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) -> None: + """Process multiple prompts with model reuse and batched precomputation + + Args: + prompts_data: List of prompt data dictionaries + args: Base command line arguments + """ + if not prompts_data: + logger.warning("No valid prompts found") + return + + gen_settings = get_generation_settings(args) + device = gen_settings.device + + # 1. 
Precompute Image Data (VAE and Image Encoders) + logger.info("Loading VAE and Image Encoders for batch image preprocessing...") + vae_for_batch = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, "cpu") + feature_extractor_batch, image_encoder_batch = load_image_encoders(args) # Assume loads to CPU + + all_precomputed_image_data = [] + all_prompt_args_list = [apply_overrides(args, pd) for pd in prompts_data] # Create all arg instances first + + logger.info("Preprocessing images and VAE encoding for all prompts...") + + # VAE and Image Encoder to device for this phase, because we do not want to offload them to CPU + vae_for_batch.to(device) + image_encoder_batch.to(device) + + # Pass models via a temporary shared_models dict for prepare_image_inputs + # This ensures prepare_image_inputs can use them if it expects them in shared_models + # Or it can load them if this dict is empty (though here we provide them) + temp_shared_models_img = {"feature_extractor": feature_extractor_batch, "image_encoder": image_encoder_batch} + + for i, prompt_args_item in enumerate(all_prompt_args_list): + logger.info(f"Image preprocessing for prompt {i+1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}") + # prepare_image_inputs will move vae/image_encoder to device temporarily + image_data = prepare_image_inputs(prompt_args_item, device, vae_for_batch, temp_shared_models_img) + all_precomputed_image_data.append(image_data) + + # Models should be back on GPU because prepare_image_inputs moved them to the original device + del feature_extractor_batch, image_encoder_batch, temp_shared_models_img + vae_for_batch.to("cpu") # Move VAE back to CPU + clean_memory_on_device(device) + + # 2. Precompute Text Data (Text Encoders) + logger.info("Loading Text Encoders for batch text preprocessing...") + # Text Encoders loaded to CPU by load_text_encoder1/2 + tokenizer1_batch, text_encoder1_batch = load_text_encoder1(args, args.fp8_llm, device) + tokenizer2_batch, text_encoder2_batch = load_text_encoder2(args) + + # Text Encoders to device for this phase + text_encoder2_batch.to(device) # Moved into prepare_text_inputs logic + + all_precomputed_text_data = [] + conds_cache_batch = {} + + logger.info("Preprocessing text and LLM/TextEncoder encoding for all prompts...") + temp_shared_models_txt = { + "tokenizer1": tokenizer1_batch, + "text_encoder1": text_encoder1_batch, # on GPU + "tokenizer2": tokenizer2_batch, + "text_encoder2": text_encoder2_batch, # on GPU + "conds_cache": conds_cache_batch, + } + + for i, prompt_args_item in enumerate(all_prompt_args_list): + logger.info(f"Text preprocessing for prompt {i+1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}") + # prepare_text_inputs will move text_encoders to device temporarily + text_data = prepare_text_inputs(prompt_args_item, device, temp_shared_models_txt) + all_precomputed_text_data.append(text_data) + + # Models should be removed from device after prepare_text_inputs + del tokenizer1_batch, text_encoder1_batch, tokenizer2_batch, text_encoder2_batch, temp_shared_models_txt, conds_cache_batch + clean_memory_on_device(device) + + # 3. Load DiT Model once + logger.info("Loading DiT model for batch generation...") + # Use args from the first prompt for DiT loading (LoRA etc. 
should be consistent for a batch) + first_prompt_args = all_prompt_args_list[0] + dit_model = load_dit_model(first_prompt_args, device) # Load directly to target device if possible + if first_prompt_args.lora_weight is not None and len(first_prompt_args.lora_weight) > 0: + logger.info("Merging LoRA weights into DiT model...") + merge_lora_weights(lora_framepack, dit_model, first_prompt_args, device, convert_lora_for_framepack) + if first_prompt_args.save_merged_model: + logger.info("Merged DiT model saved. Skipping generation.") + del dit_model + clean_memory_on_device(device) + return + logger.info("Optimizing DiT model...") + optimize_model(dit_model, first_prompt_args, device) # Handles device placement, fp8 etc. + + shared_models_for_generate = {"model": dit_model} # Pass DiT via shared_models + + all_latents = [] + + logger.info("Generating latents for all prompts...") + with torch.no_grad(): + for i, prompt_args_item in enumerate(all_prompt_args_list): + current_image_data = all_precomputed_image_data[i] + current_text_data = all_precomputed_text_data[i] + + logger.info(f"Generating latent for prompt {i+1}/{len(all_prompt_args_list)}: {prompt_args_item.prompt}") + try: + # generate is called with precomputed data, so it won't load VAE/Text/Image encoders. + # It will use the DiT model from shared_models_for_generate. + # The VAE instance returned by generate will be None here. + _, latent = generate( + prompt_args_item, gen_settings, shared_models_for_generate, current_image_data, current_text_data + ) + + if latent is None and prompt_args_item.save_merged_model: # Should be caught earlier + continue + + # Save latent if needed (using data from precomputed_image_data for H/W) + if prompt_args_item.output_type in ["latent", "both", "latent_images"]: + height = current_image_data["height"] + width = current_image_data["width"] + save_latent(latent, prompt_args_item, height, width) + + all_latents.append(latent) + except Exception as e: + logger.error(f"Error generating latent for prompt: {prompt_args_item.prompt}. Error: {e}", exc_info=True) + all_latents.append(None) # Add placeholder for failed generations + continue + + # Free DiT model + logger.info("Releasing DiT model from memory...") + if args.blocks_to_swap > 0: + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + + del shared_models_for_generate["model"] + del dit_model + clean_memory_on_device(device) + synchronize_device(device) # Ensure memory is freed before loading VAE for decoding + + # 4. Decode latents and save outputs (using vae_for_batch) + if args.output_type != "latent": + logger.info("Decoding latents to videos/images using batched VAE...") + vae_for_batch.to(device) # Move VAE to device for decoding + + for i, latent in enumerate(all_latents): + if latent is None: # Skip failed generations + logger.warning(f"Skipping decoding for prompt {i+1} due to previous error.") + continue + + current_args = all_prompt_args_list[i] + logger.info(f"Decoding output {i+1}/{len(all_latents)} for prompt: {current_args.prompt}") + + # if args.output_type is "both" or "latent_images", we already saved latent above. + # so we skip saving latent here. + if current_args.output_type == "both": + current_args.output_type = "video" + elif current_args.output_type == "latent_images": + current_args.output_type = "images" + + # save_output expects latent to be [BCTHW] or [CTHW]. generate returns [BCTHW] (batch size 1). + # latent[0] is correct if generate returns it with batch dim. 
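+            # Because output_type was remapped to "video"/"images" just above, save_output will skip its
+            # latent-saving branch here and decode directly with vae_for_batch (the pixel size is
+            # recovered inside save_output by multiplying the latent H/W by 8).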
+ # The latent from generate is (1, C, T, H, W) + save_output(current_args, vae_for_batch, latent[0], device) # Pass vae_for_batch + + vae_for_batch.to("cpu") # Move VAE back to CPU + + del vae_for_batch + clean_memory_on_device(device) + + +def process_interactive(args: argparse.Namespace) -> None: + """Process prompts in interactive mode + + Args: + args: Base command line arguments + """ + gen_settings = get_generation_settings(args) + device = gen_settings.device + shared_models = load_shared_models(args) + shared_models["conds_cache"] = {} # Initialize empty cache for interactive mode + + print("Interactive mode. Enter prompts (Ctrl+D or Ctrl+Z (Windows) to exit):") + + try: + import prompt_toolkit + except ImportError: + logger.warning("prompt_toolkit not found. Using basic input instead.") + prompt_toolkit = None + + if prompt_toolkit: + session = prompt_toolkit.PromptSession() + + def input_line(prompt: str) -> str: + return session.prompt(prompt) + + else: + + def input_line(prompt: str) -> str: + return input(prompt) + + try: + while True: + try: + line = input_line("> ") + if not line.strip(): + continue + if len(line.strip()) == 1 and line.strip() in ["\x04", "\x1a"]: # Ctrl+D or Ctrl+Z with prompt_toolkit + raise EOFError # Exit on Ctrl+D or Ctrl+Z + + # Parse prompt + prompt_data = parse_prompt_line(line) + prompt_args = apply_overrides(args, prompt_data) + + # Generate latent + # For interactive, precomputed data is None. shared_models contains text/image encoders. + # generate will load VAE internally. + returned_vae, latent = generate(prompt_args, gen_settings, shared_models) + + # If not one_frame_inference, move DiT model to CPU after generation + if not prompt_args.one_frame_inference: + if prompt_args.blocks_to_swap > 0: + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + model = shared_models.get("model") + model.to("cpu") # Move DiT model to CPU after generation + + # Save latent and video + # returned_vae from generate will be used for decoding here. + save_output(prompt_args, returned_vae, latent[0], device) + + except KeyboardInterrupt: + print("\nInterrupted. 
Continue (Ctrl+D or Ctrl+Z (Windows) to exit)") + continue + + except EOFError: + print("\nExiting interactive mode") + + +def get_generation_settings(args: argparse.Namespace) -> GenerationSettings: + device = torch.device(args.device) + + dit_weight_dtype = None # default + if args.fp8_scaled: + dit_weight_dtype = None # various precision weights, so don't cast to specific dtype + elif args.fp8: + dit_weight_dtype = torch.float8_e4m3fn + + logger.info(f"Using device: {device}, DiT weight weight precision: {dit_weight_dtype}") + + gen_settings = GenerationSettings(device=device, dit_weight_dtype=dit_weight_dtype) + return gen_settings + + +def main(): + # Parse arguments + args = parse_args() + + # Check if latents are provided + latents_mode = args.latent_path is not None and len(args.latent_path) > 0 + + # Set device + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + logger.info(f"Using device: {device}") + args.device = device + + if latents_mode: + # Original latent decode mode + original_base_names = [] + latents_list = [] + seeds = [] + + # assert len(args.latent_path) == 1, "Only one latent path is supported for now" + + for latent_path in args.latent_path: + original_base_names.append(os.path.splitext(os.path.basename(latent_path))[0]) + seed = 0 + + if os.path.splitext(latent_path)[1] != ".safetensors": + latents = torch.load(latent_path, map_location="cpu") + else: + latents = load_file(latent_path)["latent"] + with safe_open(latent_path, framework="pt") as f: + metadata = f.metadata() + if metadata is None: + metadata = {} + logger.info(f"Loaded metadata: {metadata}") + + if "seeds" in metadata: + seed = int(metadata["seeds"]) + if "height" in metadata and "width" in metadata: + height = int(metadata["height"]) + width = int(metadata["width"]) + args.video_size = [height, width] + if "video_seconds" in metadata: + args.video_seconds = float(metadata["video_seconds"]) + + seeds.append(seed) + logger.info(f"Loaded latent from {latent_path}. Shape: {latents.shape}") + + if latents.ndim == 5: # [BCTHW] + latents = latents.squeeze(0) # [CTHW] + + latents_list.append(latents) + + # latent = torch.stack(latents_list, dim=0) # [N, ...], must be same shape + + for i, latent in enumerate(latents_list): + args.seed = seeds[i] + + vae = load_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, device) + save_output(args, vae, latent, device, original_base_names) + + elif args.from_file: + # Batch mode from file + + # Read prompts from file + with open(args.from_file, "r", encoding="utf-8") as f: + prompt_lines = f.readlines() + + # Process prompts + prompts_data = preprocess_prompts_for_batch(prompt_lines, args) + process_batch_prompts(prompts_data, args) + + elif args.interactive: + # Interactive mode + process_interactive(args) + + else: + # Single prompt mode (original behavior) + + # Generate latent + gen_settings = get_generation_settings(args) + # For single mode, precomputed data is None, shared_models is None. + # generate will load all necessary models (VAE, Text/Image Encoders, DiT). + returned_vae, latent = generate(args, gen_settings) + # print(f"Generated latent shape: {latent.shape}") + if args.save_merged_model: + return + + # Save latent and video + # returned_vae from generate will be used for decoding here. 
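+        # latent is expected to be (1, C, T, H, W) as in batch mode, so latent[0] drops the batch dim.
+        # save_output then saves the raw latent and/or decodes it with returned_vae depending on
+        # args.output_type (latent / video / images / both / latent_images).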
+ save_output(args, returned_vae, latent[0], device) + + logger.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_train_network.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_train_network.py new file mode 100644 index 0000000000000000000000000000000000000000..036ad653e36c5960620f1b724390cf33457df7e4 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/fpack_train_network.py @@ -0,0 +1,618 @@ +import argparse +import gc +import math +import time +from typing import Optional +from PIL import Image + + +import numpy as np +import torch +import torchvision.transforms.functional as TF +from tqdm import tqdm +from accelerate import Accelerator, init_empty_weights + +from musubi_tuner.dataset import image_video_dataset +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_FRAMEPACK, ARCHITECTURE_FRAMEPACK_FULL, load_video +from musubi_tuner.fpack_generate_video import decode_latent +from musubi_tuner.frame_pack import hunyuan +from musubi_tuner.frame_pack.clip_vision import hf_clip_vision_encode +from musubi_tuner.frame_pack.framepack_utils import load_image_encoders, load_text_encoder1, load_text_encoder2 +from musubi_tuner.frame_pack.framepack_utils import load_vae as load_framepack_vae +from musubi_tuner.frame_pack.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked, load_packed_model +from musubi_tuner.frame_pack.k_diffusion_hunyuan import sample_hunyuan +from musubi_tuner.frame_pack.utils import crop_or_pad_yield_mask +from musubi_tuner.dataset.image_video_dataset import resize_image_to_bucket +from musubi_tuner.hv_train_network import NetworkTrainer, load_prompts, clean_memory_on_device, setup_parser_common, read_config_from_file + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +from musubi_tuner.utils import model_utils +from musubi_tuner.utils.safetensors_utils import load_safetensors, MemoryEfficientSafeOpen + + +class FramePackNetworkTrainer(NetworkTrainer): + def __init__(self): + super().__init__() + + # region model specific + + @property + def architecture(self) -> str: + return ARCHITECTURE_FRAMEPACK + + @property + def architecture_full_name(self) -> str: + return ARCHITECTURE_FRAMEPACK_FULL + + def handle_model_specific_args(self, args): + self._i2v_training = True + self._control_training = False + self.default_guidance_scale = 10.0 # embeded guidance scale + + def process_sample_prompts( + self, + args: argparse.Namespace, + accelerator: Accelerator, + sample_prompts: str, + ): + device = accelerator.device + + logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}") + prompts = load_prompts(sample_prompts) + + # load text encoder + tokenizer1, text_encoder1 = load_text_encoder1(args, args.fp8_llm, device) + tokenizer2, text_encoder2 = load_text_encoder2(args) + text_encoder2.to(device) + + sample_prompts_te_outputs = {} # (prompt) -> (t1 embeds, t1 mask, t2 embeds) + for prompt_dict in prompts: + for p in [prompt_dict.get("prompt", ""), prompt_dict.get("negative_prompt", "")]: + if p is None or p in sample_prompts_te_outputs: + continue + logger.info(f"cache Text Encoder outputs for prompt: {p}") + with torch.amp.autocast(device_type=device.type, dtype=text_encoder1.dtype), torch.no_grad(): + llama_vec, clip_l_pooler = hunyuan.encode_prompt_conds(p, text_encoder1, text_encoder2, tokenizer1, tokenizer2) + llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, 
length=512) + + llama_vec = llama_vec.to("cpu") + llama_attention_mask = llama_attention_mask.to("cpu") + clip_l_pooler = clip_l_pooler.to("cpu") + sample_prompts_te_outputs[p] = (llama_vec, llama_attention_mask, clip_l_pooler) + del text_encoder1, text_encoder2 + clean_memory_on_device(device) + + # image embedding for I2V training + feature_extractor, image_encoder = load_image_encoders(args) + image_encoder.to(device) + + # encode image with image encoder + sample_prompts_image_embs = {} + for prompt_dict in prompts: + image_path = prompt_dict.get("image_path", None) + assert image_path is not None, "image_path should be set for I2V training" + if image_path in sample_prompts_image_embs: + continue + + logger.info(f"Encoding image to image encoder context: {image_path}") + + height = prompt_dict.get("height", 256) + width = prompt_dict.get("width", 256) + + img = Image.open(image_path).convert("RGB") + img_np = np.array(img) # PIL to numpy, HWC + img_np = image_video_dataset.resize_image_to_bucket(img_np, (width, height)) # returns a numpy array + + with torch.no_grad(): + image_encoder_output = hf_clip_vision_encode(img_np, feature_extractor, image_encoder) + image_encoder_last_hidden_state = image_encoder_output.last_hidden_state + + image_encoder_last_hidden_state = image_encoder_last_hidden_state.to("cpu") + sample_prompts_image_embs[image_path] = image_encoder_last_hidden_state + + del image_encoder + clean_memory_on_device(device) + + # prepare sample parameters + sample_parameters = [] + for prompt_dict in prompts: + prompt_dict_copy = prompt_dict.copy() + + p = prompt_dict.get("prompt", "") + llama_vec, llama_attention_mask, clip_l_pooler = sample_prompts_te_outputs[p] + prompt_dict_copy["llama_vec"] = llama_vec + prompt_dict_copy["llama_attention_mask"] = llama_attention_mask + prompt_dict_copy["clip_l_pooler"] = clip_l_pooler + + p = prompt_dict.get("negative_prompt", "") + llama_vec, llama_attention_mask, clip_l_pooler = sample_prompts_te_outputs[p] + prompt_dict_copy["negative_llama_vec"] = llama_vec + prompt_dict_copy["negative_llama_attention_mask"] = llama_attention_mask + prompt_dict_copy["negative_clip_l_pooler"] = clip_l_pooler + + p = prompt_dict.get("image_path", None) + prompt_dict_copy["image_encoder_last_hidden_state"] = sample_prompts_image_embs[p] + + sample_parameters.append(prompt_dict_copy) + + clean_memory_on_device(accelerator.device) + return sample_parameters + + def do_inference( + self, + accelerator, + args, + sample_parameter, + vae, + dit_dtype, + transformer, + discrete_flow_shift, + sample_steps, + width, + height, + frame_count, + generator, + do_classifier_free_guidance, + guidance_scale, + cfg_scale, + image_path=None, + control_video_path=None, + ): + """architecture dependent inference""" + model: HunyuanVideoTransformer3DModelPacked = transformer + device = accelerator.device + if cfg_scale is None: + cfg_scale = 1.0 + do_classifier_free_guidance = do_classifier_free_guidance and cfg_scale != 1.0 + + # prepare parameters + one_frame_mode = args.one_frame + if one_frame_mode: + one_frame_inference = set() + for mode in sample_parameter["one_frame"].split(","): + one_frame_inference.add(mode.strip()) + else: + one_frame_inference = None + + latent_window_size = args.latent_window_size # default is 9 + latent_f = (frame_count - 1) // 4 + 1 + total_latent_sections = math.floor((latent_f - 1) / latent_window_size) + if total_latent_sections < 1 and not one_frame_mode: + logger.warning(f"Not enough frames for FramePack: {latent_f}, minimum: 
{latent_window_size*4+1}") + return None + + latent_f = total_latent_sections * latent_window_size + 1 + actual_frame_count = (latent_f - 1) * 4 + 1 + if actual_frame_count != frame_count: + logger.info(f"Frame count mismatch: {actual_frame_count} != {frame_count}, trimming to {actual_frame_count}") + frame_count = actual_frame_count + num_frames = latent_window_size * 4 - 3 + + # prepare start and control latent + def encode_image(path): + image = Image.open(path) + if image.mode == "RGBA": + alpha = image.split()[-1] + image = image.convert("RGB") + else: + alpha = None + image = resize_image_to_bucket(image, (width, height)) # returns a numpy array + image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(1).unsqueeze(0).float() # 1, C, 1, H, W + image = image / 127.5 - 1 # -1 to 1 + return hunyuan.vae_encode(image, vae).to("cpu"), alpha + + # VAE encoding + logger.info(f"Encoding image to latent space") + vae.to(device) + + start_latent, _ = ( + encode_image(image_path) if image_path else torch.zeros((1, 16, 1, height // 8, width // 8), dtype=torch.float32) + ) + + if one_frame_mode: + control_latents = [] + control_alphas = [] + if "control_image_path" in sample_parameter: + for control_image_path in sample_parameter["control_image_path"]: + control_latent, control_alpha = encode_image(control_image_path) + control_latents.append(control_latent) + control_alphas.append(control_alpha) + else: + control_latents = None + control_alphas = None + + vae.to("cpu") # move VAE to CPU to save memory + clean_memory_on_device(device) + + # sampilng + if not one_frame_mode: + f1_mode = args.f1 + history_latents = torch.zeros((1, 16, 1 + 2 + 16, height // 8, width // 8), dtype=torch.float32) + + if not f1_mode: + total_generated_latent_frames = 0 + latent_paddings = reversed(range(total_latent_sections)) + else: + total_generated_latent_frames = 1 + history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2) + latent_paddings = [0] * total_latent_sections + + if total_latent_sections > 4: + latent_paddings = [3] + [2] * (total_latent_sections - 3) + [1, 0] + + latent_paddings = list(latent_paddings) + for loop_index in range(total_latent_sections): + latent_padding = latent_paddings[loop_index] + + if not f1_mode: + is_last_section = latent_padding == 0 + latent_padding_size = latent_padding * latent_window_size + + logger.info(f"latent_padding_size = {latent_padding_size}, is_last_section = {is_last_section}") + + indices = torch.arange(0, sum([1, latent_padding_size, latent_window_size, 1, 2, 16])).unsqueeze(0) + ( + clean_latent_indices_pre, + blank_indices, + latent_indices, + clean_latent_indices_post, + clean_latent_2x_indices, + clean_latent_4x_indices, + ) = indices.split([1, latent_padding_size, latent_window_size, 1, 2, 16], dim=1) + clean_latent_indices = torch.cat([clean_latent_indices_pre, clean_latent_indices_post], dim=1) + + clean_latents_pre = start_latent.to(history_latents) + clean_latents_post, clean_latents_2x, clean_latents_4x = history_latents[:, :, : 1 + 2 + 16, :, :].split( + [1, 2, 16], dim=2 + ) + clean_latents = torch.cat([clean_latents_pre, clean_latents_post], dim=2) + else: + indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0) + ( + clean_latent_indices_start, + clean_latent_4x_indices, + clean_latent_2x_indices, + clean_latent_1x_indices, + latent_indices, + ) = indices.split([1, 16, 2, 1, latent_window_size], dim=1) + clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], 
dim=1) + + clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]) :, :, :].split( + [16, 2, 1], dim=2 + ) + clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2) + + # if use_teacache: + # transformer.initialize_teacache(enable_teacache=True, num_steps=steps) + # else: + # transformer.initialize_teacache(enable_teacache=False) + + llama_vec = sample_parameter["llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask = sample_parameter["llama_attention_mask"].to(device) + clip_l_pooler = sample_parameter["clip_l_pooler"].to(device, dtype=torch.bfloat16) + if cfg_scale == 1.0: + llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler) + llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512) + else: + llama_vec_n = sample_parameter["negative_llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask_n = sample_parameter["negative_llama_attention_mask"].to(device) + clip_l_pooler_n = sample_parameter["negative_clip_l_pooler"].to(device, dtype=torch.bfloat16) + image_encoder_last_hidden_state = sample_parameter["image_encoder_last_hidden_state"].to( + device, dtype=torch.bfloat16 + ) + + generated_latents = sample_hunyuan( + transformer=model, + sampler=args.sample_solver, + width=width, + height=height, + frames=num_frames, + real_guidance_scale=cfg_scale, + distilled_guidance_scale=guidance_scale, + guidance_rescale=0.0, + # shift=3.0, + num_inference_steps=sample_steps, + generator=generator, + prompt_embeds=llama_vec, + prompt_embeds_mask=llama_attention_mask, + prompt_poolers=clip_l_pooler, + negative_prompt_embeds=llama_vec_n, + negative_prompt_embeds_mask=llama_attention_mask_n, + negative_prompt_poolers=clip_l_pooler_n, + device=device, + dtype=torch.bfloat16, + image_embeddings=image_encoder_last_hidden_state, + latent_indices=latent_indices, + clean_latents=clean_latents, + clean_latent_indices=clean_latent_indices, + clean_latents_2x=clean_latents_2x, + clean_latent_2x_indices=clean_latent_2x_indices, + clean_latents_4x=clean_latents_4x, + clean_latent_4x_indices=clean_latent_4x_indices, + ) + + total_generated_latent_frames += int(generated_latents.shape[2]) + if not f1_mode: + if is_last_section: + generated_latents = torch.cat([start_latent.to(generated_latents), generated_latents], dim=2) + total_generated_latent_frames += 1 + history_latents = torch.cat([generated_latents.to(history_latents), history_latents], dim=2) + real_history_latents = history_latents[:, :, :total_generated_latent_frames, :, :] + else: + history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2) + real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :] + + logger.info(f"Generated. 
Latent shape {real_history_latents.shape}") + else: + # one frame mode + sample_num_frames = 1 + latent_indices = torch.zeros((1, 1), dtype=torch.int64) # 1x1 latent index for target image + latent_indices[:, 0] = latent_window_size # last of latent_window + + def get_latent_mask(mask_image: Image.Image): + mask_image = mask_image.resize((width // 8, height // 8), Image.LANCZOS) + mask_image = np.array(mask_image) # PIL to numpy, HWC + mask_image = torch.from_numpy(mask_image).float() / 255.0 # 0 to 1.0, HWC + mask_image = mask_image.squeeze(-1) # HWC -> HW + mask_image = mask_image.unsqueeze(0).unsqueeze(0).unsqueeze(0) # HW -> 111HW (B, C, F, H, W) + mask_image = mask_image.to(torch.float32) + return mask_image + + if control_latents is None or len(control_latents) == 0: + logger.info(f"No control images provided for one frame inference. Use zero latents for control images.") + control_latents = [torch.zeros(1, 16, 1, height // 8, width // 8, dtype=torch.float32)] + + if "no_post" not in one_frame_inference: + # add zero latents as clean latents post + control_latents.append(torch.zeros((1, 16, 1, height // 8, width // 8), dtype=torch.float32)) + logger.info(f"Add zero latents as clean latents post for one frame inference.") + + # kisekaeichi and 1f-mc: both are using control images, but indices are different + clean_latents = torch.cat(control_latents, dim=2) # (1, 16, num_control_images, H//8, W//8) + clean_latent_indices = torch.zeros((1, len(control_latents)), dtype=torch.int64) + if "no_post" not in one_frame_inference: + clean_latent_indices[:, -1] = 1 + latent_window_size # default index for clean latents post + + # apply mask for control latents (clean latents) + for i in range(len(control_alphas)): + control_alpha = control_alphas[i] + if control_alpha is not None: + latent_mask = get_latent_mask(control_alpha) + logger.info(f"Apply mask for clean latents 1x for {i+1}: shape: {latent_mask.shape}") + clean_latents[:, :, i : i + 1, :, :] = clean_latents[:, :, i : i + 1, :, :] * latent_mask + + for one_frame_param in one_frame_inference: + if one_frame_param.startswith("target_index="): + target_index = int(one_frame_param.split("=")[1]) + latent_indices[:, 0] = target_index + logger.info(f"Set index for target: {target_index}") + elif one_frame_param.startswith("control_index="): + control_indices = one_frame_param.split("=")[1].split(";") + i = 0 + while i < len(control_indices) and i < clean_latent_indices.shape[1]: + control_index = int(control_indices[i]) + clean_latent_indices[:, i] = control_index + i += 1 + logger.info(f"Set index for clean latent 1x: {control_indices}") + + if "no_2x" in one_frame_inference: + clean_latents_2x = None + clean_latent_2x_indices = None + logger.info(f"No clean_latents_2x") + else: + clean_latents_2x = torch.zeros((1, 16, 2, height // 8, width // 8), dtype=torch.float32) + index = 1 + latent_window_size + 1 + clean_latent_2x_indices = torch.arange(index, index + 2).unsqueeze(0) # 2 + + if "no_4x" in one_frame_inference: + clean_latents_4x = None + clean_latent_4x_indices = None + logger.info(f"No clean_latents_4x") + else: + clean_latents_4x = torch.zeros((1, 16, 16, height // 8, width // 8), dtype=torch.float32) + index = 1 + latent_window_size + 1 + 2 + clean_latent_4x_indices = torch.arange(index, index + 16).unsqueeze(0) # 16 + + logger.info( + f"One frame inference. 
clean_latent: {clean_latents.shape} latent_indices: {latent_indices}, clean_latent_indices: {clean_latent_indices}, num_frames: {sample_num_frames}" + ) + + # prepare conditioning inputs + llama_vec = sample_parameter["llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask = sample_parameter["llama_attention_mask"].to(device) + clip_l_pooler = sample_parameter["clip_l_pooler"].to(device, dtype=torch.bfloat16) + if cfg_scale == 1.0: + llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler) + llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512) + else: + llama_vec_n = sample_parameter["negative_llama_vec"].to(device, dtype=torch.bfloat16) + llama_attention_mask_n = sample_parameter["negative_llama_attention_mask"].to(device) + clip_l_pooler_n = sample_parameter["negative_clip_l_pooler"].to(device, dtype=torch.bfloat16) + image_encoder_last_hidden_state = sample_parameter["image_encoder_last_hidden_state"].to(device, dtype=torch.bfloat16) + + generated_latents = sample_hunyuan( + transformer=model, + sampler=args.sample_solver, + width=width, + height=height, + frames=1, + real_guidance_scale=cfg_scale, + distilled_guidance_scale=guidance_scale, + guidance_rescale=0.0, + # shift=3.0, + num_inference_steps=sample_steps, + generator=generator, + prompt_embeds=llama_vec, + prompt_embeds_mask=llama_attention_mask, + prompt_poolers=clip_l_pooler, + negative_prompt_embeds=llama_vec_n, + negative_prompt_embeds_mask=llama_attention_mask_n, + negative_prompt_poolers=clip_l_pooler_n, + device=device, + dtype=torch.bfloat16, + image_embeddings=image_encoder_last_hidden_state, + latent_indices=latent_indices, + clean_latents=clean_latents, + clean_latent_indices=clean_latent_indices, + clean_latents_2x=clean_latents_2x, + clean_latent_2x_indices=clean_latent_2x_indices, + clean_latents_4x=clean_latents_4x, + clean_latent_4x_indices=clean_latent_4x_indices, + ) + + real_history_latents = generated_latents.to(clean_latents) + + # wait for 5 seconds until block swap is done + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + + gc.collect() + clean_memory_on_device(device) + + video = decode_latent( + latent_window_size, total_latent_sections, args.bulk_decode, vae, real_history_latents, device, one_frame_mode + ) + video = video.to("cpu", dtype=torch.float32).unsqueeze(0) # add batch dimension + video = (video / 2 + 0.5).clamp(0, 1) # -1 to 1 -> 0 to 1 + clean_memory_on_device(device) + + return video + + def load_vae(self, args: argparse.Namespace, vae_dtype: torch.dtype, vae_path: str): + vae_path = args.vae + logger.info(f"Loading VAE model from {vae_path}") + vae = load_framepack_vae(args.vae, args.vae_chunk_size, args.vae_spatial_tile_sample_min_size, "cpu") + return vae + + def load_transformer( + self, + accelerator: Accelerator, + args: argparse.Namespace, + dit_path: str, + attn_mode: str, + split_attn: bool, + loading_device: str, + dit_weight_dtype: Optional[torch.dtype], + ): + logger.info(f"Loading DiT model from {dit_path}") + device = accelerator.device + model = load_packed_model(device, dit_path, attn_mode, loading_device, args.fp8_scaled, split_attn) + return model + + def scale_shift_latents(self, latents): + # FramePack VAE includes scaling + return latents + + def call_dit( + self, + args: argparse.Namespace, + accelerator: Accelerator, + transformer, + latents: torch.Tensor, + batch: dict[str, torch.Tensor], + noise: torch.Tensor, + noisy_model_input: torch.Tensor, + timesteps: 
torch.Tensor, + network_dtype: torch.dtype, + ): + model: HunyuanVideoTransformer3DModelPacked = transformer + device = accelerator.device + batch_size = latents.shape[0] + + # maybe model.dtype is better than network_dtype... + distilled_guidance = torch.tensor([args.guidance_scale * 1000.0] * batch_size).to(device=device, dtype=network_dtype) + latents = latents.to(device=accelerator.device, dtype=network_dtype) + noisy_model_input = noisy_model_input.to(device=accelerator.device, dtype=network_dtype) + # for k, v in batch.items(): + # if isinstance(v, torch.Tensor): + # print(f"{k}: {v.shape} {v.dtype} {v.device}") + with accelerator.autocast(): + clean_latent_2x_indices = batch["clean_latent_2x_indices"] if "clean_latent_2x_indices" in batch else None + if clean_latent_2x_indices is not None: + clean_latent_2x = batch["latents_clean_2x"] if "latents_clean_2x" in batch else None + if clean_latent_2x is None: + clean_latent_2x = torch.zeros( + (batch_size, 16, 2, latents.shape[3], latents.shape[4]), dtype=latents.dtype, device=latents.device + ) + else: + clean_latent_2x = None + + clean_latent_4x_indices = batch["clean_latent_4x_indices"] if "clean_latent_4x_indices" in batch else None + if clean_latent_4x_indices is not None: + clean_latent_4x = batch["latents_clean_4x"] if "latents_clean_4x" in batch else None + if clean_latent_4x is None: + clean_latent_4x = torch.zeros( + (batch_size, 16, 16, latents.shape[3], latents.shape[4]), dtype=latents.dtype, device=latents.device + ) + else: + clean_latent_4x = None + + model_pred = model( + hidden_states=noisy_model_input, + timestep=timesteps, + encoder_hidden_states=batch["llama_vec"], + encoder_attention_mask=batch["llama_attention_mask"], + pooled_projections=batch["clip_l_pooler"], + guidance=distilled_guidance, + latent_indices=batch["latent_indices"], + clean_latents=batch["latents_clean"], + clean_latent_indices=batch["clean_latent_indices"], + clean_latents_2x=clean_latent_2x, + clean_latent_2x_indices=clean_latent_2x_indices, + clean_latents_4x=clean_latent_4x, + clean_latent_4x_indices=clean_latent_4x_indices, + image_embeddings=batch["image_embeddings"], + return_dict=False, + ) + model_pred = model_pred[0] # returns tuple (model_pred, ) + + # flow matching loss + target = noise - latents + + return model_pred, target + + # endregion model specific + + +def framepack_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + """FramePack specific parser setup""" + parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT / DiTにスケーリングされたfp8を使う") + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for LLM / LLMにfp8を使う") + parser.add_argument("--text_encoder1", type=str, help="Text Encoder 1 directory / テキストエンコーダ1のディレクトリ") + parser.add_argument("--text_encoder2", type=str, help="Text Encoder 2 directory / テキストエンコーダ2のディレクトリ") + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + parser.add_argument("--image_encoder", type=str, required=True, help="Image encoder (CLIP) checkpoint path or directory") + parser.add_argument("--latent_window_size", type=int, default=9, help="FramePack latent window size (default 9)") + parser.add_argument("--bulk_decode", action="store_true", help="decode all frames at once in sample generation") + parser.add_argument("--f1", 
action="store_true", help="Use F1 sampling method for sample generation") + parser.add_argument("--one_frame", action="store_true", help="Use one frame sampling method for sample generation") + return parser + + +def main(): + parser = setup_parser_common() + parser = framepack_setup_parser(parser) + + args = parser.parse_args() + args = read_config_from_file(args, parser) + + assert ( + args.vae_dtype is None or args.vae_dtype == "float16" + ), "VAE dtype must be float16 / VAEのdtypeはfloat16でなければなりません" + args.vae_dtype = "float16" # fixed + args.dit_dtype = "bfloat16" # fixed + args.sample_solver = "unipc" # for sample generation, fixed to unipc + + trainer = FramePackNetworkTrainer() + trainer.train(args) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__init__.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/__init__.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b4de0ed2ac78d9eae6467dd7035d15dd82e608f Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/__init__.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/bucket_tools.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/bucket_tools.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..553342cd69efbece4ca6357dedf1cd3b71dbf62c Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/bucket_tools.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/clip_vision.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/clip_vision.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..689aae70224e49a27aaf0a9e32461679a04a2e34 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/clip_vision.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/framepack_utils.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/framepack_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a7c2d6f4bd3459a4c88a2c2f28a87112f333871 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/framepack_utils.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff55ffe3b75757ec82a9dde771f974f306d7161a Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan_video_packed.cpython-312.pyc 
b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan_video_packed.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6e137173f60b49e8e562bb0712d751a417efe19 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan_video_packed.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan_video_packed_inference.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan_video_packed_inference.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..425dc4f83fc516972c6e3767df947451f75abff4 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/hunyuan_video_packed_inference.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/k_diffusion_hunyuan.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/k_diffusion_hunyuan.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ddff819114b64c1da4eb73677cf7034ab1917e46 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/k_diffusion_hunyuan.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/uni_pc_fm.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/uni_pc_fm.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e87e49b00bbb1275c0feb08b3e49c8c85e64494 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/uni_pc_fm.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/utils.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ba17c6912fc5c4c1e23652cff83f875cc5c98e4 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/utils.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/wrapper.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/wrapper.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87c877e6f55de781de201c107afc88811b05dfbd Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/__pycache__/wrapper.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/bucket_tools.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/bucket_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..dc13fdeb11f9ac87c64dda049a06b968360e7c3f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/bucket_tools.py @@ -0,0 +1,30 @@ +bucket_options = { + 640: [ + (416, 960), + (448, 864), + (480, 832), + (512, 768), + (544, 704), + (576, 672), + (608, 640), + (640, 608), + (672, 576), + (704, 544), + (768, 512), + (832, 480), + (864, 448), + (960, 416), + ], +} + + +def find_nearest_bucket(h, w, resolution=640): + min_metric = float('inf') + best_bucket = None + for (bucket_h, bucket_w) in bucket_options[resolution]: + metric = abs(h 
* bucket_w - w * bucket_h) + if metric <= min_metric: + min_metric = metric + best_bucket = (bucket_h, bucket_w) + return best_bucket + diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/clip_vision.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/clip_vision.py new file mode 100644 index 0000000000000000000000000000000000000000..1c919296b23084ac00e3e4440657d368df1ee86e --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/clip_vision.py @@ -0,0 +1,14 @@ +import numpy as np + + +def hf_clip_vision_encode(image, feature_extractor, image_encoder): + assert isinstance(image, np.ndarray) + assert image.ndim == 3 and image.shape[2] == 3 + assert image.dtype == np.uint8 + + preprocessed = feature_extractor.preprocess(images=image, return_tensors="pt").to( + device=image_encoder.device, dtype=image_encoder.dtype + ) + image_encoder_output = image_encoder(**preprocessed) + + return image_encoder_output diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/framepack_utils.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/framepack_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b26a56ace8ab5abf00ccac1d32cc350f5dca2634 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/framepack_utils.py @@ -0,0 +1,273 @@ +import os +import logging +from types import SimpleNamespace +from typing import Optional, Union + +import accelerate +from accelerate import Accelerator, init_empty_weights +import torch +from safetensors.torch import load_file +from transformers import ( + LlamaTokenizerFast, + LlamaConfig, + LlamaModel, + CLIPTokenizer, + CLIPTextModel, + CLIPConfig, + SiglipImageProcessor, + SiglipVisionModel, + SiglipVisionConfig, +) + +from musubi_tuner.utils.safetensors_utils import load_split_weights +from musubi_tuner.hunyuan_model.vae import load_vae as hunyuan_load_vae + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def load_vae( + vae_path: str, vae_chunk_size: Optional[int], vae_spatial_tile_sample_min_size: Optional[int], device: Union[str, torch.device] +): + # single file and directory (contains 'vae') support + if os.path.isdir(vae_path): + vae_path = os.path.join(vae_path, "vae", "diffusion_pytorch_model.safetensors") + else: + vae_path = vae_path + + vae_dtype = torch.float16 # if vae_dtype is None else str_to_dtype(vae_dtype) + vae, _, s_ratio, t_ratio = hunyuan_load_vae(vae_dtype=vae_dtype, device=device, vae_path=vae_path) + vae.eval() + # vae_kwargs = {"s_ratio": s_ratio, "t_ratio": t_ratio} + + # set chunk_size to CausalConv3d recursively + chunk_size = vae_chunk_size + if chunk_size is not None: + vae.set_chunk_size_for_causal_conv_3d(chunk_size) + logger.info(f"Set chunk_size to {chunk_size} for CausalConv3d") + + if vae_spatial_tile_sample_min_size is not None: + vae.enable_spatial_tiling(True) + vae.tile_sample_min_size = vae_spatial_tile_sample_min_size + vae.tile_latent_min_size = vae_spatial_tile_sample_min_size // 8 + logger.info(f"Enabled spatial tiling with min size {vae_spatial_tile_sample_min_size}") + # elif vae_tiling: + else: + vae.enable_spatial_tiling(True) + + return vae + + +# region Text Encoders + +# Text Encoder configs are copied from HunyuanVideo repo + +LLAMA_CONFIG = { + "architectures": ["LlamaModel"], + "attention_bias": False, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": 
"silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 14336, + "max_position_embeddings": 8192, + "mlp_bias": False, + "model_type": "llama", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": None, + "rope_theta": 500000.0, + "tie_word_embeddings": False, + "torch_dtype": "float16", + "transformers_version": "4.46.3", + "use_cache": True, + "vocab_size": 128320, +} + +CLIP_CONFIG = { + # "_name_or_path": "/raid/aryan/llava-llama-3-8b-v1_1-extracted/text_encoder_2", + "architectures": ["CLIPTextModel"], + "attention_dropout": 0.0, + "bos_token_id": 0, + "dropout": 0.0, + "eos_token_id": 2, + "hidden_act": "quick_gelu", + "hidden_size": 768, + "initializer_factor": 1.0, + "initializer_range": 0.02, + "intermediate_size": 3072, + "layer_norm_eps": 1e-05, + "max_position_embeddings": 77, + "model_type": "clip_text_model", + "num_attention_heads": 12, + "num_hidden_layers": 12, + "pad_token_id": 1, + "projection_dim": 768, + "torch_dtype": "float16", + "transformers_version": "4.48.0.dev0", + "vocab_size": 49408, +} + + +def load_text_encoder1( + args, fp8_llm: Optional[bool] = False, device: Optional[Union[str, torch.device]] = None +) -> tuple[LlamaTokenizerFast, LlamaModel]: + # single file, split file and directory (contains 'text_encoder') support + logger.info(f"Loading text encoder 1 tokenizer") + tokenizer1 = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder="tokenizer") + + logger.info(f"Loading text encoder 1 from {args.text_encoder1}") + if os.path.isdir(args.text_encoder1): + # load from directory, configs are in the directory + text_encoder1 = LlamaModel.from_pretrained(args.text_encoder1, subfolder="text_encoder", torch_dtype=torch.float16) + else: + # load from file, we create the model with the appropriate config + config = LlamaConfig(**LLAMA_CONFIG) + with init_empty_weights(): + text_encoder1 = LlamaModel._from_config(config, torch_dtype=torch.float16) + + state_dict = load_split_weights(args.text_encoder1) + + # support weights from ComfyUI + if "model.embed_tokens.weight" in state_dict: + for key in list(state_dict.keys()): + if key.startswith("model."): + new_key = key.replace("model.", "") + state_dict[new_key] = state_dict[key] + del state_dict[key] + if "tokenizer" in state_dict: + state_dict.pop("tokenizer") + if "lm_head.weight" in state_dict: + state_dict.pop("lm_head.weight") + + # # support weights from ComfyUI + # if "tokenizer" in state_dict: + # state_dict.pop("tokenizer") + + text_encoder1.load_state_dict(state_dict, strict=True, assign=True) + + if fp8_llm: + org_dtype = text_encoder1.dtype + logger.info(f"Moving and casting text encoder to {device} and torch.float8_e4m3fn") + text_encoder1.to(device=device, dtype=torch.float8_e4m3fn) + + # prepare LLM for fp8 + def prepare_fp8(llama_model: LlamaModel, target_dtype): + def forward_hook(module): + def forward(hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + module.variance_epsilon) + return module.weight.to(input_dtype) * hidden_states.to(input_dtype) + + return forward + + for module in llama_model.modules(): + if module.__class__.__name__ in ["Embedding"]: + # print("set", module.__class__.__name__, "to", target_dtype) + module.to(target_dtype) + if module.__class__.__name__ in 
["LlamaRMSNorm"]: + # print("set", module.__class__.__name__, "hooks") + module.forward = forward_hook(module) + + prepare_fp8(text_encoder1, org_dtype) + else: + text_encoder1.to(device) + + text_encoder1.eval() + return tokenizer1, text_encoder1 + + +def load_text_encoder2(args) -> tuple[CLIPTokenizer, CLIPTextModel]: + # single file and directory (contains 'text_encoder_2') support + logger.info(f"Loading text encoder 2 tokenizer") + tokenizer2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder="tokenizer_2") + + logger.info(f"Loading text encoder 2 from {args.text_encoder2}") + if os.path.isdir(args.text_encoder2): + # load from directory, configs are in the directory + text_encoder2 = CLIPTextModel.from_pretrained(args.text_encoder2, subfolder="text_encoder_2", torch_dtype=torch.float16) + else: + # we only have one file, so we can load it directly + config = CLIPConfig(**CLIP_CONFIG) + with init_empty_weights(): + text_encoder2 = CLIPTextModel._from_config(config, torch_dtype=torch.float16) + + state_dict = load_file(args.text_encoder2) + + text_encoder2.load_state_dict(state_dict, strict=True, assign=True) + + text_encoder2.eval() + return tokenizer2, text_encoder2 + + +# endregion + +# region image encoder + +# Siglip configs are copied from FramePack repo +FEATURE_EXTRACTOR_CONFIG = { + "do_convert_rgb": None, + "do_normalize": True, + "do_rescale": True, + "do_resize": True, + "image_mean": [0.5, 0.5, 0.5], + "image_processor_type": "SiglipImageProcessor", + "image_std": [0.5, 0.5, 0.5], + "processor_class": "SiglipProcessor", + "resample": 3, + "rescale_factor": 0.00392156862745098, + "size": {"height": 384, "width": 384}, +} +IMAGE_ENCODER_CONFIG = { + "_name_or_path": "/home/lvmin/.cache/huggingface/hub/models--black-forest-labs--FLUX.1-Redux-dev/snapshots/1282f955f706b5240161278f2ef261d2a29ad649/image_encoder", + "architectures": ["SiglipVisionModel"], + "attention_dropout": 0.0, + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_channels": 3, + "num_hidden_layers": 27, + "patch_size": 14, + "torch_dtype": "bfloat16", + "transformers_version": "4.46.2", +} + + +def load_image_encoders(args): + logger.info(f"Loading image encoder feature extractor") + feature_extractor = SiglipImageProcessor(**FEATURE_EXTRACTOR_CONFIG) + + # single file, split file and directory (contains 'image_encoder') support + logger.info(f"Loading image encoder from {args.image_encoder}") + if os.path.isdir(args.image_encoder): + # load from directory, configs are in the directory + image_encoder = SiglipVisionModel.from_pretrained(args.image_encoder, subfolder="image_encoder", torch_dtype=torch.float16) + else: + # load from file, we create the model with the appropriate config + config = SiglipVisionConfig(**IMAGE_ENCODER_CONFIG) + with init_empty_weights(): + image_encoder = SiglipVisionModel._from_config(config, torch_dtype=torch.float16) + + state_dict = load_file(args.image_encoder) + + image_encoder.load_state_dict(state_dict, strict=True, assign=True) + + image_encoder.eval() + return feature_extractor, image_encoder + + +# endregion diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan.py new file mode 100644 index 0000000000000000000000000000000000000000..b5d1c7254ef0edaa0123cde1c488d1d776eefed1 --- 
/dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan.py @@ -0,0 +1,134 @@ +# original code: https://github.com/lllyasviel/FramePack +# original license: Apache-2.0 + +import torch + +# from diffusers.pipelines.hunyuan_video.pipeline_hunyuan_video import DEFAULT_PROMPT_TEMPLATE +# from diffusers_helper.utils import crop_or_pad_yield_mask +from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D +from musubi_tuner.hunyuan_model.text_encoder import PROMPT_TEMPLATE + + +@torch.no_grad() +def encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2, max_length=256, custom_system_prompt=None): + assert isinstance(prompt, str) + + prompt = [prompt] + + # LLAMA + + # We can verify crop_start by checking the token count of the prompt: + # custom_system_prompt = ( + # "Describe the video by detailing the following aspects: " + # "1. The main content and theme of the video." + # "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects." + # "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects." + # "4. background environment, light, style and atmosphere." + # "5. camera angles, movements, and transitions used in the video:" + # ) + if custom_system_prompt is None: + prompt_llama = [PROMPT_TEMPLATE["dit-llm-encode-video"]["template"].format(p) for p in prompt] + crop_start = PROMPT_TEMPLATE["dit-llm-encode-video"]["crop_start"] + else: + # count tokens for custom_system_prompt + full_prompt = f"<|start_header_id|>system<|end_header_id|>\n\n{custom_system_prompt}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n" + print(f"Custom system prompt: {full_prompt}") + system_prompt_tokens = tokenizer(full_prompt, return_tensors="pt", truncation=True).input_ids[0].shape[0] + print(f"Custom system prompt token count: {system_prompt_tokens}") + prompt_llama = [full_prompt + p + "<|eot_id|>" for p in prompt] + crop_start = system_prompt_tokens + + llama_inputs = tokenizer( + prompt_llama, + padding="max_length", + max_length=max_length + crop_start, + truncation=True, + return_tensors="pt", + return_length=False, + return_overflowing_tokens=False, + return_attention_mask=True, + ) + + llama_input_ids = llama_inputs.input_ids.to(text_encoder.device) + llama_attention_mask = llama_inputs.attention_mask.to(text_encoder.device) + llama_attention_length = int(llama_attention_mask.sum()) + + llama_outputs = text_encoder( + input_ids=llama_input_ids, + attention_mask=llama_attention_mask, + output_hidden_states=True, + ) + + llama_vec = llama_outputs.hidden_states[-3][:, crop_start:llama_attention_length] + # llama_vec_remaining = llama_outputs.hidden_states[-3][:, llama_attention_length:] + llama_attention_mask = llama_attention_mask[:, crop_start:llama_attention_length] + + assert torch.all(llama_attention_mask.bool()) + + # CLIP + + clip_l_input_ids = tokenizer_2( + prompt, + padding="max_length", + max_length=77, + truncation=True, + return_overflowing_tokens=False, + return_length=False, + return_tensors="pt", + ).input_ids + clip_l_pooler = text_encoder_2(clip_l_input_ids.to(text_encoder_2.device), output_hidden_states=False).pooler_output + + return llama_vec, clip_l_pooler + + +@torch.no_grad() +def vae_decode_fake(latents): + latent_rgb_factors = [ + [-0.0395, -0.0331, 0.0445], + [0.0696, 0.0795, 0.0518], + [0.0135, -0.0945, -0.0282], + [0.0108, -0.0250, -0.0765], + [-0.0209, 0.0032, 0.0224], + [-0.0804, -0.0254, -0.0639], + [-0.0991, 0.0271, 
-0.0669], + [-0.0646, -0.0422, -0.0400], + [-0.0696, -0.0595, -0.0894], + [-0.0799, -0.0208, -0.0375], + [0.1166, 0.1627, 0.0962], + [0.1165, 0.0432, 0.0407], + [-0.2315, -0.1920, -0.1355], + [-0.0270, 0.0401, -0.0821], + [-0.0616, -0.0997, -0.0727], + [0.0249, -0.0469, -0.1703], + ] # From comfyui + + latent_rgb_factors_bias = [0.0259, -0.0192, -0.0761] + + weight = torch.tensor(latent_rgb_factors, device=latents.device, dtype=latents.dtype).transpose(0, 1)[:, :, None, None, None] + bias = torch.tensor(latent_rgb_factors_bias, device=latents.device, dtype=latents.dtype) + + images = torch.nn.functional.conv3d(latents, weight, bias=bias, stride=1, padding=0, dilation=1, groups=1) + images = images.clamp(0.0, 1.0) + + return images + + +@torch.no_grad() +def vae_decode(latents, vae, image_mode=False) -> torch.Tensor: + latents = latents / vae.config.scaling_factor + + if not image_mode: + image = vae.decode(latents.to(device=vae.device, dtype=vae.dtype)).sample + else: + latents = latents.to(device=vae.device, dtype=vae.dtype).unbind(2) + image = [vae.decode(l.unsqueeze(2)).sample for l in latents] + image = torch.cat(image, dim=2) + + return image + + +@torch.no_grad() +def vae_encode(image, vae: AutoencoderKLCausal3D) -> torch.Tensor: + latents = vae.encode(image.to(device=vae.device, dtype=vae.dtype)).latent_dist.sample() + latents = latents * vae.config.scaling_factor + return latents diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan_video_packed.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan_video_packed.py new file mode 100644 index 0000000000000000000000000000000000000000..c8a7aba53aceba6ed09d5cb13ad0da8e2e0e8a4f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan_video_packed.py @@ -0,0 +1,2044 @@ +# original code: https://github.com/lllyasviel/FramePack +# original license: Apache-2.0 + +import glob +import math +import numbers +import os +from types import SimpleNamespace +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +import einops +import torch.nn as nn +import torch.nn.functional as F +import numpy as np + +from musubi_tuner.modules.custom_offloading_utils import ModelOffloader +from musubi_tuner.utils.safetensors_utils import load_split_weights +from musubi_tuner.modules.fp8_optimization_utils import apply_fp8_monkey_patch, optimize_state_dict_with_fp8 +from accelerate import init_empty_weights + +try: + # raise NotImplementedError + from xformers.ops import memory_efficient_attention as xformers_attn_func + + print("Xformers is installed!") +except: + print("Xformers is not installed!") + xformers_attn_func = None + +try: + # raise NotImplementedError + from flash_attn import flash_attn_varlen_func, flash_attn_func + + print("Flash Attn is installed!") +except: + print("Flash Attn is not installed!") + flash_attn_varlen_func = None + flash_attn_func = None + +try: + # raise NotImplementedError + from sageattention import sageattn_varlen, sageattn + + print("Sage Attn is installed!") +except: + print("Sage Attn is not installed!") + sageattn_varlen = None + sageattn = None + + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +# region diffusers + +# copied from diffusers with some modifications to minimize dependencies +# original code: https://github.com/huggingface/diffusers/ +# original license: Apache-2.0 + +ACT2CLS = { + "swish": nn.SiLU, + "silu": nn.SiLU, + "mish": nn.Mish, + "gelu": nn.GELU, + 
"relu": nn.ReLU, +} + + +def get_activation(act_fn: str) -> nn.Module: + """Helper function to get activation function from string. + + Args: + act_fn (str): Name of activation function. + + Returns: + nn.Module: Activation function. + """ + + act_fn = act_fn.lower() + if act_fn in ACT2CLS: + return ACT2CLS[act_fn]() + else: + raise ValueError(f"activation function {act_fn} not found in ACT2FN mapping {list(ACT2CLS.keys())}") + + +def get_timestep_embedding( + timesteps: torch.Tensor, + embedding_dim: int, + flip_sin_to_cos: bool = False, + downscale_freq_shift: float = 1, + scale: float = 1, + max_period: int = 10000, +): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. + + Args + timesteps (torch.Tensor): + a 1-D Tensor of N indices, one per batch element. These may be fractional. + embedding_dim (int): + the dimension of the output. + flip_sin_to_cos (bool): + Whether the embedding order should be `cos, sin` (if True) or `sin, cos` (if False) + downscale_freq_shift (float): + Controls the delta between frequencies between dimensions + scale (float): + Scaling factor applied to the embeddings. + max_period (int): + Controls the maximum frequency of the embeddings + Returns + torch.Tensor: an [N x dim] Tensor of positional embeddings. + """ + assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" + + half_dim = embedding_dim // 2 + exponent = -math.log(max_period) * torch.arange(start=0, end=half_dim, dtype=torch.float32, device=timesteps.device) + exponent = exponent / (half_dim - downscale_freq_shift) + + emb = torch.exp(exponent) + emb = timesteps[:, None].float() * emb[None, :] + + # scale embeddings + emb = scale * emb + + # concat sine and cosine embeddings + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) + + # flip sine and cosine embeddings + if flip_sin_to_cos: + emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) + + # zero pad + if embedding_dim % 2 == 1: + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +class TimestepEmbedding(nn.Module): + def __init__( + self, + in_channels: int, + time_embed_dim: int, + act_fn: str = "silu", + out_dim: int = None, + post_act_fn: Optional[str] = None, + cond_proj_dim=None, + sample_proj_bias=True, + ): + super().__init__() + + self.linear_1 = nn.Linear(in_channels, time_embed_dim, sample_proj_bias) + + if cond_proj_dim is not None: + self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False) + else: + self.cond_proj = None + + self.act = get_activation(act_fn) + + if out_dim is not None: + time_embed_dim_out = out_dim + else: + time_embed_dim_out = time_embed_dim + self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out, sample_proj_bias) + + if post_act_fn is None: + self.post_act = None + else: + self.post_act = get_activation(post_act_fn) + + def forward(self, sample, condition=None): + if condition is not None: + sample = sample + self.cond_proj(condition) + sample = self.linear_1(sample) + + if self.act is not None: + sample = self.act(sample) + + sample = self.linear_2(sample) + + if self.post_act is not None: + sample = self.post_act(sample) + return sample + + +class Timesteps(nn.Module): + def __init__(self, num_channels: int, flip_sin_to_cos: bool, downscale_freq_shift: float, scale: int = 1): + super().__init__() + self.num_channels = num_channels + self.flip_sin_to_cos = flip_sin_to_cos + self.downscale_freq_shift = downscale_freq_shift + self.scale = scale + + def forward(self, timesteps): 
+ t_emb = get_timestep_embedding( + timesteps, + self.num_channels, + flip_sin_to_cos=self.flip_sin_to_cos, + downscale_freq_shift=self.downscale_freq_shift, + scale=self.scale, + ) + return t_emb + + +class FP32SiLU(nn.Module): + r""" + SiLU activation function with input upcasted to torch.float32. + """ + + def __init__(self): + super().__init__() + + def forward(self, inputs: torch.Tensor) -> torch.Tensor: + return F.silu(inputs.float(), inplace=False).to(inputs.dtype) + + +class GELU(nn.Module): + r""" + GELU activation function with tanh approximation support with `approximate="tanh"`. + + Parameters: + dim_in (`int`): The number of channels in the input. + dim_out (`int`): The number of channels in the output. + approximate (`str`, *optional*, defaults to `"none"`): If `"tanh"`, use tanh approximation. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def __init__(self, dim_in: int, dim_out: int, approximate: str = "none", bias: bool = True): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out, bias=bias) + self.approximate = approximate + + def gelu(self, gate: torch.Tensor) -> torch.Tensor: + # if gate.device.type == "mps" and is_torch_version("<", "2.0.0"): + # # fp16 gelu not supported on mps before torch 2.0 + # return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype) + return F.gelu(gate, approximate=self.approximate) + + def forward(self, hidden_states): + hidden_states = self.proj(hidden_states) + hidden_states = self.gelu(hidden_states) + return hidden_states + + +class PixArtAlphaTextProjection(nn.Module): + """ + Projects caption embeddings. Also handles dropout for classifier-free guidance. + + Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py + """ + + def __init__(self, in_features, hidden_size, out_features=None, act_fn="gelu_tanh"): + super().__init__() + if out_features is None: + out_features = hidden_size + self.linear_1 = nn.Linear(in_features=in_features, out_features=hidden_size, bias=True) + if act_fn == "gelu_tanh": + self.act_1 = nn.GELU(approximate="tanh") + elif act_fn == "silu": + self.act_1 = nn.SiLU() + elif act_fn == "silu_fp32": + self.act_1 = FP32SiLU() + else: + raise ValueError(f"Unknown activation function: {act_fn}") + self.linear_2 = nn.Linear(in_features=hidden_size, out_features=out_features, bias=True) + + def forward(self, caption): + hidden_states = self.linear_1(caption) + hidden_states = self.act_1(hidden_states) + hidden_states = self.linear_2(hidden_states) + return hidden_states + + +class LayerNormFramePack(nn.LayerNorm): + # casting to dtype of input tensor is added + def forward(self, x: torch.Tensor) -> torch.Tensor: + return torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps).to(x) + + +class FP32LayerNormFramePack(nn.LayerNorm): + def forward(self, x: torch.Tensor) -> torch.Tensor: + origin_dtype = x.dtype + return torch.nn.functional.layer_norm( + x.float(), + self.normalized_shape, + self.weight.float() if self.weight is not None else None, + self.bias.float() if self.bias is not None else None, + self.eps, + ).to(origin_dtype) + + +class RMSNormFramePack(nn.Module): + r""" + RMS Norm as introduced in https://arxiv.org/abs/1910.07467 by Zhang et al. + + Args: + dim (`int`): Number of dimensions to use for `weights`. Only effective when `elementwise_affine` is True. 
+ eps (`float`): Small value to use when calculating the reciprocal of the square-root. + elementwise_affine (`bool`, defaults to `True`): + Boolean flag to denote if affine transformation should be applied. + bias (`bool`, defaults to False): If also training the `bias` param. + """ + + def __init__(self, dim, eps: float, elementwise_affine: bool = True, bias: bool = False): + super().__init__() + + self.eps = eps + self.elementwise_affine = elementwise_affine + + if isinstance(dim, numbers.Integral): + dim = (dim,) + + self.dim = torch.Size(dim) + + self.weight = None + self.bias = None + + if elementwise_affine: + self.weight = nn.Parameter(torch.ones(dim)) + if bias: + self.bias = nn.Parameter(torch.zeros(dim)) + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.eps) + + if self.weight is None: + return hidden_states.to(input_dtype) + + return hidden_states.to(input_dtype) * self.weight.to(input_dtype) + + +class AdaLayerNormContinuousFramePack(nn.Module): + r""" + Adaptive normalization layer with a norm layer (layer_norm or rms_norm). + + Args: + embedding_dim (`int`): Embedding dimension to use during projection. + conditioning_embedding_dim (`int`): Dimension of the input condition. + elementwise_affine (`bool`, defaults to `True`): + Boolean flag to denote if affine transformation should be applied. + eps (`float`, defaults to 1e-5): Epsilon factor. + bias (`bias`, defaults to `True`): Boolean flag to denote if bias should be use. + norm_type (`str`, defaults to `"layer_norm"`): + Normalization layer to use. Values supported: "layer_norm", "rms_norm". + """ + + def __init__( + self, + embedding_dim: int, + conditioning_embedding_dim: int, + # NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters + # because the output is immediately scaled and shifted by the projected conditioning embeddings. + # Note that AdaLayerNorm does not let the norm layer have scale and shift parameters. + # However, this is how it was implemented in the original code, and it's rather likely you should + # set `elementwise_affine` to False. + elementwise_affine=True, + eps=1e-5, + bias=True, + norm_type="layer_norm", + ): + super().__init__() + self.silu = nn.SiLU() + self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias) + if norm_type == "layer_norm": + self.norm = LayerNormFramePack(embedding_dim, eps, elementwise_affine, bias) + elif norm_type == "rms_norm": + self.norm = RMSNormFramePack(embedding_dim, eps, elementwise_affine) + else: + raise ValueError(f"unknown norm_type {norm_type}") + + def forward(self, x, conditioning_embedding): + emb = self.linear(self.silu(conditioning_embedding)) + scale, shift = emb.chunk(2, dim=1) + x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :] + return x + + +class LinearActivation(nn.Module): + def __init__(self, dim_in: int, dim_out: int, bias: bool = True, activation: str = "silu"): + super().__init__() + + self.proj = nn.Linear(dim_in, dim_out, bias=bias) + self.activation = get_activation(activation) + + def forward(self, hidden_states): + hidden_states = self.proj(hidden_states) + return self.activation(hidden_states) + + +class FeedForward(nn.Module): + r""" + A feed-forward layer. + + Parameters: + dim (`int`): The number of channels in the input. + dim_out (`int`, *optional*): The number of channels in the output. 
If not given, defaults to `dim`. + mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. + """ + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + mult: int = 4, + dropout: float = 0.0, + activation_fn: str = "geglu", + final_dropout: bool = False, + inner_dim=None, + bias: bool = True, + ): + super().__init__() + if inner_dim is None: + inner_dim = int(dim * mult) + dim_out = dim_out if dim_out is not None else dim + + # if activation_fn == "gelu": + # act_fn = GELU(dim, inner_dim, bias=bias) + if activation_fn == "gelu-approximate": + act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias) + # elif activation_fn == "geglu": + # act_fn = GEGLU(dim, inner_dim, bias=bias) + # elif activation_fn == "geglu-approximate": + # act_fn = ApproximateGELU(dim, inner_dim, bias=bias) + # elif activation_fn == "swiglu": + # act_fn = SwiGLU(dim, inner_dim, bias=bias) + elif activation_fn == "linear-silu": + act_fn = LinearActivation(dim, inner_dim, bias=bias, activation="silu") + else: + raise ValueError(f"Unknown activation function: {activation_fn}") + + self.net = nn.ModuleList([]) + # project in + self.net.append(act_fn) + # project dropout + self.net.append(nn.Dropout(dropout)) + # project out + self.net.append(nn.Linear(inner_dim, dim_out, bias=bias)) + # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout + if final_dropout: + self.net.append(nn.Dropout(dropout)) + + def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + # deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + # deprecate("scale", "1.0.0", deprecation_message) + raise ValueError("scale is not supported in this version. Please remove it.") + for module in self.net: + hidden_states = module(hidden_states) + return hidden_states + + +# @maybe_allow_in_graph +class Attention(nn.Module): + r""" + Minimal copy of Attention class from diffusers. 
+ """ + + def __init__( + self, + query_dim: int, + cross_attention_dim: Optional[int] = None, + heads: int = 8, + dim_head: int = 64, + bias: bool = False, + qk_norm: Optional[str] = None, + added_kv_proj_dim: Optional[int] = None, + eps: float = 1e-5, + processor: Optional[any] = None, + out_dim: int = None, + context_pre_only=None, + pre_only=False, + ): + super().__init__() + self.inner_dim = out_dim if out_dim is not None else dim_head * heads + self.inner_kv_dim = self.inner_dim # if kv_heads is None else dim_head * kv_heads + self.query_dim = query_dim + self.use_bias = bias + self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim + self.out_dim = out_dim if out_dim is not None else query_dim + self.out_context_dim = query_dim + self.context_pre_only = context_pre_only + self.pre_only = pre_only + + self.scale = dim_head**-0.5 + self.heads = out_dim // dim_head if out_dim is not None else heads + + self.added_kv_proj_dim = added_kv_proj_dim + + if qk_norm is None: + self.norm_q = None + self.norm_k = None + elif qk_norm == "rms_norm": + self.norm_q = RMSNormFramePack(dim_head, eps=eps) + self.norm_k = RMSNormFramePack(dim_head, eps=eps) + else: + raise ValueError( + f"unknown qk_norm: {qk_norm}. Should be one of None, 'layer_norm', 'fp32_layer_norm', 'layer_norm_across_heads', 'rms_norm', 'rms_norm_across_heads', 'l2'." + ) + + self.to_q = nn.Linear(query_dim, self.inner_dim, bias=bias) + self.to_k = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) + self.to_v = nn.Linear(self.cross_attention_dim, self.inner_kv_dim, bias=bias) + + self.added_proj_bias = True # added_proj_bias + if self.added_kv_proj_dim is not None: + self.add_k_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=True) + self.add_v_proj = nn.Linear(added_kv_proj_dim, self.inner_kv_dim, bias=True) + if self.context_pre_only is not None: + self.add_q_proj = nn.Linear(added_kv_proj_dim, self.inner_dim, bias=True) + else: + self.add_q_proj = None + self.add_k_proj = None + self.add_v_proj = None + + if not self.pre_only: + self.to_out = nn.ModuleList([]) + self.to_out.append(nn.Linear(self.inner_dim, self.out_dim, bias=True)) + # self.to_out.append(nn.Dropout(dropout)) + self.to_out.append(nn.Identity()) # dropout=0.0 + else: + self.to_out = None + + if self.context_pre_only is not None and not self.context_pre_only: + self.to_add_out = nn.Linear(self.inner_dim, self.out_context_dim, bias=True) + else: + self.to_add_out = None + + if qk_norm is not None and added_kv_proj_dim is not None: + if qk_norm == "rms_norm": + self.norm_added_q = RMSNormFramePack(dim_head, eps=eps) + self.norm_added_k = RMSNormFramePack(dim_head, eps=eps) + else: + raise ValueError(f"unknown qk_norm: {qk_norm}. 
Should be one of `None,'layer_norm','fp32_layer_norm','rms_norm'`") + else: + self.norm_added_q = None + self.norm_added_k = None + + # set attention processor + # We use the AttnProcessor2_0 by default when torch 2.x is used which uses + # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention + if processor is None: + processor = AttnProcessor2_0() + self.set_processor(processor) + + def set_processor(self, processor: any) -> None: + self.processor = processor + + def get_processor(self) -> any: + return self.processor + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + **cross_attention_kwargs, + ) -> torch.Tensor: + return self.processor( + self, + hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + **cross_attention_kwargs, + ) + + def prepare_attention_mask( + self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3 + ) -> torch.Tensor: + r""" + Prepare the attention mask for the attention computation. + + Args: + attention_mask (`torch.Tensor`): + The attention mask to prepare. + target_length (`int`): + The target length of the attention mask. This is the length of the attention mask after padding. + batch_size (`int`): + The batch size, which is used to repeat the attention mask. + out_dim (`int`, *optional*, defaults to `3`): + The output dimension of the attention mask. Can be either `3` or `4`. + + Returns: + `torch.Tensor`: The prepared attention mask. + """ + head_size = self.heads + if attention_mask is None: + return attention_mask + + current_length: int = attention_mask.shape[-1] + if current_length != target_length: + if attention_mask.device.type == "mps": + # HACK: MPS: Does not support padding by greater than dimension of input tensor. + # Instead, we can manually construct the padding tensor. + padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length) + padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device) + attention_mask = torch.cat([attention_mask, padding], dim=2) + else: + # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: + # we want to instead pad by (0, remaining_length), where remaining_length is: + # remaining_length: int = target_length - current_length + # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding + attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) + + if out_dim == 3: + if attention_mask.shape[0] < batch_size * head_size: + attention_mask = attention_mask.repeat_interleave(head_size, dim=0, output_size=attention_mask.shape[0] * head_size) + elif out_dim == 4: + attention_mask = attention_mask.unsqueeze(1) + attention_mask = attention_mask.repeat_interleave(head_size, dim=1, output_size=attention_mask.shape[1] * head_size) + + return attention_mask + + +class AttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). 
+ """ + + def __init__(self): + if not hasattr(F, "scaled_dot_product_attention"): + raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + + def __call__( + self, + attn: Attention, + hidden_states: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + temb: Optional[torch.Tensor] = None, + *args, + **kwargs, + ) -> torch.Tensor: + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) + + batch_size, sequence_length, _ = hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape + + if attention_mask is not None: + attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + + query = attn.to_q(hidden_states) + query_dtype = query.dtype # store dtype before potentially deleting query + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + if attn.norm_q is not None: + query = attn.norm_q(query) + if attn.norm_k is not None: + key = attn.norm_k(key) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + hidden_states = F.scaled_dot_product_attention(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False) + del query, key, value, attention_mask # free memory + + hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + hidden_states = hidden_states.to(query_dtype) # use stored dtype + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) + + return hidden_states + + +# endregion diffusers + + +def pad_for_3d_conv(x, kernel_size): + b, c, t, h, w = x.shape + pt, ph, pw = kernel_size + pad_t = (pt - (t % pt)) % pt + pad_h = (ph - (h % ph)) % ph + pad_w = (pw - (w % pw)) % pw + return torch.nn.functional.pad(x, (0, pad_w, 0, pad_h, 0, pad_t), mode="replicate") + + +def center_down_sample_3d(x, kernel_size): + # pt, ph, pw = kernel_size + # cp = (pt * ph * pw) // 2 + # xp = einops.rearrange(x, 'b c (t pt) (h ph) (w pw) -> (pt ph pw) b c t h w', pt=pt, ph=ph, pw=pw) + # xc = xp[cp] + # return xc + return torch.nn.functional.avg_pool3d(x, kernel_size, stride=kernel_size) + + +def get_cu_seqlens(text_mask, img_len): + batch_size = text_mask.shape[0] + text_len = text_mask.sum(dim=1) + max_len = text_mask.shape[1] + img_len + + cu_seqlens = torch.zeros([2 * batch_size + 1], dtype=torch.int32, device=text_mask.device) # ensure device match + + for i in range(batch_size): + s = text_len[i] + img_len + s1 = i * max_len + s + s2 = (i + 1) * max_len + cu_seqlens[2 * i + 1] = s1 + cu_seqlens[2 * i + 2] = s2 + + return cu_seqlens + + 
+def apply_rotary_emb_transposed(x, freqs_cis): + cos, sin = freqs_cis.unsqueeze(-2).chunk(2, dim=-1) + del freqs_cis + x_real, x_imag = x.unflatten(-1, (-1, 2)).unbind(-1) + x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) + del x_real, x_imag + return (x.float() * cos + x_rotated.float() * sin).to(x.dtype) + + +def attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, attn_mode=None, split_attn=False): + if cu_seqlens_q is None and cu_seqlens_kv is None and max_seqlen_q is None and max_seqlen_kv is None: + if attn_mode == "sageattn" or attn_mode is None and sageattn is not None: + x = sageattn(q, k, v, tensor_layout="NHD") + return x + + if attn_mode == "flash" or attn_mode is None and flash_attn_func is not None: + x = flash_attn_func(q, k, v) + return x + + if attn_mode == "xformers" or attn_mode is None and xformers_attn_func is not None: + x = xformers_attn_func(q, k, v) + return x + + x = torch.nn.functional.scaled_dot_product_attention(q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)).transpose( + 1, 2 + ) + return x + if split_attn: + if attn_mode == "sageattn" or attn_mode is None and sageattn is not None: + x = torch.empty_like(q) + for i in range(q.size(0)): + x[i : i + 1] = sageattn(q[i : i + 1], k[i : i + 1], v[i : i + 1], tensor_layout="NHD") + return x + + if attn_mode == "flash" or attn_mode is None and flash_attn_func is not None: + x = torch.empty_like(q) + for i in range(q.size(0)): + x[i : i + 1] = flash_attn_func(q[i : i + 1], k[i : i + 1], v[i : i + 1]) + return x + + if attn_mode == "xformers" or attn_mode is None and xformers_attn_func is not None: + x = torch.empty_like(q) + for i in range(q.size(0)): + x[i : i + 1] = xformers_attn_func(q[i : i + 1], k[i : i + 1], v[i : i + 1]) + return x + + q = q.transpose(1, 2) + k = k.transpose(1, 2) + v = v.transpose(1, 2) + x = torch.empty_like(q) + for i in range(q.size(0)): + x[i : i + 1] = torch.nn.functional.scaled_dot_product_attention(q[i : i + 1], k[i : i + 1], v[i : i + 1]) + x = x.transpose(1, 2) + return x + + batch_size = q.shape[0] + q = q.view(q.shape[0] * q.shape[1], *q.shape[2:]) + k = k.view(k.shape[0] * k.shape[1], *k.shape[2:]) + v = v.view(v.shape[0] * v.shape[1], *v.shape[2:]) + if attn_mode == "sageattn" or attn_mode is None and sageattn_varlen is not None: + x = sageattn_varlen(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv) + del q, k, v # free memory + elif attn_mode == "flash" or attn_mode is None and flash_attn_varlen_func is not None: + x = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv) + del q, k, v # free memory + else: + raise NotImplementedError("No Attn Installed or batch_size > 1 is not supported in this configuration. 
Try `--split_attn`.") + x = x.view(batch_size, max_seqlen_q, *x.shape[2:]) + return x + + +class HunyuanAttnProcessorFlashAttnDouble: + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states, + attention_mask, + image_rotary_emb, + attn_mode: Optional[str] = None, + split_attn: Optional[bool] = False, + ): + cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv = attention_mask + + # Project image latents + query = attn.to_q(hidden_states) + key = attn.to_k(hidden_states) + value = attn.to_v(hidden_states) + del hidden_states # free memory + + query = query.unflatten(2, (attn.heads, -1)) + key = key.unflatten(2, (attn.heads, -1)) + value = value.unflatten(2, (attn.heads, -1)) + + query = attn.norm_q(query) + key = attn.norm_k(key) + + query = apply_rotary_emb_transposed(query, image_rotary_emb) + key = apply_rotary_emb_transposed(key, image_rotary_emb) + del image_rotary_emb # free memory + + # Project context (text/encoder) embeddings + encoder_query = attn.add_q_proj(encoder_hidden_states) + encoder_key = attn.add_k_proj(encoder_hidden_states) + encoder_value = attn.add_v_proj(encoder_hidden_states) + txt_length = encoder_hidden_states.shape[1] # store length before deleting + del encoder_hidden_states # free memory + + encoder_query = encoder_query.unflatten(2, (attn.heads, -1)) + encoder_key = encoder_key.unflatten(2, (attn.heads, -1)) + encoder_value = encoder_value.unflatten(2, (attn.heads, -1)) + + encoder_query = attn.norm_added_q(encoder_query) + encoder_key = attn.norm_added_k(encoder_key) + + # Concatenate image and context q, k, v + query = torch.cat([query, encoder_query], dim=1) + key = torch.cat([key, encoder_key], dim=1) + value = torch.cat([value, encoder_value], dim=1) + del encoder_query, encoder_key, encoder_value # free memory + + hidden_states_attn = attn_varlen_func( + query, key, value, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, attn_mode=attn_mode, split_attn=split_attn + ) + del query, key, value # free memory + hidden_states_attn = hidden_states_attn.flatten(-2) + + hidden_states, encoder_hidden_states = hidden_states_attn[:, :-txt_length], hidden_states_attn[:, -txt_length:] + del hidden_states_attn # free memory + + # Apply output projections + hidden_states = attn.to_out[0](hidden_states) + hidden_states = attn.to_out[1](hidden_states) # Dropout/Identity + encoder_hidden_states = attn.to_add_out(encoder_hidden_states) + + return hidden_states, encoder_hidden_states + + +class HunyuanAttnProcessorFlashAttnSingle: + def __call__( + self, + attn: Attention, + hidden_states, + encoder_hidden_states, + attention_mask, + image_rotary_emb, + attn_mode: Optional[str] = None, + split_attn: Optional[bool] = False, + ): + cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv = attention_mask + txt_length = encoder_hidden_states.shape[1] # Store text length + + # Concatenate image and context inputs + hidden_states_cat = torch.cat([hidden_states, encoder_hidden_states], dim=1) + del hidden_states, encoder_hidden_states # free memory + + # Project concatenated inputs + query = attn.to_q(hidden_states_cat) + key = attn.to_k(hidden_states_cat) + value = attn.to_v(hidden_states_cat) + del hidden_states_cat # free memory + + query = query.unflatten(2, (attn.heads, -1)) + key = key.unflatten(2, (attn.heads, -1)) + value = value.unflatten(2, (attn.heads, -1)) + + query = attn.norm_q(query) + key = attn.norm_k(key) + + query = torch.cat([apply_rotary_emb_transposed(query[:, :-txt_length], image_rotary_emb), query[:, -txt_length:]], 
dim=1) + key = torch.cat([apply_rotary_emb_transposed(key[:, :-txt_length], image_rotary_emb), key[:, -txt_length:]], dim=1) + del image_rotary_emb # free memory + + hidden_states = attn_varlen_func( + query, key, value, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, attn_mode=attn_mode, split_attn=split_attn + ) + del query, key, value # free memory + hidden_states = hidden_states.flatten(-2) + + hidden_states, encoder_hidden_states = hidden_states[:, :-txt_length], hidden_states[:, -txt_length:] + + return hidden_states, encoder_hidden_states + + +class CombinedTimestepGuidanceTextProjEmbeddings(nn.Module): + def __init__(self, embedding_dim, pooled_projection_dim): + super().__init__() + + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + self.guidance_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu") + + def forward(self, timestep, guidance, pooled_projection): + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) + + guidance_proj = self.time_proj(guidance) + guidance_emb = self.guidance_embedder(guidance_proj.to(dtype=pooled_projection.dtype)) + + time_guidance_emb = timesteps_emb + guidance_emb + + pooled_projections = self.text_embedder(pooled_projection) + conditioning = time_guidance_emb + pooled_projections + + return conditioning + + +class CombinedTimestepTextProjEmbeddings(nn.Module): + def __init__(self, embedding_dim, pooled_projection_dim): + super().__init__() + + self.time_proj = Timesteps(num_channels=256, flip_sin_to_cos=True, downscale_freq_shift=0) + self.timestep_embedder = TimestepEmbedding(in_channels=256, time_embed_dim=embedding_dim) + self.text_embedder = PixArtAlphaTextProjection(pooled_projection_dim, embedding_dim, act_fn="silu") + + def forward(self, timestep, pooled_projection): + timesteps_proj = self.time_proj(timestep) + timesteps_emb = self.timestep_embedder(timesteps_proj.to(dtype=pooled_projection.dtype)) + + pooled_projections = self.text_embedder(pooled_projection) + + conditioning = timesteps_emb + pooled_projections + + return conditioning + + +class HunyuanVideoAdaNorm(nn.Module): + def __init__(self, in_features: int, out_features: Optional[int] = None) -> None: + super().__init__() + + out_features = out_features or 2 * in_features + self.linear = nn.Linear(in_features, out_features) + self.nonlinearity = nn.SiLU() + + def forward(self, temb: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + temb = self.linear(self.nonlinearity(temb)) + gate_msa, gate_mlp = temb.chunk(2, dim=-1) + gate_msa, gate_mlp = gate_msa.unsqueeze(1), gate_mlp.unsqueeze(1) + return gate_msa, gate_mlp + + +class HunyuanVideoIndividualTokenRefinerBlock(nn.Module): + def __init__( + self, + num_attention_heads: int, + attention_head_dim: int, + mlp_width_ratio: float = 4.0, + mlp_drop_rate: float = 0.0, + attention_bias: bool = True, + ) -> None: + super().__init__() + + hidden_size = num_attention_heads * attention_head_dim + + self.norm1 = LayerNormFramePack(hidden_size, elementwise_affine=True, eps=1e-6) + self.attn = Attention( + query_dim=hidden_size, + cross_attention_dim=None, + heads=num_attention_heads, + dim_head=attention_head_dim, + bias=attention_bias, + ) + + 
self.norm2 = LayerNormFramePack(hidden_size, elementwise_affine=True, eps=1e-6) + self.ff = FeedForward(hidden_size, mult=mlp_width_ratio, activation_fn="linear-silu", dropout=mlp_drop_rate) + + self.norm_out = HunyuanVideoAdaNorm(hidden_size, 2 * hidden_size) + + def forward( + self, + hidden_states: torch.Tensor, + temb: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + norm_hidden_states = self.norm1(hidden_states) + + # Self-attention + attn_output = self.attn( + hidden_states=norm_hidden_states, + encoder_hidden_states=None, + attention_mask=attention_mask, + ) + del norm_hidden_states # free memory + + gate_msa, gate_mlp = self.norm_out(temb) + hidden_states = hidden_states + attn_output * gate_msa + del attn_output, gate_msa # free memory + + ff_output = self.ff(self.norm2(hidden_states)) + hidden_states = hidden_states + ff_output * gate_mlp + del ff_output, gate_mlp # free memory + + return hidden_states + + +class HunyuanVideoIndividualTokenRefiner(nn.Module): + def __init__( + self, + num_attention_heads: int, + attention_head_dim: int, + num_layers: int, + mlp_width_ratio: float = 4.0, + mlp_drop_rate: float = 0.0, + attention_bias: bool = True, + ) -> None: + super().__init__() + + self.refiner_blocks = nn.ModuleList( + [ + HunyuanVideoIndividualTokenRefinerBlock( + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + mlp_width_ratio=mlp_width_ratio, + mlp_drop_rate=mlp_drop_rate, + attention_bias=attention_bias, + ) + for _ in range(num_layers) + ] + ) + + def forward( + self, + hidden_states: torch.Tensor, + temb: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + self_attn_mask = None + if attention_mask is not None: + batch_size = attention_mask.shape[0] + seq_len = attention_mask.shape[1] + attention_mask = attention_mask.to(hidden_states.device).bool() + self_attn_mask_1 = attention_mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1) + self_attn_mask_2 = self_attn_mask_1.transpose(2, 3) + self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool() + self_attn_mask[:, :, :, 0] = True + + for block in self.refiner_blocks: + hidden_states = block(hidden_states, temb, self_attn_mask) + + return hidden_states + + +class HunyuanVideoTokenRefiner(nn.Module): + def __init__( + self, + in_channels: int, + num_attention_heads: int, + attention_head_dim: int, + num_layers: int, + mlp_ratio: float = 4.0, + mlp_drop_rate: float = 0.0, + attention_bias: bool = True, + ) -> None: + super().__init__() + + hidden_size = num_attention_heads * attention_head_dim + + self.time_text_embed = CombinedTimestepTextProjEmbeddings(embedding_dim=hidden_size, pooled_projection_dim=in_channels) + self.proj_in = nn.Linear(in_channels, hidden_size, bias=True) + self.token_refiner = HunyuanVideoIndividualTokenRefiner( + num_attention_heads=num_attention_heads, + attention_head_dim=attention_head_dim, + num_layers=num_layers, + mlp_width_ratio=mlp_ratio, + mlp_drop_rate=mlp_drop_rate, + attention_bias=attention_bias, + ) + + def forward( + self, + hidden_states: torch.Tensor, + timestep: torch.LongTensor, + attention_mask: Optional[torch.LongTensor] = None, + ) -> torch.Tensor: + if attention_mask is None: + pooled_projections = hidden_states.mean(dim=1) + else: + original_dtype = hidden_states.dtype + mask_float = attention_mask.float().unsqueeze(-1) + pooled_projections = (hidden_states * mask_float).sum(dim=1) / mask_float.sum(dim=1) + pooled_projections = 
pooled_projections.to(original_dtype) + + temb = self.time_text_embed(timestep, pooled_projections) + del pooled_projections # free memory + + hidden_states = self.proj_in(hidden_states) + hidden_states = self.token_refiner(hidden_states, temb, attention_mask) + del temb, attention_mask # free memory + + return hidden_states + + +class HunyuanVideoRotaryPosEmbed(nn.Module): + def __init__(self, rope_dim, theta): + super().__init__() + self.DT, self.DY, self.DX = rope_dim + self.theta = theta + self.h_w_scaling_factor = 1.0 + + @torch.no_grad() + def get_frequency(self, dim, pos): + T, H, W = pos.shape + freqs = 1.0 / (self.theta ** (torch.arange(0, dim, 2, dtype=torch.float32, device=pos.device)[: (dim // 2)] / dim)) + freqs = torch.outer(freqs, pos.reshape(-1)).unflatten(-1, (T, H, W)).repeat_interleave(2, dim=0) + return freqs.cos(), freqs.sin() + + @torch.no_grad() + def forward_inner(self, frame_indices, height, width, device): + GT, GY, GX = torch.meshgrid( + frame_indices.to(device=device, dtype=torch.float32), + torch.arange(0, height, device=device, dtype=torch.float32) * self.h_w_scaling_factor, + torch.arange(0, width, device=device, dtype=torch.float32) * self.h_w_scaling_factor, + indexing="ij", + ) + + FCT, FST = self.get_frequency(self.DT, GT) + del GT # free memory + FCY, FSY = self.get_frequency(self.DY, GY) + del GY # free memory + FCX, FSX = self.get_frequency(self.DX, GX) + del GX # free memory + + result = torch.cat([FCT, FCY, FCX, FST, FSY, FSX], dim=0) + del FCT, FCY, FCX, FST, FSY, FSX # free memory + + # Return result already on the correct device + return result # Shape (2 * total_dim / 2, T, H, W) -> (total_dim, T, H, W) + + @torch.no_grad() + def forward(self, frame_indices, height, width, device): + frame_indices = frame_indices.unbind(0) + results = [self.forward_inner(f, height, width, device) for f in frame_indices] + results = torch.stack(results, dim=0) + return results + + +class AdaLayerNormZero(nn.Module): + def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True): + super().__init__() + self.silu = nn.SiLU() + self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias) + if norm_type == "layer_norm": + self.norm = LayerNormFramePack(embedding_dim, elementwise_affine=False, eps=1e-6) + else: + raise ValueError(f"unknown norm_type {norm_type}") + + def forward( + self, x: torch.Tensor, emb: Optional[torch.Tensor] = None + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + emb = emb.unsqueeze(-2) + emb = self.linear(self.silu(emb)) + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=-1) + x = self.norm(x) * (1 + scale_msa) + shift_msa + return x, gate_msa, shift_mlp, scale_mlp, gate_mlp + + +class AdaLayerNormZeroSingle(nn.Module): + def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True): + super().__init__() + + self.silu = nn.SiLU() + self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias) + if norm_type == "layer_norm": + self.norm = LayerNormFramePack(embedding_dim, elementwise_affine=False, eps=1e-6) + else: + raise ValueError(f"unknown norm_type {norm_type}") + + def forward( + self, + x: torch.Tensor, + emb: Optional[torch.Tensor] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + emb = emb.unsqueeze(-2) + emb = self.linear(self.silu(emb)) + shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=-1) + x = self.norm(x) * (1 + scale_msa) + shift_msa + return x, gate_msa + + +class AdaLayerNormContinuous(nn.Module): + def __init__( + 
self, + embedding_dim: int, + conditioning_embedding_dim: int, + elementwise_affine=True, + eps=1e-5, + bias=True, + norm_type="layer_norm", + ): + super().__init__() + self.silu = nn.SiLU() + self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias) + if norm_type == "layer_norm": + self.norm = LayerNormFramePack(embedding_dim, eps, elementwise_affine, bias) + else: + raise ValueError(f"unknown norm_type {norm_type}") + + def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor: + emb = emb.unsqueeze(-2) + emb = self.linear(self.silu(emb)) + scale, shift = emb.chunk(2, dim=-1) + del emb # free memory + x = self.norm(x) * (1 + scale) + shift + return x + + +class HunyuanVideoSingleTransformerBlock(nn.Module): + def __init__( + self, + num_attention_heads: int, + attention_head_dim: int, + mlp_ratio: float = 4.0, + qk_norm: str = "rms_norm", + attn_mode: Optional[str] = None, + split_attn: Optional[bool] = False, + ) -> None: + super().__init__() + + hidden_size = num_attention_heads * attention_head_dim + mlp_dim = int(hidden_size * mlp_ratio) + self.attn_mode = attn_mode + self.split_attn = split_attn + + # Attention layer (pre_only=True means no output projection in Attention module itself) + self.attn = Attention( + query_dim=hidden_size, + cross_attention_dim=None, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=hidden_size, + bias=True, + processor=HunyuanAttnProcessorFlashAttnSingle(), + qk_norm=qk_norm, + eps=1e-6, + pre_only=True, # Crucial: Attn processor will return raw attention output + ) + + self.norm = AdaLayerNormZeroSingle(hidden_size, norm_type="layer_norm") + self.proj_mlp = nn.Linear(hidden_size, mlp_dim) + self.act_mlp = nn.GELU(approximate="tanh") + self.proj_out = nn.Linear(hidden_size + mlp_dim, hidden_size) + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> torch.Tensor: + text_seq_length = encoder_hidden_states.shape[1] + hidden_states = torch.cat([hidden_states, encoder_hidden_states], dim=1) + del encoder_hidden_states # free memory + + residual = hidden_states + + # 1. Input normalization + norm_hidden_states, gate = self.norm(hidden_states, emb=temb) + mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states)) + + norm_hidden_states, norm_encoder_hidden_states = ( + norm_hidden_states[:, :-text_seq_length, :], + norm_hidden_states[:, -text_seq_length:, :], + ) + + # 2. Attention + attn_output, context_attn_output = self.attn( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + attention_mask=attention_mask, + image_rotary_emb=image_rotary_emb, + attn_mode=self.attn_mode, + split_attn=self.split_attn, + ) + attn_output = torch.cat([attn_output, context_attn_output], dim=1) + del norm_hidden_states, norm_encoder_hidden_states, context_attn_output # free memory + del image_rotary_emb + + # 3. 
Modulation and residual connection + hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2) + del attn_output, mlp_hidden_states # free memory + hidden_states = gate * self.proj_out(hidden_states) + hidden_states = hidden_states + residual + + hidden_states, encoder_hidden_states = ( + hidden_states[:, :-text_seq_length, :], + hidden_states[:, -text_seq_length:, :], + ) + return hidden_states, encoder_hidden_states + + +class HunyuanVideoTransformerBlock(nn.Module): + def __init__( + self, + num_attention_heads: int, + attention_head_dim: int, + mlp_ratio: float, + qk_norm: str = "rms_norm", + attn_mode: Optional[str] = None, + split_attn: Optional[bool] = False, + ) -> None: + super().__init__() + + hidden_size = num_attention_heads * attention_head_dim + self.attn_mode = attn_mode + self.split_attn = split_attn + + self.norm1 = AdaLayerNormZero(hidden_size, norm_type="layer_norm") + self.norm1_context = AdaLayerNormZero(hidden_size, norm_type="layer_norm") + + self.attn = Attention( + query_dim=hidden_size, + cross_attention_dim=None, + added_kv_proj_dim=hidden_size, + dim_head=attention_head_dim, + heads=num_attention_heads, + out_dim=hidden_size, + context_pre_only=False, + bias=True, + processor=HunyuanAttnProcessorFlashAttnDouble(), + qk_norm=qk_norm, + eps=1e-6, + ) + + self.norm2 = LayerNormFramePack(hidden_size, elementwise_affine=False, eps=1e-6) + self.ff = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") + + self.norm2_context = LayerNormFramePack(hidden_size, elementwise_affine=False, eps=1e-6) + self.ff_context = FeedForward(hidden_size, mult=mlp_ratio, activation_fn="gelu-approximate") + + def forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor, + temb: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + freqs_cis: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + # 1. Input normalization + norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb) + norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context( + encoder_hidden_states, emb=temb + ) + + # 2. Joint attention + attn_output, context_attn_output = self.attn( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + attention_mask=attention_mask, + image_rotary_emb=freqs_cis, + attn_mode=self.attn_mode, + split_attn=self.split_attn, + ) + del norm_hidden_states, norm_encoder_hidden_states, freqs_cis # free memory + + # 3. Modulation and residual connection + hidden_states = hidden_states + attn_output * gate_msa + del attn_output, gate_msa # free memory + encoder_hidden_states = encoder_hidden_states + context_attn_output * c_gate_msa + del context_attn_output, c_gate_msa # free memory + + norm_hidden_states = self.norm2(hidden_states) + norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states) + + norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp + del shift_mlp, scale_mlp # free memory + norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp) + c_shift_mlp + del c_shift_mlp, c_scale_mlp # free memory + + # 4. 
Feed-forward + ff_output = self.ff(norm_hidden_states) + del norm_hidden_states # free memory + context_ff_output = self.ff_context(norm_encoder_hidden_states) + del norm_encoder_hidden_states # free memory + + hidden_states = hidden_states + gate_mlp * ff_output + del ff_output, gate_mlp # free memory + encoder_hidden_states = encoder_hidden_states + c_gate_mlp * context_ff_output + del context_ff_output, c_gate_mlp # free memory + + return hidden_states, encoder_hidden_states + + +class ClipVisionProjection(nn.Module): + def __init__(self, in_channels, out_channels): + super().__init__() + self.up = nn.Linear(in_channels, out_channels * 3) + self.down = nn.Linear(out_channels * 3, out_channels) + + def forward(self, x): + projected_x = self.down(nn.functional.silu(self.up(x))) + return projected_x + + +class HunyuanVideoPatchEmbed(nn.Module): + def __init__(self, patch_size, in_chans, embed_dim): + super().__init__() + self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + +class HunyuanVideoPatchEmbedForCleanLatents(nn.Module): + def __init__(self, inner_dim): + super().__init__() + self.proj = nn.Conv3d(16, inner_dim, kernel_size=(1, 2, 2), stride=(1, 2, 2)) + self.proj_2x = nn.Conv3d(16, inner_dim, kernel_size=(2, 4, 4), stride=(2, 4, 4)) + self.proj_4x = nn.Conv3d(16, inner_dim, kernel_size=(4, 8, 8), stride=(4, 8, 8)) + + @torch.no_grad() + def initialize_weight_from_another_conv3d(self, another_layer): + weight = another_layer.weight.detach().clone() + bias = another_layer.bias.detach().clone() + + sd = { + "proj.weight": weight.clone(), + "proj.bias": bias.clone(), + "proj_2x.weight": einops.repeat(weight, "b c t h w -> b c (t tk) (h hk) (w wk)", tk=2, hk=2, wk=2) / 8.0, + "proj_2x.bias": bias.clone(), + "proj_4x.weight": einops.repeat(weight, "b c t h w -> b c (t tk) (h hk) (w wk)", tk=4, hk=4, wk=4) / 64.0, + "proj_4x.bias": bias.clone(), + } + + sd = {k: v.clone() for k, v in sd.items()} + + self.load_state_dict(sd) + return + + +class HunyuanVideoTransformer3DModelPacked(nn.Module): # (PreTrainedModelMixin, GenerationMixin, + # ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin): + # @register_to_config + def __init__( + self, + in_channels: int = 16, + out_channels: int = 16, + num_attention_heads: int = 24, + attention_head_dim: int = 128, + num_layers: int = 20, + num_single_layers: int = 40, + num_refiner_layers: int = 2, + mlp_ratio: float = 4.0, + patch_size: int = 2, + patch_size_t: int = 1, + qk_norm: str = "rms_norm", + guidance_embeds: bool = True, + text_embed_dim: int = 4096, + pooled_projection_dim: int = 768, + rope_theta: float = 256.0, + rope_axes_dim: Tuple[int] = (16, 56, 56), + has_image_proj=False, + image_proj_dim=1152, + has_clean_x_embedder=False, + attn_mode: Optional[str] = None, + split_attn: Optional[bool] = False, + ) -> None: + super().__init__() + + inner_dim = num_attention_heads * attention_head_dim + out_channels = out_channels or in_channels + self.config_patch_size = patch_size + self.config_patch_size_t = patch_size_t + + # 1. Latent and condition embedders + self.x_embedder = HunyuanVideoPatchEmbed((patch_size_t, patch_size, patch_size), in_channels, inner_dim) + self.context_embedder = HunyuanVideoTokenRefiner( + text_embed_dim, num_attention_heads, attention_head_dim, num_layers=num_refiner_layers + ) + self.time_text_embed = CombinedTimestepGuidanceTextProjEmbeddings(inner_dim, pooled_projection_dim) + + self.clean_x_embedder = None + self.image_projection = None + + # 2. 
RoPE + self.rope = HunyuanVideoRotaryPosEmbed(rope_axes_dim, rope_theta) + + # 3. Dual stream transformer blocks + self.transformer_blocks = nn.ModuleList( + [ + HunyuanVideoTransformerBlock( + num_attention_heads, + attention_head_dim, + mlp_ratio=mlp_ratio, + qk_norm=qk_norm, + attn_mode=attn_mode, + split_attn=split_attn, + ) + for _ in range(num_layers) + ] + ) + + # 4. Single stream transformer blocks + self.single_transformer_blocks = nn.ModuleList( + [ + HunyuanVideoSingleTransformerBlock( + num_attention_heads, + attention_head_dim, + mlp_ratio=mlp_ratio, + qk_norm=qk_norm, + attn_mode=attn_mode, + split_attn=split_attn, + ) + for _ in range(num_single_layers) + ] + ) + + # 5. Output projection + self.norm_out = AdaLayerNormContinuous(inner_dim, inner_dim, elementwise_affine=False, eps=1e-6) + self.proj_out = nn.Linear(inner_dim, patch_size_t * patch_size * patch_size * out_channels) + + self.inner_dim = inner_dim + self.use_gradient_checkpointing = False + self.enable_teacache = False + + # if has_image_proj: + # self.install_image_projection(image_proj_dim) + self.image_projection = ClipVisionProjection(in_channels=image_proj_dim, out_channels=self.inner_dim) + # self.config["has_image_proj"] = True + # self.config["image_proj_dim"] = in_channels + + # if has_clean_x_embedder: + # self.install_clean_x_embedder() + self.clean_x_embedder = HunyuanVideoPatchEmbedForCleanLatents(self.inner_dim) + # self.config["has_clean_x_embedder"] = True + + self.high_quality_fp32_output_for_inference = True # False # change default to True + + # Block swapping attributes (initialized to None) + self.blocks_to_swap = None + self.offloader_double = None + self.offloader_single = None + + # RoPE scaling + self.rope_scaling_timestep_threshold: Optional[int] = None # scale RoPE above this timestep + self.rope_scaling_factor: float = 1.0 # RoPE scaling factor + + @property + def device(self): + return next(self.parameters()).device + + @property + def dtype(self): + return next(self.parameters()).dtype + + def enable_gradient_checkpointing(self): + self.use_gradient_checkpointing = True + print("Gradient checkpointing enabled for HunyuanVideoTransformer3DModelPacked.") # Logging + + def disable_gradient_checkpointing(self): + self.use_gradient_checkpointing = False + print("Gradient checkpointing disabled for HunyuanVideoTransformer3DModelPacked.") # Logging + + def initialize_teacache(self, enable_teacache=True, num_steps=25, rel_l1_thresh=0.15): + self.enable_teacache = enable_teacache + self.cnt = 0 + self.num_steps = num_steps + self.rel_l1_thresh = rel_l1_thresh # 0.1 for 1.6x speedup, 0.15 for 2.1x speedup + self.accumulated_rel_l1_distance = 0 + self.previous_modulated_input = None + self.previous_residual = None + self.teacache_rescale_func = np.poly1d([7.33226126e02, -4.01131952e02, 6.75869174e01, -3.14987800e00, 9.61237896e-02]) + if enable_teacache: + print(f"TeaCache enabled: num_steps={num_steps}, rel_l1_thresh={rel_l1_thresh}") + else: + print("TeaCache disabled.") + + def gradient_checkpointing_method(self, block, *args): + if self.use_gradient_checkpointing: + result = torch.utils.checkpoint.checkpoint(block, *args, use_reentrant=False) + else: + result = block(*args) + return result + + def enable_block_swap(self, num_blocks: int, device: torch.device, supports_backward: bool): + self.blocks_to_swap = num_blocks + self.num_double_blocks = len(self.transformer_blocks) + self.num_single_blocks = len(self.single_transformer_blocks) + double_blocks_to_swap = num_blocks // 2 + 
single_blocks_to_swap = (num_blocks - double_blocks_to_swap) * 2 + 1 + + assert double_blocks_to_swap <= self.num_double_blocks - 1 and single_blocks_to_swap <= self.num_single_blocks - 1, ( + f"Cannot swap more than {self.num_double_blocks - 1} double blocks and {self.num_single_blocks - 1} single blocks. " + f"Requested {double_blocks_to_swap} double blocks and {single_blocks_to_swap} single blocks." + ) + + self.offloader_double = ModelOffloader( + "double", + self.transformer_blocks, + self.num_double_blocks, + double_blocks_to_swap, + supports_backward, + device, + # debug=True # Optional debugging + ) + self.offloader_single = ModelOffloader( + "single", + self.single_transformer_blocks, + self.num_single_blocks, + single_blocks_to_swap, + supports_backward, + device, # , debug=True + ) + print( + f"HunyuanVideoTransformer3DModelPacked: Block swap enabled. Swapping {num_blocks} blocks, " + + f"double blocks: {double_blocks_to_swap}, single blocks: {single_blocks_to_swap}, supports_backward: {supports_backward}." + ) + + def switch_block_swap_for_inference(self): + if self.blocks_to_swap and self.blocks_to_swap > 0: + self.offloader_double.set_forward_only(True) + self.offloader_single.set_forward_only(True) + self.prepare_block_swap_before_forward() + print(f"HunyuanVideoTransformer3DModelPacked: Block swap set to forward only.") + + def switch_block_swap_for_training(self): + if self.blocks_to_swap and self.blocks_to_swap > 0: + self.offloader_double.set_forward_only(False) + self.offloader_single.set_forward_only(False) + self.prepare_block_swap_before_forward() + print(f"HunyuanVideoTransformer3DModelPacked: Block swap set to forward and backward.") + + def move_to_device_except_swap_blocks(self, device: torch.device): + # assume model is on cpu. 
do not move blocks to device to reduce temporary memory usage + if self.blocks_to_swap: + saved_double_blocks = self.transformer_blocks + saved_single_blocks = self.single_transformer_blocks + self.transformer_blocks = None + self.single_transformer_blocks = None + + self.to(device) + + if self.blocks_to_swap: + self.transformer_blocks = saved_double_blocks + self.single_transformer_blocks = saved_single_blocks + + def prepare_block_swap_before_forward(self): + if self.blocks_to_swap is None or self.blocks_to_swap == 0: + return + self.offloader_double.prepare_block_devices_before_forward(self.transformer_blocks) + self.offloader_single.prepare_block_devices_before_forward(self.single_transformer_blocks) + + def enable_rope_scaling(self, timestep_threshold: Optional[int], rope_scaling_factor: float = 1.0): + if timestep_threshold is not None and rope_scaling_factor > 0: + self.rope_scaling_timestep_threshold = timestep_threshold + self.rope_scaling_factor = rope_scaling_factor + logger.info(f"RoPE scaling enabled: threshold={timestep_threshold}, scaling_factor={rope_scaling_factor}.") + else: + self.rope_scaling_timestep_threshold = None + self.rope_scaling_factor = 1.0 + self.rope.h_w_scaling_factor = 1.0 # reset to default + logger.info("RoPE scaling disabled.") + + def process_input_hidden_states( + self, + latents, + latent_indices=None, + clean_latents=None, + clean_latent_indices=None, + clean_latents_2x=None, + clean_latent_2x_indices=None, + clean_latents_4x=None, + clean_latent_4x_indices=None, + ): + hidden_states = self.gradient_checkpointing_method(self.x_embedder.proj, latents) + B, C, T, H, W = hidden_states.shape + + if latent_indices is None: + latent_indices = torch.arange(0, T).unsqueeze(0).expand(B, -1) + + hidden_states = hidden_states.flatten(2).transpose(1, 2) + + rope_freqs = self.rope(frame_indices=latent_indices, height=H, width=W, device=hidden_states.device) + rope_freqs = rope_freqs.flatten(2).transpose(1, 2) + + if clean_latents is not None and clean_latent_indices is not None: + clean_latents = clean_latents.to(hidden_states) + clean_latents = self.gradient_checkpointing_method(self.clean_x_embedder.proj, clean_latents) + clean_latents = clean_latents.flatten(2).transpose(1, 2) + + clean_latent_rope_freqs = self.rope(frame_indices=clean_latent_indices, height=H, width=W, device=clean_latents.device) + clean_latent_rope_freqs = clean_latent_rope_freqs.flatten(2).transpose(1, 2) + + hidden_states = torch.cat([clean_latents, hidden_states], dim=1) + rope_freqs = torch.cat([clean_latent_rope_freqs, rope_freqs], dim=1) + + if clean_latents_2x is not None and clean_latent_2x_indices is not None: + clean_latents_2x = clean_latents_2x.to(hidden_states) + clean_latents_2x = pad_for_3d_conv(clean_latents_2x, (2, 4, 4)) + clean_latents_2x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_2x, clean_latents_2x) + clean_latents_2x = clean_latents_2x.flatten(2).transpose(1, 2) + + clean_latent_2x_rope_freqs = self.rope( + frame_indices=clean_latent_2x_indices, height=H, width=W, device=clean_latents_2x.device + ) + clean_latent_2x_rope_freqs = pad_for_3d_conv(clean_latent_2x_rope_freqs, (2, 2, 2)) + clean_latent_2x_rope_freqs = center_down_sample_3d(clean_latent_2x_rope_freqs, (2, 2, 2)) + clean_latent_2x_rope_freqs = clean_latent_2x_rope_freqs.flatten(2).transpose(1, 2) + + hidden_states = torch.cat([clean_latents_2x, hidden_states], dim=1) + rope_freqs = torch.cat([clean_latent_2x_rope_freqs, rope_freqs], dim=1) + + if clean_latents_4x is not None and 
clean_latent_4x_indices is not None: + clean_latents_4x = clean_latents_4x.to(hidden_states) + clean_latents_4x = pad_for_3d_conv(clean_latents_4x, (4, 8, 8)) + clean_latents_4x = self.gradient_checkpointing_method(self.clean_x_embedder.proj_4x, clean_latents_4x) + clean_latents_4x = clean_latents_4x.flatten(2).transpose(1, 2) + + clean_latent_4x_rope_freqs = self.rope( + frame_indices=clean_latent_4x_indices, height=H, width=W, device=clean_latents_4x.device + ) + clean_latent_4x_rope_freqs = pad_for_3d_conv(clean_latent_4x_rope_freqs, (4, 4, 4)) + clean_latent_4x_rope_freqs = center_down_sample_3d(clean_latent_4x_rope_freqs, (4, 4, 4)) + clean_latent_4x_rope_freqs = clean_latent_4x_rope_freqs.flatten(2).transpose(1, 2) + + hidden_states = torch.cat([clean_latents_4x, hidden_states], dim=1) + rope_freqs = torch.cat([clean_latent_4x_rope_freqs, rope_freqs], dim=1) + + return hidden_states, rope_freqs + + def forward( + self, + hidden_states, + timestep, + encoder_hidden_states, + encoder_attention_mask, + pooled_projections, + guidance, + latent_indices=None, + clean_latents=None, + clean_latent_indices=None, + clean_latents_2x=None, + clean_latent_2x_indices=None, + clean_latents_4x=None, + clean_latent_4x_indices=None, + image_embeddings=None, + attention_kwargs=None, + return_dict=True, + ): + + if attention_kwargs is None: + attention_kwargs = {} + + # RoPE scaling: must be done before processing hidden states + if self.rope_scaling_timestep_threshold is not None: + if timestep >= self.rope_scaling_timestep_threshold: + self.rope.h_w_scaling_factor = self.rope_scaling_factor + else: + self.rope.h_w_scaling_factor = 1.0 + + batch_size, num_channels, num_frames, height, width = hidden_states.shape + p, p_t = self.config_patch_size, self.config_patch_size_t + post_patch_num_frames = num_frames // p_t + post_patch_height = height // p + post_patch_width = width // p + original_context_length = post_patch_num_frames * post_patch_height * post_patch_width + + hidden_states, rope_freqs = self.process_input_hidden_states( + hidden_states, + latent_indices, + clean_latents, + clean_latent_indices, + clean_latents_2x, + clean_latent_2x_indices, + clean_latents_4x, + clean_latent_4x_indices, + ) + del ( + latent_indices, + clean_latents, + clean_latent_indices, + clean_latents_2x, + clean_latent_2x_indices, + clean_latents_4x, + clean_latent_4x_indices, + ) # free memory + + temb = self.gradient_checkpointing_method(self.time_text_embed, timestep, guidance, pooled_projections) + encoder_hidden_states = self.gradient_checkpointing_method( + self.context_embedder, encoder_hidden_states, timestep, encoder_attention_mask + ) + + if self.image_projection is not None: + assert image_embeddings is not None, "You must use image embeddings!" 
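+ # Project the CLIP vision embeddings to the transformer width; they are prepended to the text tokens below with an all-ones mask entry so attention never drops the image tokens.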
+ extra_encoder_hidden_states = self.gradient_checkpointing_method(self.image_projection, image_embeddings) + extra_attention_mask = torch.ones( + (batch_size, extra_encoder_hidden_states.shape[1]), + dtype=encoder_attention_mask.dtype, + device=encoder_attention_mask.device, + ) + + # must cat before (not after) encoder_hidden_states, due to attn masking + encoder_hidden_states = torch.cat([extra_encoder_hidden_states, encoder_hidden_states], dim=1) + encoder_attention_mask = torch.cat([extra_attention_mask, encoder_attention_mask], dim=1) + del extra_encoder_hidden_states, extra_attention_mask # free memory + + with torch.no_grad(): + if batch_size == 1: + # When batch size is 1, we do not need any masks or var-len funcs since cropping is mathematically same to what we want + # If they are not same, then their impls are wrong. Ours are always the correct one. + text_len = encoder_attention_mask.sum().item() + encoder_hidden_states = encoder_hidden_states[:, :text_len] + attention_mask = None, None, None, None + else: + img_seq_len = hidden_states.shape[1] + txt_seq_len = encoder_hidden_states.shape[1] + + cu_seqlens_q = get_cu_seqlens(encoder_attention_mask, img_seq_len) + cu_seqlens_kv = cu_seqlens_q + max_seqlen_q = img_seq_len + txt_seq_len + max_seqlen_kv = max_seqlen_q + + attention_mask = cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv + del cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv # free memory + del encoder_attention_mask # free memory + + if self.enable_teacache: + modulated_inp = self.transformer_blocks[0].norm1(hidden_states, emb=temb)[0] + + if self.cnt == 0 or self.cnt == self.num_steps - 1: + should_calc = True + self.accumulated_rel_l1_distance = 0 + else: + curr_rel_l1 = ( + ((modulated_inp - self.previous_modulated_input).abs().mean() / self.previous_modulated_input.abs().mean()) + .cpu() + .item() + ) + self.accumulated_rel_l1_distance += self.teacache_rescale_func(curr_rel_l1) + should_calc = self.accumulated_rel_l1_distance >= self.rel_l1_thresh + + if should_calc: + self.accumulated_rel_l1_distance = 0 + + self.previous_modulated_input = modulated_inp + self.cnt += 1 + + if self.cnt == self.num_steps: + self.cnt = 0 + + if not should_calc: + hidden_states = hidden_states + self.previous_residual + else: + ori_hidden_states = hidden_states.clone() + + for block_id, block in enumerate(self.transformer_blocks): + hidden_states, encoder_hidden_states = self.gradient_checkpointing_method( + block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs + ) + + for block_id, block in enumerate(self.single_transformer_blocks): + hidden_states, encoder_hidden_states = self.gradient_checkpointing_method( + block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs + ) + + self.previous_residual = hidden_states - ori_hidden_states + del ori_hidden_states # free memory + else: + for block_id, block in enumerate(self.transformer_blocks): + if self.blocks_to_swap: + self.offloader_double.wait_for_block(block_id) + + hidden_states, encoder_hidden_states = self.gradient_checkpointing_method( + block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs + ) + + if self.blocks_to_swap: + self.offloader_double.submit_move_blocks_forward(self.transformer_blocks, block_id) + + for block_id, block in enumerate(self.single_transformer_blocks): + if self.blocks_to_swap: + self.offloader_single.wait_for_block(block_id) + + hidden_states, encoder_hidden_states = self.gradient_checkpointing_method( + block, 
hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs + ) + + if self.blocks_to_swap: + self.offloader_single.submit_move_blocks_forward(self.single_transformer_blocks, block_id) + + del attention_mask, rope_freqs # free memory + del encoder_hidden_states # free memory + + hidden_states = self.gradient_checkpointing_method(self.norm_out, hidden_states, temb) + + hidden_states = hidden_states[:, -original_context_length:, :] + + if self.high_quality_fp32_output_for_inference: + hidden_states = hidden_states.to(dtype=torch.float32) + if self.proj_out.weight.dtype != torch.float32: + self.proj_out.to(dtype=torch.float32) + + hidden_states = self.gradient_checkpointing_method(self.proj_out, hidden_states) + + hidden_states = einops.rearrange( + hidden_states, + "b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)", + t=post_patch_num_frames, + h=post_patch_height, + w=post_patch_width, + pt=p_t, + ph=p, + pw=p, + ) + + if return_dict: + # return Transformer2DModelOutput(sample=hidden_states) + return SimpleNamespace(sample=hidden_states) + + return (hidden_states,) + + def fp8_optimization( + self, state_dict: dict[str, torch.Tensor], device: torch.device, move_to_device: bool, use_scaled_mm: bool = False + ) -> dict[str, torch.Tensor]: # Return type hint added + """ + Optimize the model state_dict with fp8. + + Args: + state_dict (dict[str, torch.Tensor]): + The state_dict of the model. + device (torch.device): + The device to calculate the weight. + move_to_device (bool): + Whether to move the weight to the device after optimization. + use_scaled_mm (bool): + Whether to use scaled matrix multiplication for FP8. + """ + TARGET_KEYS = ["transformer_blocks", "single_transformer_blocks"] + EXCLUDE_KEYS = ["norm"] # Exclude norm layers (e.g., LayerNorm, RMSNorm) from FP8 + + # inplace optimization + state_dict = optimize_state_dict_with_fp8(state_dict, device, TARGET_KEYS, EXCLUDE_KEYS, move_to_device=move_to_device) + + # apply monkey patching + apply_fp8_monkey_patch(self, state_dict, use_scaled_mm=use_scaled_mm) + + return state_dict + + +def load_packed_model( + device: Union[str, torch.device], + dit_path: str, + attn_mode: str, + loading_device: Union[str, torch.device], + fp8_scaled: bool = False, + split_attn: bool = False, + for_inference: bool = False, +) -> HunyuanVideoTransformer3DModelPacked: + # TODO support split_attn + device = torch.device(device) + loading_device = torch.device(loading_device) + + if os.path.isdir(dit_path): + # we don't support from_pretrained for now, so loading safetensors directly + safetensor_files = glob.glob(os.path.join(dit_path, "*.safetensors")) + if len(safetensor_files) == 0: + raise ValueError(f"Cannot find safetensors file in {dit_path}") + # sort by name and take the first one + safetensor_files.sort() + dit_path = safetensor_files[0] + + with init_empty_weights(): + logger.info(f"Creating HunyuanVideoTransformer3DModelPacked") + + # import here to avoid circular import issues + from musubi_tuner.frame_pack.hunyuan_video_packed_inference import HunyuanVideoTransformer3DModelPackedInference + + model_class = HunyuanVideoTransformer3DModelPackedInference if for_inference else HunyuanVideoTransformer3DModelPacked + model = model_class( + attention_head_dim=128, + guidance_embeds=True, + has_clean_x_embedder=True, + has_image_proj=True, + image_proj_dim=1152, + in_channels=16, + mlp_ratio=4.0, + num_attention_heads=24, + num_layers=20, + num_refiner_layers=2, + num_single_layers=40, + out_channels=16, + patch_size=2, + patch_size_t=1, 
+ pooled_projection_dim=768, + qk_norm="rms_norm", + rope_axes_dim=(16, 56, 56), + rope_theta=256.0, + text_embed_dim=4096, + attn_mode=attn_mode, + split_attn=split_attn, + ) + + # if fp8_scaled, load model weights to CPU to reduce VRAM usage. Otherwise, load to the specified device (CPU for block swap or CUDA for others) + dit_loading_device = torch.device("cpu") if fp8_scaled else loading_device + logger.info(f"Loading DiT model from {dit_path}, device={dit_loading_device}") + + # load model weights with the specified dtype or as is + sd = load_split_weights(dit_path, device=dit_loading_device, disable_mmap=True) + + if fp8_scaled: + # fp8 optimization: calculate on CUDA, move back to CPU if loading_device is CPU (block swap) + logger.info(f"Optimizing model weights to fp8. This may take a while.") + sd = model.fp8_optimization(sd, device, move_to_device=loading_device.type == "cpu") + + if loading_device.type != "cpu": + # make sure all the model weights are on the loading_device + logger.info(f"Moving weights to {loading_device}") + for key in sd.keys(): + sd[key] = sd[key].to(loading_device) + + info = model.load_state_dict(sd, strict=True, assign=True) + logger.info(f"Loaded DiT model from {dit_path}, info={info}") + + return model diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan_video_packed_inference.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan_video_packed_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..e760c8fd642025988c8b81b38a2f6055f6590deb --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/hunyuan_video_packed_inference.py @@ -0,0 +1,341 @@ +# Inference model for Hunyuan Video Packed +# We do not want to break the training accidentally, so we use a separate file for inference model. 
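+# The inference subclass adds MagCache: while the error accumulated from the calibrated magnitude ratios stays below magcache_thresh (for at most K consecutive steps), the transformer blocks are skipped and the residual cached from the last full pass is reused.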
+ +# MagCache: modified from https://github.com/Zehong-Ma/MagCache/blob/main/MagCache4HunyuanVideo/magcache_sample_video.py + +from types import SimpleNamespace +from typing import Optional +import einops +import numpy as np +import torch +from torch.nn import functional as F +from musubi_tuner.frame_pack.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked, get_cu_seqlens + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +class HunyuanVideoTransformer3DModelPackedInference(HunyuanVideoTransformer3DModelPacked): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.enable_magcache = False + + def initialize_magcache( + self, + enable: bool = True, + retention_ratio: float = 0.2, + mag_ratios: Optional[list[float]] = None, + magcache_thresh: float = 0.24, + K: int = 6, + calibration: bool = False, + ): + if mag_ratios is None: + # Copy from original MagCache + mag_ratios = np.array( + [1.0] + + [ + 1.06971, + 1.29073, + 1.11245, + 1.09596, + 1.05233, + 1.01415, + 1.05672, + 1.00848, + 1.03632, + 1.02974, + 1.00984, + 1.03028, + 1.00681, + 1.06614, + 1.05022, + 1.02592, + 1.01776, + 1.02985, + 1.00726, + 1.03727, + 1.01502, + 1.00992, + 1.03371, + 0.9976, + 1.02742, + 1.0093, + 1.01869, + 1.00815, + 1.01461, + 1.01152, + 1.03082, + 1.0061, + 1.02162, + 1.01999, + 0.99063, + 1.01186, + 1.0217, + 0.99947, + 1.01711, + 0.9904, + 1.00258, + 1.00878, + 0.97039, + 0.97686, + 0.94315, + 0.97728, + 0.91154, + 0.86139, + 0.76592, + ] + ) + self.enable_magcache = enable + self.calibration = calibration + self.retention_ratio = retention_ratio + self.default_mag_ratios = mag_ratios + self.magcache_thresh = magcache_thresh + self.K = K + self.reset_magcache() + + def reset_magcache(self, num_steps: int = 50): + if not self.enable_magcache: + return + + def nearest_interp(src_array, target_length): + src_length = len(src_array) + if target_length == 1: + return np.array([src_array[-1]]) + + scale = (src_length - 1) / (target_length - 1) + mapped_indices = np.round(np.arange(target_length) * scale).astype(int) + return np.array(src_array)[mapped_indices] + + if not self.calibration and num_steps != len(self.default_mag_ratios): + logger.info(f"Interpolating mag_ratios from {len(self.default_mag_ratios)} to {num_steps} steps.") + self.mag_ratios = nearest_interp(self.default_mag_ratios, num_steps) + else: + self.mag_ratios = self.default_mag_ratios + + self.cnt = 0 + self.num_steps = num_steps + self.residual_cache = None + self.accumulated_ratio = 1.0 + self.accumulated_steps = 0 + self.accumulated_err = 0 + self.norm_ratio = [] + self.norm_std = [] + self.cos_dis = [] + + def get_calibration_data(self) -> tuple[list[float], list[float], list[float]]: + if not self.enable_magcache or not self.calibration: + raise ValueError("MagCache is not enabled or calibration is not set.") + return self.norm_ratio, self.norm_std, self.cos_dis + + def forward(self, *args, **kwargs): + # Forward pass for inference + if self.enable_magcache: + return self.magcache_forward(*args, **kwargs, calibration=self.calibration) + else: + return super().forward(*args, **kwargs) + + def magcache_forward( + self, + hidden_states, + timestep, + encoder_hidden_states, + encoder_attention_mask, + pooled_projections, + guidance, + latent_indices=None, + clean_latents=None, + clean_latent_indices=None, + clean_latents_2x=None, + clean_latent_2x_indices=None, + clean_latents_4x=None, + clean_latent_4x_indices=None, + image_embeddings=None, + 
attention_kwargs=None, + return_dict=True, + calibration=False, + ): + + if attention_kwargs is None: + attention_kwargs = {} + + # RoPE scaling: must be done before processing hidden states + if self.rope_scaling_timestep_threshold is not None: + if timestep >= self.rope_scaling_timestep_threshold: + self.rope.h_w_scaling_factor = self.rope_scaling_factor + else: + self.rope.h_w_scaling_factor = 1.0 + + batch_size, num_channels, num_frames, height, width = hidden_states.shape + p, p_t = self.config_patch_size, self.config_patch_size_t + post_patch_num_frames = num_frames // p_t + post_patch_height = height // p + post_patch_width = width // p + original_context_length = post_patch_num_frames * post_patch_height * post_patch_width + + hidden_states, rope_freqs = self.process_input_hidden_states( + hidden_states, + latent_indices, + clean_latents, + clean_latent_indices, + clean_latents_2x, + clean_latent_2x_indices, + clean_latents_4x, + clean_latent_4x_indices, + ) + del ( + latent_indices, + clean_latents, + clean_latent_indices, + clean_latents_2x, + clean_latent_2x_indices, + clean_latents_4x, + clean_latent_4x_indices, + ) # free memory + + temb = self.gradient_checkpointing_method(self.time_text_embed, timestep, guidance, pooled_projections) + encoder_hidden_states = self.gradient_checkpointing_method( + self.context_embedder, encoder_hidden_states, timestep, encoder_attention_mask + ) + + if self.image_projection is not None: + assert image_embeddings is not None, "You must use image embeddings!" + extra_encoder_hidden_states = self.gradient_checkpointing_method(self.image_projection, image_embeddings) + extra_attention_mask = torch.ones( + (batch_size, extra_encoder_hidden_states.shape[1]), + dtype=encoder_attention_mask.dtype, + device=encoder_attention_mask.device, + ) + + # must cat before (not after) encoder_hidden_states, due to attn masking + encoder_hidden_states = torch.cat([extra_encoder_hidden_states, encoder_hidden_states], dim=1) + encoder_attention_mask = torch.cat([extra_attention_mask, encoder_attention_mask], dim=1) + del extra_encoder_hidden_states, extra_attention_mask # free memory + + with torch.no_grad(): + if batch_size == 1: + # When batch size is 1, we do not need any masks or var-len funcs since cropping is mathematically same to what we want + # If they are not same, then their impls are wrong. Ours are always the correct one. 
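+ # With a single sample we can simply crop the padded text tokens to the true prompt length; the packed cu_seqlens/attention-mask path is only needed for batch_size > 1.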
+ text_len = encoder_attention_mask.sum().item() + encoder_hidden_states = encoder_hidden_states[:, :text_len] + attention_mask = None, None, None, None + else: + img_seq_len = hidden_states.shape[1] + txt_seq_len = encoder_hidden_states.shape[1] + + cu_seqlens_q = get_cu_seqlens(encoder_attention_mask, img_seq_len) + cu_seqlens_kv = cu_seqlens_q + max_seqlen_q = img_seq_len + txt_seq_len + max_seqlen_kv = max_seqlen_q + + attention_mask = cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv + del cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv # free memory + del encoder_attention_mask # free memory + + if self.enable_teacache: + raise NotImplementedError("TEACache is not implemented for inference model.") + + skip_forward = False + if ( + self.enable_magcache + and not calibration + and self.cnt >= max(int(self.retention_ratio * self.num_steps), 1) + and self.cnt < self.num_steps - 1 + ): + cur_mag_ratio = self.mag_ratios[self.cnt] + self.accumulated_ratio = self.accumulated_ratio * cur_mag_ratio + cur_skip_err = np.abs(1 - self.accumulated_ratio) + self.accumulated_err += cur_skip_err + self.accumulated_steps += 1 + if self.accumulated_err <= self.magcache_thresh and self.accumulated_steps <= self.K: + skip_forward = True + else: + self.accumulated_ratio = 1.0 + self.accumulated_steps = 0 + self.accumulated_err = 0 + + if skip_forward: + # uncomment the following line to debug + # print( + # f"Skipping forward pass at step {self.cnt}, accumulated ratio: {self.accumulated_ratio:.4f}, " + # f"accumulated error: {self.accumulated_err:.4f}, accumulated steps: {self.accumulated_steps}" + # ) + hidden_states = hidden_states + self.residual_cache + else: + ori_hidden_states = hidden_states + + for block_id, block in enumerate(self.transformer_blocks): + if self.blocks_to_swap: + self.offloader_double.wait_for_block(block_id) + + hidden_states, encoder_hidden_states = self.gradient_checkpointing_method( + block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs + ) + + if self.blocks_to_swap: + self.offloader_double.submit_move_blocks_forward(self.transformer_blocks, block_id) + + for block_id, block in enumerate(self.single_transformer_blocks): + if self.blocks_to_swap: + self.offloader_single.wait_for_block(block_id) + + hidden_states, encoder_hidden_states = self.gradient_checkpointing_method( + block, hidden_states, encoder_hidden_states, temb, attention_mask, rope_freqs + ) + + if self.blocks_to_swap: + self.offloader_single.submit_move_blocks_forward(self.single_transformer_blocks, block_id) + + if self.enable_magcache: + cur_residual = hidden_states - ori_hidden_states + if calibration and self.cnt >= 1: + norm_ratio = ((cur_residual.norm(dim=-1) / self.residual_cache.norm(dim=-1)).mean()).item() + norm_std = (cur_residual.norm(dim=-1) / self.residual_cache.norm(dim=-1)).std().item() + cos_dis = (1 - F.cosine_similarity(cur_residual, self.residual_cache, dim=-1, eps=1e-8)).mean().item() + self.norm_ratio.append(round(norm_ratio, 5)) + self.norm_std.append(round(norm_std, 5)) + self.cos_dis.append(round(cos_dis, 5)) + logger.info(f"time: {self.cnt}, norm_ratio: {norm_ratio}, norm_std: {norm_std}, cos_dis: {cos_dis}") + self.residual_cache = cur_residual + + del ori_hidden_states # free memory + + del attention_mask, rope_freqs # free memory + del encoder_hidden_states # free memory + + hidden_states = self.gradient_checkpointing_method(self.norm_out, hidden_states, temb) + + hidden_states = hidden_states[:, -original_context_length:, :] + + if 
self.high_quality_fp32_output_for_inference: + hidden_states = hidden_states.to(dtype=torch.float32) + if self.proj_out.weight.dtype != torch.float32: + self.proj_out.to(dtype=torch.float32) + + hidden_states = self.gradient_checkpointing_method(self.proj_out, hidden_states) + + hidden_states = einops.rearrange( + hidden_states, + "b (t h w) (c pt ph pw) -> b c (t pt) (h ph) (w pw)", + t=post_patch_num_frames, + h=post_patch_height, + w=post_patch_width, + pt=p_t, + ph=p, + pw=p, + ) + + if self.enable_magcache: + self.cnt += 1 + if self.cnt >= self.num_steps: + self.cnt = 0 + self.accumulated_ratio = 1.0 + self.accumulated_steps = 0 + self.accumulated_err = 0 + + if return_dict: + # return Transformer2DModelOutput(sample=hidden_states) + return SimpleNamespace(sample=hidden_states) + + return (hidden_states,) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py new file mode 100644 index 0000000000000000000000000000000000000000..541a0264612dcea9caf3b3a7972cd656f03d2cd4 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/k_diffusion_hunyuan.py @@ -0,0 +1,128 @@ +# original code: https://github.com/lllyasviel/FramePack +# original license: Apache-2.0 + +import torch +import math + +# from diffusers_helper.k_diffusion.uni_pc_fm import sample_unipc +# from diffusers_helper.k_diffusion.wrapper import fm_wrapper +# from diffusers_helper.utils import repeat_to_batch_size +from musubi_tuner.frame_pack.uni_pc_fm import sample_unipc +from musubi_tuner.frame_pack.wrapper import fm_wrapper +from musubi_tuner.frame_pack.utils import repeat_to_batch_size + + +def flux_time_shift(t, mu=1.15, sigma=1.0): + return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) + + +def calculate_flux_mu(context_length, x1=256, y1=0.5, x2=4096, y2=1.15, exp_max=7.0): + k = (y2 - y1) / (x2 - x1) + b = y1 - k * x1 + mu = k * context_length + b + mu = min(mu, math.log(exp_max)) + return mu + + +def get_flux_sigmas_from_mu(n, mu): + sigmas = torch.linspace(1, 0, steps=n + 1) + sigmas = flux_time_shift(sigmas, mu=mu) + return sigmas + + +# @torch.inference_mode() +def sample_hunyuan( + transformer, + sampler="unipc", + initial_latent=None, + concat_latent=None, + strength=1.0, + width=512, + height=512, + frames=16, + real_guidance_scale=1.0, + distilled_guidance_scale=6.0, + guidance_rescale=0.0, + shift=None, + num_inference_steps=25, + batch_size=None, + generator=None, + prompt_embeds=None, + prompt_embeds_mask=None, + prompt_poolers=None, + negative_prompt_embeds=None, + negative_prompt_embeds_mask=None, + negative_prompt_poolers=None, + dtype=torch.bfloat16, + device=None, + negative_kwargs=None, + callback=None, + **kwargs, +): + device = device or transformer.device + + if batch_size is None: + batch_size = int(prompt_embeds.shape[0]) + + latents = torch.randn( + (batch_size, 16, (frames + 3) // 4, height // 8, width // 8), generator=generator, device=generator.device + ).to(device=device, dtype=torch.float32) + + B, C, T, H, W = latents.shape + seq_length = T * H * W // 4 # 9*80*80//4 = 14400 + + if shift is None: + mu = calculate_flux_mu(seq_length, exp_max=7.0) # 1.9459... if seq_len is large, mu is clipped. 
+ else: + mu = math.log(shift) + + sigmas = get_flux_sigmas_from_mu(num_inference_steps, mu).to(device) + + k_model = fm_wrapper(transformer) + + if initial_latent is not None: + sigmas = sigmas * strength + first_sigma = sigmas[0].to(device=device, dtype=torch.float32) + initial_latent = initial_latent.to(device=device, dtype=torch.float32) + latents = initial_latent.float() * (1.0 - first_sigma) + latents.float() * first_sigma + + if concat_latent is not None: + concat_latent = concat_latent.to(latents) + + distilled_guidance = torch.tensor([distilled_guidance_scale * 1000.0] * batch_size).to(device=device, dtype=dtype) + + prompt_embeds = repeat_to_batch_size(prompt_embeds, batch_size) + prompt_embeds_mask = repeat_to_batch_size(prompt_embeds_mask, batch_size) + prompt_poolers = repeat_to_batch_size(prompt_poolers, batch_size) + negative_prompt_embeds = repeat_to_batch_size(negative_prompt_embeds, batch_size) + negative_prompt_embeds_mask = repeat_to_batch_size(negative_prompt_embeds_mask, batch_size) + negative_prompt_poolers = repeat_to_batch_size(negative_prompt_poolers, batch_size) + concat_latent = repeat_to_batch_size(concat_latent, batch_size) + + sampler_kwargs = dict( + dtype=dtype, + cfg_scale=real_guidance_scale, + cfg_rescale=guidance_rescale, + concat_latent=concat_latent, + positive=dict( + pooled_projections=prompt_poolers, + encoder_hidden_states=prompt_embeds, + encoder_attention_mask=prompt_embeds_mask, + guidance=distilled_guidance, + **kwargs, + ), + negative=dict( + pooled_projections=negative_prompt_poolers, + encoder_hidden_states=negative_prompt_embeds, + encoder_attention_mask=negative_prompt_embeds_mask, + guidance=distilled_guidance, + **(kwargs if negative_kwargs is None else {**kwargs, **negative_kwargs}), + ), + ) + + if sampler == "unipc": + results = sample_unipc(k_model, latents, sigmas, extra_args=sampler_kwargs, disable=False, callback=callback) + else: + raise NotImplementedError(f"Sampler {sampler} is not supported.") + + return results diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/uni_pc_fm.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/uni_pc_fm.py new file mode 100644 index 0000000000000000000000000000000000000000..43a198f9f1c408b8c84b47a675c871aaf71bc418 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/uni_pc_fm.py @@ -0,0 +1,142 @@ +# Better Flow Matching UniPC by Lvmin Zhang +# (c) 2025 +# CC BY-SA 4.0 +# Attribution-ShareAlike 4.0 International Licence + + +import torch + +from tqdm.auto import trange + + +def expand_dims(v, dims): + return v[(...,) + (None,) * (dims - 1)] + + +class FlowMatchUniPC: + def __init__(self, model, extra_args, variant='bh1'): + self.model = model + self.variant = variant + self.extra_args = extra_args + + def model_fn(self, x, t): + return self.model(x, t, **self.extra_args) + + def update_fn(self, x, model_prev_list, t_prev_list, t, order): + assert order <= len(model_prev_list) + dims = x.dim() + + t_prev_0 = t_prev_list[-1] + lambda_prev_0 = - torch.log(t_prev_0) + lambda_t = - torch.log(t) + model_prev_0 = model_prev_list[-1] + + h = lambda_t - lambda_prev_0 + + rks = [] + D1s = [] + for i in range(1, order): + t_prev_i = t_prev_list[-(i + 1)] + model_prev_i = model_prev_list[-(i + 1)] + lambda_prev_i = - torch.log(t_prev_i) + rk = ((lambda_prev_i - lambda_prev_0) / h)[0] + rks.append(rk) + D1s.append((model_prev_i - model_prev_0) / rk) + + rks.append(1.) 
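+ # Solve the small linear system built from powers of the step ratios (R @ rho = b) for the UniPC predictor weights rhos_p and corrector weights rhos_c, which weight the stored differences D1s.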
+ rks = torch.tensor(rks, device=x.device) + + R = [] + b = [] + + hh = -h[0] + h_phi_1 = torch.expm1(hh) + h_phi_k = h_phi_1 / hh - 1 + + factorial_i = 1 + + if self.variant == 'bh1': + B_h = hh + elif self.variant == 'bh2': + B_h = torch.expm1(hh) + else: + raise NotImplementedError('Bad variant!') + + for i in range(1, order + 1): + R.append(torch.pow(rks, i - 1)) + b.append(h_phi_k * factorial_i / B_h) + factorial_i *= (i + 1) + h_phi_k = h_phi_k / hh - 1 / factorial_i + + R = torch.stack(R) + b = torch.tensor(b, device=x.device) + + use_predictor = len(D1s) > 0 + + if use_predictor: + D1s = torch.stack(D1s, dim=1) + if order == 2: + rhos_p = torch.tensor([0.5], device=b.device) + else: + rhos_p = torch.linalg.solve(R[:-1, :-1], b[:-1]) + else: + D1s = None + rhos_p = None + + if order == 1: + rhos_c = torch.tensor([0.5], device=b.device) + else: + rhos_c = torch.linalg.solve(R, b) + + x_t_ = expand_dims(t / t_prev_0, dims) * x - expand_dims(h_phi_1, dims) * model_prev_0 + + if use_predictor: + pred_res = torch.tensordot(D1s, rhos_p, dims=([1], [0])) + else: + pred_res = 0 + + x_t = x_t_ - expand_dims(B_h, dims) * pred_res + model_t = self.model_fn(x_t, t) + + if D1s is not None: + corr_res = torch.tensordot(D1s, rhos_c[:-1], dims=([1], [0])) + else: + corr_res = 0 + + D1_t = (model_t - model_prev_0) + x_t = x_t_ - expand_dims(B_h, dims) * (corr_res + rhos_c[-1] * D1_t) + + return x_t, model_t + + def sample(self, x, sigmas, callback=None, disable_pbar=False): + order = min(3, len(sigmas) - 2) + model_prev_list, t_prev_list = [], [] + for i in trange(len(sigmas) - 1, disable=disable_pbar): + vec_t = sigmas[i].expand(x.shape[0]) + + with torch.no_grad(): + if i == 0: + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + elif i < order: + init_order = i + x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, init_order) + model_prev_list.append(model_x) + t_prev_list.append(vec_t) + else: + x, model_x = self.update_fn(x, model_prev_list, t_prev_list, vec_t, order) + model_prev_list.append(model_x) + t_prev_list.append(vec_t) + + model_prev_list = model_prev_list[-order:] + t_prev_list = t_prev_list[-order:] + + if callback is not None: + callback({'x': x, 'i': i, 'denoised': model_prev_list[-1]}) + + return model_prev_list[-1] + + +def sample_unipc(model, noise, sigmas, extra_args=None, callback=None, disable=False, variant='bh1'): + assert variant in ['bh1', 'bh2'] + return FlowMatchUniPC(model, extra_args=extra_args, variant=variant).sample(noise, sigmas=sigmas, callback=callback, disable_pbar=disable) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/utils.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f69bd5fea25b2e8a6c80a774c0bd8eeb5926d0a7 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/utils.py @@ -0,0 +1,617 @@ +import os +import cv2 +import json +import random +import glob +import torch +import einops +import numpy as np +import datetime +import torchvision + +import safetensors.torch as sf +from PIL import Image + + +def min_resize(x, m): + if x.shape[0] < x.shape[1]: + s0 = m + s1 = int(float(m) / float(x.shape[0]) * float(x.shape[1])) + else: + s0 = int(float(m) / float(x.shape[1]) * float(x.shape[0])) + s1 = m + new_max = max(s1, s0) + raw_max = max(x.shape[0], x.shape[1]) + if new_max < raw_max: + interpolation = cv2.INTER_AREA + else: + interpolation = cv2.INTER_LANCZOS4 + y = 
cv2.resize(x, (s1, s0), interpolation=interpolation) + return y + + +def d_resize(x, y): + H, W, C = y.shape + new_min = min(H, W) + raw_min = min(x.shape[0], x.shape[1]) + if new_min < raw_min: + interpolation = cv2.INTER_AREA + else: + interpolation = cv2.INTER_LANCZOS4 + y = cv2.resize(x, (W, H), interpolation=interpolation) + return y + + +def resize_and_center_crop(image, target_width, target_height): + if target_height == image.shape[0] and target_width == image.shape[1]: + return image + + pil_image = Image.fromarray(image) + original_width, original_height = pil_image.size + scale_factor = max(target_width / original_width, target_height / original_height) + resized_width = int(round(original_width * scale_factor)) + resized_height = int(round(original_height * scale_factor)) + resized_image = pil_image.resize((resized_width, resized_height), Image.LANCZOS) + left = (resized_width - target_width) / 2 + top = (resized_height - target_height) / 2 + right = (resized_width + target_width) / 2 + bottom = (resized_height + target_height) / 2 + cropped_image = resized_image.crop((left, top, right, bottom)) + return np.array(cropped_image) + + +def resize_and_center_crop_pytorch(image, target_width, target_height): + B, C, H, W = image.shape + + if H == target_height and W == target_width: + return image + + scale_factor = max(target_width / W, target_height / H) + resized_width = int(round(W * scale_factor)) + resized_height = int(round(H * scale_factor)) + + resized = torch.nn.functional.interpolate(image, size=(resized_height, resized_width), mode="bilinear", align_corners=False) + + top = (resized_height - target_height) // 2 + left = (resized_width - target_width) // 2 + cropped = resized[:, :, top : top + target_height, left : left + target_width] + + return cropped + + +def resize_without_crop(image, target_width, target_height): + if target_height == image.shape[0] and target_width == image.shape[1]: + return image + + pil_image = Image.fromarray(image) + resized_image = pil_image.resize((target_width, target_height), Image.LANCZOS) + return np.array(resized_image) + + +def just_crop(image, w, h): + if h == image.shape[0] and w == image.shape[1]: + return image + + original_height, original_width = image.shape[:2] + k = min(original_height / h, original_width / w) + new_width = int(round(w * k)) + new_height = int(round(h * k)) + x_start = (original_width - new_width) // 2 + y_start = (original_height - new_height) // 2 + cropped_image = image[y_start : y_start + new_height, x_start : x_start + new_width] + return cropped_image + + +def write_to_json(data, file_path): + temp_file_path = file_path + ".tmp" + with open(temp_file_path, "wt", encoding="utf-8") as temp_file: + json.dump(data, temp_file, indent=4) + os.replace(temp_file_path, file_path) + return + + +def read_from_json(file_path): + with open(file_path, "rt", encoding="utf-8") as file: + data = json.load(file) + return data + + +def get_active_parameters(m): + return {k: v for k, v in m.named_parameters() if v.requires_grad} + + +def cast_training_params(m, dtype=torch.float32): + result = {} + for n, param in m.named_parameters(): + if param.requires_grad: + param.data = param.to(dtype) + result[n] = param + return result + + +def separate_lora_AB(parameters, B_patterns=None): + parameters_normal = {} + parameters_B = {} + + if B_patterns is None: + B_patterns = [".lora_B.", "__zero__"] + + for k, v in parameters.items(): + if any(B_pattern in k for B_pattern in B_patterns): + parameters_B[k] = v + else: + 
parameters_normal[k] = v + + return parameters_normal, parameters_B + + +def set_attr_recursive(obj, attr, value): + attrs = attr.split(".") + for name in attrs[:-1]: + obj = getattr(obj, name) + setattr(obj, attrs[-1], value) + return + + +def print_tensor_list_size(tensors): + total_size = 0 + total_elements = 0 + + if isinstance(tensors, dict): + tensors = tensors.values() + + for tensor in tensors: + total_size += tensor.nelement() * tensor.element_size() + total_elements += tensor.nelement() + + total_size_MB = total_size / (1024**2) + total_elements_B = total_elements / 1e9 + + print(f"Total number of tensors: {len(tensors)}") + print(f"Total size of tensors: {total_size_MB:.2f} MB") + print(f"Total number of parameters: {total_elements_B:.3f} billion") + return + + +@torch.no_grad() +def batch_mixture(a, b=None, probability_a=0.5, mask_a=None): + batch_size = a.size(0) + + if b is None: + b = torch.zeros_like(a) + + if mask_a is None: + mask_a = torch.rand(batch_size) < probability_a + + mask_a = mask_a.to(a.device) + mask_a = mask_a.reshape((batch_size,) + (1,) * (a.dim() - 1)) + result = torch.where(mask_a, a, b) + return result + + +@torch.no_grad() +def zero_module(module): + for p in module.parameters(): + p.detach().zero_() + return module + + +@torch.no_grad() +def supress_lower_channels(m, k, alpha=0.01): + data = m.weight.data.clone() + + assert int(data.shape[1]) >= k + + data[:, :k] = data[:, :k] * alpha + m.weight.data = data.contiguous().clone() + return m + + +def freeze_module(m): + if not hasattr(m, "_forward_inside_frozen_module"): + m._forward_inside_frozen_module = m.forward + m.requires_grad_(False) + m.forward = torch.no_grad()(m.forward) + return m + + +def get_latest_safetensors(folder_path): + safetensors_files = glob.glob(os.path.join(folder_path, "*.safetensors")) + + if not safetensors_files: + raise ValueError("No file to resume!") + + latest_file = max(safetensors_files, key=os.path.getmtime) + latest_file = os.path.abspath(os.path.realpath(latest_file)) + return latest_file + + +def generate_random_prompt_from_tags(tags_str, min_length=3, max_length=32): + tags = tags_str.split(", ") + tags = random.sample(tags, k=min(random.randint(min_length, max_length), len(tags))) + prompt = ", ".join(tags) + return prompt + + +def interpolate_numbers(a, b, n, round_to_int=False, gamma=1.0): + numbers = a + (b - a) * (np.linspace(0, 1, n) ** gamma) + if round_to_int: + numbers = np.round(numbers).astype(int) + return numbers.tolist() + + +def uniform_random_by_intervals(inclusive, exclusive, n, round_to_int=False): + edges = np.linspace(0, 1, n + 1) + points = np.random.uniform(edges[:-1], edges[1:]) + numbers = inclusive + (exclusive - inclusive) * points + if round_to_int: + numbers = np.round(numbers).astype(int) + return numbers.tolist() + + +def soft_append_bcthw(history, current, overlap=0): + if overlap <= 0: + return torch.cat([history, current], dim=2) + + assert history.shape[2] >= overlap, f"History length ({history.shape[2]}) must be >= overlap ({overlap})" + assert current.shape[2] >= overlap, f"Current length ({current.shape[2]}) must be >= overlap ({overlap})" + + weights = torch.linspace(1, 0, overlap, dtype=history.dtype, device=history.device).view(1, 1, -1, 1, 1) + blended = weights * history[:, :, -overlap:] + (1 - weights) * current[:, :, :overlap] + output = torch.cat([history[:, :, :-overlap], blended, current[:, :, overlap:]], dim=2) + + return output.to(history) + + +def save_bcthw_as_mp4(x, output_filename, fps=10): + b, c, t, h, w = 
x.shape + + per_row = b + for p in [6, 5, 4, 3, 2]: + if b % p == 0: + per_row = p + break + + os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True) + x = torch.clamp(x.float(), -1.0, 1.0) * 127.5 + 127.5 + x = x.detach().cpu().to(torch.uint8) + x = einops.rearrange(x, "(m n) c t h w -> t (m h) (n w) c", n=per_row) + torchvision.io.write_video(output_filename, x, fps=fps, video_codec="libx264", options={"crf": "0"}) + + # write tensor as .pt file + torch.save(x, output_filename.replace(".mp4", ".pt")) + + return x + + +def save_bcthw_as_png(x, output_filename): + os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True) + x = torch.clamp(x.float(), -1.0, 1.0) * 127.5 + 127.5 + x = x.detach().cpu().to(torch.uint8) + x = einops.rearrange(x, "b c t h w -> c (b h) (t w)") + torchvision.io.write_png(x, output_filename) + return output_filename + + +def save_bchw_as_png(x, output_filename): + os.makedirs(os.path.dirname(os.path.abspath(os.path.realpath(output_filename))), exist_ok=True) + x = torch.clamp(x.float(), -1.0, 1.0) * 127.5 + 127.5 + x = x.detach().cpu().to(torch.uint8) + x = einops.rearrange(x, "b c h w -> c h (b w)") + torchvision.io.write_png(x, output_filename) + return output_filename + + +def add_tensors_with_padding(tensor1, tensor2): + if tensor1.shape == tensor2.shape: + return tensor1 + tensor2 + + shape1 = tensor1.shape + shape2 = tensor2.shape + + new_shape = tuple(max(s1, s2) for s1, s2 in zip(shape1, shape2)) + + padded_tensor1 = torch.zeros(new_shape) + padded_tensor2 = torch.zeros(new_shape) + + padded_tensor1[tuple(slice(0, s) for s in shape1)] = tensor1 + padded_tensor2[tuple(slice(0, s) for s in shape2)] = tensor2 + + result = padded_tensor1 + padded_tensor2 + return result + + +def print_free_mem(): + torch.cuda.empty_cache() + free_mem, total_mem = torch.cuda.mem_get_info(0) + free_mem_mb = free_mem / (1024**2) + total_mem_mb = total_mem / (1024**2) + print(f"Free memory: {free_mem_mb:.2f} MB") + print(f"Total memory: {total_mem_mb:.2f} MB") + return + + +def print_gpu_parameters(device, state_dict, log_count=1): + summary = {"device": device, "keys_count": len(state_dict)} + + logged_params = {} + for i, (key, tensor) in enumerate(state_dict.items()): + if i >= log_count: + break + logged_params[key] = tensor.flatten()[:3].tolist() + + summary["params"] = logged_params + + print(str(summary)) + return + + +def visualize_txt_as_img(width, height, text, font_path="font/DejaVuSans.ttf", size=18): + from PIL import Image, ImageDraw, ImageFont + + txt = Image.new("RGB", (width, height), color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype(font_path, size=size) + + if text == "": + return np.array(txt) + + # Split text into lines that fit within the image width + lines = [] + words = text.split() + current_line = words[0] + + for word in words[1:]: + line_with_word = f"{current_line} {word}" + if draw.textbbox((0, 0), line_with_word, font=font)[2] <= width: + current_line = line_with_word + else: + lines.append(current_line) + current_line = word + + lines.append(current_line) + + # Draw the text line by line + y = 0 + line_height = draw.textbbox((0, 0), "A", font=font)[3] + + for line in lines: + if y + line_height > height: + break # stop drawing if the next line will be outside the image + draw.text((0, y), line, fill="black", font=font) + y += line_height + + return np.array(txt) + + +def blue_mark(x): + x = x.copy() + c = x[:, :, 2] + b = cv2.blur(c, (9, 9)) + x[:, :, 
2] = ((c - b) * 16.0 + b).clip(-1, 1) + return x + + +def green_mark(x): + x = x.copy() + x[:, :, 2] = -1 + x[:, :, 0] = -1 + return x + + +def frame_mark(x): + x = x.copy() + x[:64] = -1 + x[-64:] = -1 + x[:, :8] = 1 + x[:, -8:] = 1 + return x + + +@torch.inference_mode() +def pytorch2numpy(imgs): + results = [] + for x in imgs: + y = x.movedim(0, -1) + y = y * 127.5 + 127.5 + y = y.detach().float().cpu().numpy().clip(0, 255).astype(np.uint8) + results.append(y) + return results + + +@torch.inference_mode() +def numpy2pytorch(imgs): + h = torch.from_numpy(np.stack(imgs, axis=0)).float() / 127.5 - 1.0 + h = h.movedim(-1, 1) + return h + + +@torch.no_grad() +def duplicate_prefix_to_suffix(x, count, zero_out=False): + if zero_out: + return torch.cat([x, torch.zeros_like(x[:count])], dim=0) + else: + return torch.cat([x, x[:count]], dim=0) + + +def weighted_mse(a, b, weight): + return torch.mean(weight.float() * (a.float() - b.float()) ** 2) + + +def clamped_linear_interpolation(x, x_min, y_min, x_max, y_max, sigma=1.0): + x = (x - x_min) / (x_max - x_min) + x = max(0.0, min(x, 1.0)) + x = x**sigma + return y_min + x * (y_max - y_min) + + +def expand_to_dims(x, target_dims): + return x.view(*x.shape, *([1] * max(0, target_dims - x.dim()))) + + +def repeat_to_batch_size(tensor: torch.Tensor, batch_size: int): + if tensor is None: + return None + + first_dim = tensor.shape[0] + + if first_dim == batch_size: + return tensor + + if batch_size % first_dim != 0: + raise ValueError(f"Cannot evenly repeat first dim {first_dim} to match batch_size {batch_size}.") + + repeat_times = batch_size // first_dim + + return tensor.repeat(repeat_times, *[1] * (tensor.dim() - 1)) + + +def dim5(x): + return expand_to_dims(x, 5) + + +def dim4(x): + return expand_to_dims(x, 4) + + +def dim3(x): + return expand_to_dims(x, 3) + + +def crop_or_pad_yield_mask(x, length): + B, F, C = x.shape + device = x.device + dtype = x.dtype + + if F < length: + y = torch.zeros((B, length, C), dtype=dtype, device=device) + mask = torch.zeros((B, length), dtype=torch.bool, device=device) + y[:, :F, :] = x + mask[:, :F] = True + return y, mask + + return x[:, :length, :], torch.ones((B, length), dtype=torch.bool, device=device) + + +def extend_dim(x, dim, minimal_length, zero_pad=False): + original_length = int(x.shape[dim]) + + if original_length >= minimal_length: + return x + + if zero_pad: + padding_shape = list(x.shape) + padding_shape[dim] = minimal_length - original_length + padding = torch.zeros(padding_shape, dtype=x.dtype, device=x.device) + else: + idx = (slice(None),) * dim + (slice(-1, None),) + (slice(None),) * (len(x.shape) - dim - 1) + last_element = x[idx] + padding = last_element.repeat_interleave(minimal_length - original_length, dim=dim) + + return torch.cat([x, padding], dim=dim) + + +def lazy_positional_encoding(t, repeats=None): + if not isinstance(t, list): + t = [t] + + from diffusers.models.embeddings import get_timestep_embedding + + te = torch.tensor(t) + te = get_timestep_embedding(timesteps=te, embedding_dim=256, flip_sin_to_cos=True, downscale_freq_shift=0.0, scale=1.0) + + if repeats is None: + return te + + te = te[:, None, :].expand(-1, repeats, -1) + + return te + + +def state_dict_offset_merge(A, B, C=None): + result = {} + keys = A.keys() + + for key in keys: + A_value = A[key] + B_value = B[key].to(A_value) + + if C is None: + result[key] = A_value + B_value + else: + C_value = C[key].to(A_value) + result[key] = A_value + B_value - C_value + + return result + + +def 
state_dict_weighted_merge(state_dicts, weights): + if len(state_dicts) != len(weights): + raise ValueError("Number of state dictionaries must match number of weights") + + if not state_dicts: + return {} + + total_weight = sum(weights) + + if total_weight == 0: + raise ValueError("Sum of weights cannot be zero") + + normalized_weights = [w / total_weight for w in weights] + + keys = state_dicts[0].keys() + result = {} + + for key in keys: + result[key] = state_dicts[0][key] * normalized_weights[0] + + for i in range(1, len(state_dicts)): + state_dict_value = state_dicts[i][key].to(result[key]) + result[key] += state_dict_value * normalized_weights[i] + + return result + + +def group_files_by_folder(all_files): + grouped_files = {} + + for file in all_files: + folder_name = os.path.basename(os.path.dirname(file)) + if folder_name not in grouped_files: + grouped_files[folder_name] = [] + grouped_files[folder_name].append(file) + + list_of_lists = list(grouped_files.values()) + return list_of_lists + + +def generate_timestamp(): + now = datetime.datetime.now() + timestamp = now.strftime("%y%m%d_%H%M%S") + milliseconds = f"{int(now.microsecond / 1000):03d}" + random_number = random.randint(0, 9999) + return f"{timestamp}_{milliseconds}_{random_number}" + + +def write_PIL_image_with_png_info(image, metadata, path): + from PIL.PngImagePlugin import PngInfo + + png_info = PngInfo() + for key, value in metadata.items(): + png_info.add_text(key, value) + + image.save(path, "PNG", pnginfo=png_info) + return image + + +def torch_safe_save(content, path): + torch.save(content, path + "_tmp") + os.replace(path + "_tmp", path) + return path + + +def move_optimizer_to_device(optimizer, device): + for state in optimizer.state.values(): + for k, v in state.items(): + if isinstance(v, torch.Tensor): + state[k] = v.to(device) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/wrapper.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..cc420da4db1134deca30648077923021b35f82d1 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/frame_pack/wrapper.py @@ -0,0 +1,51 @@ +import torch + + +def append_dims(x, target_dims): + return x[(...,) + (None,) * (target_dims - x.ndim)] + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0): + if guidance_rescale == 0: + return noise_cfg + + std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg + return noise_cfg + + +def fm_wrapper(transformer, t_scale=1000.0): + def k_model(x, sigma, **extra_args): + dtype = extra_args['dtype'] + cfg_scale = extra_args['cfg_scale'] + cfg_rescale = extra_args['cfg_rescale'] + concat_latent = extra_args['concat_latent'] + + original_dtype = x.dtype + sigma = sigma.float() + + x = x.to(dtype) + timestep = (sigma * t_scale).to(dtype) + + if concat_latent is None: + hidden_states = x + else: + hidden_states = torch.cat([x, concat_latent.to(x)], dim=1) + + pred_positive = transformer(hidden_states=hidden_states, timestep=timestep, return_dict=False, **extra_args['positive'])[0].float() + + if cfg_scale == 1.0: + pred_negative = torch.zeros_like(pred_positive) + else: + pred_negative = transformer(hidden_states=hidden_states, 
timestep=timestep, return_dict=False, **extra_args['negative'])[0].float() + + pred_cfg = pred_negative + cfg_scale * (pred_positive - pred_negative) + pred = rescale_noise_cfg(pred_cfg, pred_positive, guidance_rescale=cfg_rescale) + + x0 = x.float() - pred.float() * append_dims(sigma, x.ndim) + + return x0.to(dtype=original_dtype) + + return k_model diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__init__.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/__init__.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9176fb1e1f325451c1ffaa1f42ee58707b10be02 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/__init__.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/activation_layers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/activation_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..473f394a7afa018bf06e6b7f06362186068f4e90 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/activation_layers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/attention.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/attention.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0d3be131dc1e7e6d84ca0e3dad059fbf51b4b00 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/attention.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/autoencoder_kl_causal_3d.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/autoencoder_kl_causal_3d.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8275eacf46979537287dadb9e517d48c51cafad Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/autoencoder_kl_causal_3d.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/embed_layers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/embed_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7efbcf1885818ba6d56b18d800cd232d7d3ec794 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/embed_layers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/fp8_optimization.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/fp8_optimization.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d433149282530597f0aab74441e8c52cd9c16c11 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/fp8_optimization.cpython-312.pyc differ diff 
--git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/helpers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/helpers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8225807c3f4d9b4026d0a636beb74b731a2ba775 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/helpers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/mlp_layers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/mlp_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33024e3069580223ea453860a0865039270ce72d Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/mlp_layers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/models.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/models.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b0751cec1b4188198aa30308a1f4dfc51e84ec7 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/models.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/modulate_layers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/modulate_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2951f7a02a861e21035a68f9892ffa6352461fe Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/modulate_layers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/norm_layers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/norm_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bfad5c3a031a756eb1acc9f5bfe1ea4a4aad889 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/norm_layers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/posemb_layers.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/posemb_layers.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f78edc0b69cfe612bebcac56fcd20f77f742c6 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/posemb_layers.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/text_encoder.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/text_encoder.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5d97450af502bc62d9c9459bd89d64e8ba8f754 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/text_encoder.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/token_refiner.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/token_refiner.cpython-312.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..7d4c2965c4e898b9770450ec4230debac3df72eb Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/token_refiner.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/vae.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/vae.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b63fc55372220beb4040e528617df3768e935306 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/__pycache__/vae.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/activation_layers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/activation_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..f8774c26ceef6081482ca0dbbf930b207d4ac03b --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/activation_layers.py @@ -0,0 +1,23 @@ +import torch.nn as nn + + +def get_activation_layer(act_type): + """get activation layer + + Args: + act_type (str): the activation type + + Returns: + torch.nn.functional: the activation layer + """ + if act_type == "gelu": + return lambda: nn.GELU() + elif act_type == "gelu_tanh": + # Approximate `tanh` requires torch >= 1.13 + return lambda: nn.GELU(approximate="tanh") + elif act_type == "relu": + return nn.ReLU + elif act_type == "silu": + return nn.SiLU + else: + raise ValueError(f"Unknown activation type: {act_type}") diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/attention.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..e94253df0aceb11e4f5812b728df75b9d38bf8c2 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/attention.py @@ -0,0 +1,295 @@ +import importlib.metadata +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +try: + import flash_attn + from flash_attn.flash_attn_interface import _flash_attn_forward + from flash_attn.flash_attn_interface import flash_attn_varlen_func + from flash_attn.flash_attn_interface import flash_attn_func +except ImportError: + flash_attn = None + flash_attn_varlen_func = None + _flash_attn_forward = None + flash_attn_func = None + +try: + print(f"Trying to import sageattention") + from sageattention import sageattn_varlen, sageattn + + print("Successfully imported sageattention") +except ImportError: + print(f"Failed to import sageattention") + sageattn_varlen = None + sageattn = None + +try: + import xformers.ops as xops +except ImportError: + xops = None + +MEMORY_LAYOUT = { + "flash": ( + lambda x: x.view(x.shape[0] * x.shape[1], *x.shape[2:]), + lambda x: x, + ), + "flash_fixlen": ( + lambda x: x, + lambda x: x, + ), + "sageattn": ( + lambda x: x.view(x.shape[0] * x.shape[1], *x.shape[2:]), + lambda x: x, + ), + "sageattn_fixlen": ( + lambda x: x.transpose(1, 2), + lambda x: x.transpose(1, 2), + ), + "torch": ( + lambda x: x.transpose(1, 2), + lambda x: x.transpose(1, 2), + ), + "xformers": ( + lambda x: x, + lambda x: x, + ), + "vanilla": ( + lambda x: x.transpose(1, 2), + lambda x: x.transpose(1, 2), + ), +} + + +def get_cu_seqlens(text_mask, img_len): + """Calculate cu_seqlens_q, cu_seqlens_kv using text_mask and img_len + + Args: + text_mask 
(torch.Tensor): the mask of text + img_len (int): the length of image + + Returns: + torch.Tensor: the calculated cu_seqlens for flash attention + """ + batch_size = text_mask.shape[0] + text_len = text_mask.sum(dim=1) + max_len = text_mask.shape[1] + img_len + + cu_seqlens = torch.zeros([2 * batch_size + 1], dtype=torch.int32, device="cuda") + + for i in range(batch_size): + s = text_len[i] + img_len + s1 = i * max_len + s + s2 = (i + 1) * max_len + cu_seqlens[2 * i + 1] = s1 + cu_seqlens[2 * i + 2] = s2 + + return cu_seqlens + + +def attention( + q_or_qkv_list, + k=None, + v=None, + mode="flash", + drop_rate=0, + attn_mask=None, + total_len=None, + causal=False, + cu_seqlens_q=None, + cu_seqlens_kv=None, + max_seqlen_q=None, + max_seqlen_kv=None, + batch_size=1, +): + """ + Perform QKV self attention. + + Args: + q (torch.Tensor): Query tensor with shape [b, s, a, d], where a is the number of heads. + k (torch.Tensor): Key tensor with shape [b, s1, a, d] + v (torch.Tensor): Value tensor with shape [b, s1, a, d] + mode (str): Attention mode. Choose from 'self_flash', 'cross_flash', 'torch', and 'vanilla'. + drop_rate (float): Dropout rate in attention map. (default: 0) + attn_mask (torch.Tensor): Attention mask with shape [b, s1] (cross_attn), or [b, a, s, s1] (torch or vanilla). + (default: None) + causal (bool): Whether to use causal attention. (default: False) + cu_seqlens_q (torch.Tensor): dtype torch.int32. The cumulative sequence lengths of the sequences in the batch, + used to index into q. + cu_seqlens_kv (torch.Tensor): dtype torch.int32. The cumulative sequence lengths of the sequences in the batch, + used to index into kv. + max_seqlen_q (int): The maximum sequence length in the batch of q. + max_seqlen_kv (int): The maximum sequence length in the batch of k and v. 
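+            total_len (torch.Tensor): Optional per-sample valid lengths. When given, each sample is trimmed to its
+                own length, attention is computed sample by sample ("split" mode), and the outputs are padded back afterwards.
+            batch_size (int): Batch size used to reshape the packed variable-length output back to [b, s, a, d].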
+ + Returns: + torch.Tensor: Output tensor after self attention with shape [b, s, ad] + """ + q, k, v = q_or_qkv_list if type(q_or_qkv_list) == list else (q_or_qkv_list, k, v) + if type(q_or_qkv_list) == list: + q_or_qkv_list.clear() + split_attn = total_len is not None + if split_attn and mode == "sageattn": + mode = "sageattn_fixlen" + elif split_attn and mode == "flash": + mode = "flash_fixlen" + # print(f"Attention mode: {mode}, split_attn: {split_attn}") + pre_attn_layout, post_attn_layout = MEMORY_LAYOUT[mode] + + # trim the sequence length to the actual length instead of attn_mask + if split_attn: + trimmed_len = q.shape[1] - total_len + q = [q[i : i + 1, : total_len[i]] for i in range(len(q))] + k = [k[i : i + 1, : total_len[i]] for i in range(len(k))] + v = [v[i : i + 1, : total_len[i]] for i in range(len(v))] + q = [pre_attn_layout(q_i) for q_i in q] + k = [pre_attn_layout(k_i) for k_i in k] + v = [pre_attn_layout(v_i) for v_i in v] + # print( + # f"Trimming the sequence length to {total_len},trimmed_len: {trimmed_len}, q.shape: {[q_i.shape for q_i in q]}, mode: {mode}" + # ) + else: + q = pre_attn_layout(q) + k = pre_attn_layout(k) + v = pre_attn_layout(v) + + if mode == "torch": + if split_attn: + x = [] + for i in range(len(q)): + x_i = F.scaled_dot_product_attention(q[i], k[i], v[i], dropout_p=drop_rate, is_causal=causal) + q[i], k[i], v[i] = None, None, None + x.append(x_i) + del q, k, v + else: + if attn_mask is not None and attn_mask.dtype != torch.bool: + attn_mask = attn_mask.to(q.dtype) + x = F.scaled_dot_product_attention(q, k, v, attn_mask=attn_mask, dropout_p=drop_rate, is_causal=causal) + del q, k, v + del attn_mask + + elif mode == "xformers": + # B, M, H, K: M is the sequence length, H is the number of heads, K is the dimension of the heads -> it is same as input dimension + # currently only support batch_size = 1 + assert split_attn, "Xformers only supports splitting" + x = [] + for i in range(len(q)): + x_i = xops.memory_efficient_attention(q[i], k[i], v[i], p=drop_rate) # , causal=causal) + q[i], k[i], v[i] = None, None, None + x.append(x_i) + del q, k, v + + elif mode == "flash": + x = flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv) + del q, k, v + # x with shape [(bxs), a, d] + x = x.view(batch_size, max_seqlen_q, x.shape[-2], x.shape[-1]) # reshape x to [b, s, a, d] + elif mode == "flash_fixlen": + x = [] + for i in range(len(q)): + # q: (batch_size, seqlen, nheads, headdim), k: (batch_size, seqlen, nheads_k, headdim), v: (batch_size, seqlen, nheads_k, headdim) + x_i = flash_attn_func(q[i], k[i], v[i], dropout_p=drop_rate, causal=causal) + q[i], k[i], v[i] = None, None, None + x.append(x_i) + del q, k, v + elif mode == "sageattn": + x = sageattn_varlen(q, k, v, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv) + del q, k, v + # x with shape [(bxs), a, d] + x = x.view(batch_size, max_seqlen_q, x.shape[-2], x.shape[-1]) # reshape x to [b, s, a, d] + elif mode == "sageattn_fixlen": + x = [] + for i in range(len(q)): + # HND seems to cause an error + x_i = sageattn(q[i], k[i], v[i]) # (batch_size, seq_len, head_num, head_dim) + q[i], k[i], v[i] = None, None, None + x.append(x_i) + del q, k, v + elif mode == "vanilla": + assert not split_attn, "Vanilla attention does not support trimming" + scale_factor = 1 / math.sqrt(q.size(-1)) + + b, a, s, _ = q.shape + s1 = k.size(2) + attn_bias = torch.zeros(b, a, s, s1, dtype=q.dtype, device=q.device) + if causal: + # Only applied to self attention + assert attn_mask is 
None, "Causal mask and attn_mask cannot be used together" + temp_mask = torch.ones(b, a, s, s, dtype=torch.bool, device=q.device).tril(diagonal=0) + attn_bias.masked_fill_(temp_mask.logical_not(), float("-inf")) + attn_bias.to(q.dtype) + + if attn_mask is not None: + if attn_mask.dtype == torch.bool: + attn_bias.masked_fill_(attn_mask.logical_not(), float("-inf")) + else: + attn_bias += attn_mask + + # TODO: Maybe force q and k to be float32 to avoid numerical overflow + attn = (q @ k.transpose(-2, -1)) * scale_factor + attn += attn_bias + attn = attn.softmax(dim=-1) + attn = torch.dropout(attn, p=drop_rate, train=True) + x = attn @ v + else: + raise NotImplementedError(f"Unsupported attention mode: {mode}") + + if split_attn: + x = [post_attn_layout(x_i) for x_i in x] + for i in range(len(x)): + x[i] = F.pad(x[i], (0, 0, 0, 0, 0, trimmed_len[i])) + x = torch.cat(x, dim=0) + else: + x = post_attn_layout(x) + + b, s, a, d = x.shape + out = x.reshape(b, s, -1) + return out + + +def parallel_attention(hybrid_seq_parallel_attn, q, k, v, img_q_len, img_kv_len, cu_seqlens_q, cu_seqlens_kv): + attn1 = hybrid_seq_parallel_attn( + None, + q[:, :img_q_len, :, :], + k[:, :img_kv_len, :, :], + v[:, :img_kv_len, :, :], + dropout_p=0.0, + causal=False, + joint_tensor_query=q[:, img_q_len : cu_seqlens_q[1]], + joint_tensor_key=k[:, img_kv_len : cu_seqlens_kv[1]], + joint_tensor_value=v[:, img_kv_len : cu_seqlens_kv[1]], + joint_strategy="rear", + ) + if flash_attn.__version__ >= "2.7.0": + attn2, *_ = _flash_attn_forward( + q[:, cu_seqlens_q[1] :], + k[:, cu_seqlens_kv[1] :], + v[:, cu_seqlens_kv[1] :], + dropout_p=0.0, + softmax_scale=q.shape[-1] ** (-0.5), + causal=False, + window_size_left=-1, + window_size_right=-1, + softcap=0.0, + alibi_slopes=None, + return_softmax=False, + ) + else: + attn2, *_ = _flash_attn_forward( + q[:, cu_seqlens_q[1] :], + k[:, cu_seqlens_kv[1] :], + v[:, cu_seqlens_kv[1] :], + dropout_p=0.0, + softmax_scale=q.shape[-1] ** (-0.5), + causal=False, + window_size=(-1, -1), + softcap=0.0, + alibi_slopes=None, + return_softmax=False, + ) + attn = torch.cat([attn1, attn2], dim=1) + b, s, a, d = attn.shape + attn = attn.reshape(b, s, -1) + + return attn diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/autoencoder_kl_causal_3d.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/autoencoder_kl_causal_3d.py new file mode 100644 index 0000000000000000000000000000000000000000..032365e3668dc168760fc347331c498f5e0bb178 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/autoencoder_kl_causal_3d.py @@ -0,0 +1,609 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +# +# Modified from diffusers==0.29.2 +# +# ============================================================================== +from typing import Dict, Optional, Tuple, Union +from dataclasses import dataclass + +import torch +import torch.nn as nn + +from diffusers.configuration_utils import ConfigMixin, register_to_config + +# try: +# # This diffusers is modified and packed in the mirror. +# from diffusers.loaders import FromOriginalVAEMixin +# except ImportError: +# # Use this to be compatible with the original diffusers. +# from diffusers.loaders.single_file_model import FromOriginalModelMixin as FromOriginalVAEMixin +from diffusers.utils.accelerate_utils import apply_forward_hook +from diffusers.models.attention_processor import ( + ADDED_KV_ATTENTION_PROCESSORS, + CROSS_ATTENTION_PROCESSORS, + Attention, + AttentionProcessor, + AttnAddedKVProcessor, + AttnProcessor, +) +from diffusers.models.modeling_outputs import AutoencoderKLOutput +from diffusers.models.modeling_utils import ModelMixin +from musubi_tuner.hunyuan_model.vae import DecoderCausal3D, BaseOutput, DecoderOutput, DiagonalGaussianDistribution, EncoderCausal3D + + +@dataclass +class DecoderOutput2(BaseOutput): + sample: torch.FloatTensor + posterior: Optional[DiagonalGaussianDistribution] = None + + +class AutoencoderKLCausal3D(ModelMixin, ConfigMixin): + r""" + A VAE model with KL loss for encoding images/videos into latents and decoding latent representations into images/videos. + + This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented + for all models (such as downloading or saving). + """ + + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str] = ("DownEncoderBlockCausal3D",), + up_block_types: Tuple[str] = ("UpDecoderBlockCausal3D",), + block_out_channels: Tuple[int] = (64,), + layers_per_block: int = 1, + act_fn: str = "silu", + latent_channels: int = 4, + norm_num_groups: int = 32, + sample_size: int = 32, + sample_tsize: int = 64, + scaling_factor: float = 0.18215, + force_upcast: float = True, + spatial_compression_ratio: int = 8, + time_compression_ratio: int = 4, + mid_block_add_attention: bool = True, + ): + super().__init__() + + self.time_compression_ratio = time_compression_ratio + + self.encoder = EncoderCausal3D( + in_channels=in_channels, + out_channels=latent_channels, + down_block_types=down_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + act_fn=act_fn, + norm_num_groups=norm_num_groups, + double_z=True, + time_compression_ratio=time_compression_ratio, + spatial_compression_ratio=spatial_compression_ratio, + mid_block_add_attention=mid_block_add_attention, + ) + + self.decoder = DecoderCausal3D( + in_channels=latent_channels, + out_channels=out_channels, + up_block_types=up_block_types, + block_out_channels=block_out_channels, + layers_per_block=layers_per_block, + norm_num_groups=norm_num_groups, + act_fn=act_fn, + time_compression_ratio=time_compression_ratio, + spatial_compression_ratio=spatial_compression_ratio, + mid_block_add_attention=mid_block_add_attention, + ) + + self.quant_conv = nn.Conv3d(2 * latent_channels, 2 * latent_channels, kernel_size=1) + self.post_quant_conv = nn.Conv3d(latent_channels, latent_channels, kernel_size=1) + + self.use_slicing = False + self.use_spatial_tiling = False + 
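+        # Memory-saving switches (all disabled by default): use_slicing encodes/decodes the batch one sample
+        # at a time, use_spatial_tiling splits H/W into overlapping tiles, and use_temporal_tiling (set just
+        # below) splits the frame axis into overlapping chunks.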
self.use_temporal_tiling = False + + # only relevant if vae tiling is enabled + self.tile_sample_min_tsize = sample_tsize + self.tile_latent_min_tsize = sample_tsize // time_compression_ratio + + self.tile_sample_min_size = self.config.sample_size + sample_size = self.config.sample_size[0] if isinstance(self.config.sample_size, (list, tuple)) else self.config.sample_size + self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1))) + self.tile_overlap_factor = 0.25 + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (EncoderCausal3D, DecoderCausal3D)): + module.gradient_checkpointing = value + + def enable_temporal_tiling(self, use_tiling: bool = True): + self.use_temporal_tiling = use_tiling + + def disable_temporal_tiling(self): + self.enable_temporal_tiling(False) + + def enable_spatial_tiling(self, use_tiling: bool = True): + self.use_spatial_tiling = use_tiling + + def disable_spatial_tiling(self): + self.enable_spatial_tiling(False) + + def enable_tiling(self, use_tiling: bool = True): + r""" + Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to + compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow + processing larger videos. + """ + self.enable_spatial_tiling(use_tiling) + self.enable_temporal_tiling(use_tiling) + + def disable_tiling(self): + r""" + Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.disable_spatial_tiling() + self.disable_temporal_tiling() + + def enable_slicing(self): + r""" + Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to + compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. + """ + self.use_slicing = True + + def disable_slicing(self): + r""" + Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing + decoding in one step. + """ + self.use_slicing = False + + def set_chunk_size_for_causal_conv_3d(self, chunk_size: int): + # set chunk_size to CausalConv3d recursively + def set_chunk_size(module): + if hasattr(module, "chunk_size"): + module.chunk_size = chunk_size + + self.apply(set_chunk_size) + + @property + # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors + def attn_processors(self) -> Dict[str, AttentionProcessor]: + r""" + Returns: + `dict` of attention processors: A dictionary containing all attention processors used in the model with + indexed by its weight name. + """ + # set recursively + processors = {} + + def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): + if hasattr(module, "get_processor"): + processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) + + for sub_name, child in module.named_children(): + fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) + + return processors + + for name, module in self.named_children(): + fn_recursive_add_processors(name, module, processors) + + return processors + + # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor + def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False): + r""" + Sets the attention processor to use to compute attention. 
+ + Parameters: + processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): + The instantiated processor class or a dictionary of processor classes that will be set as the processor + for **all** `Attention` layers. + + If `processor` is a dict, the key needs to define the path to the corresponding cross attention + processor. This is strongly recommended when setting trainable attention processors. + + """ + count = len(self.attn_processors.keys()) + + if isinstance(processor, dict) and len(processor) != count: + raise ValueError( + f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" + f" number of attention layers: {count}. Please make sure to pass {count} processor classes." + ) + + def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): + if hasattr(module, "set_processor"): + if not isinstance(processor, dict): + module.set_processor(processor, _remove_lora=_remove_lora) + else: + module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora) + + for sub_name, child in module.named_children(): + fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) + + for name, module in self.named_children(): + fn_recursive_attn_processor(name, module, processor) + + # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor + def set_default_attn_processor(self): + """ + Disables custom attention processors and sets the default attention implementation. + """ + if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnAddedKVProcessor() + elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): + processor = AttnProcessor() + else: + raise ValueError( + f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" + ) + + self.set_attn_processor(processor, _remove_lora=True) + + @apply_forward_hook + def encode( + self, x: torch.FloatTensor, return_dict: bool = True + ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: + """ + Encode a batch of images/videos into latents. + + Args: + x (`torch.FloatTensor`): Input batch of images/videos. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. + + Returns: + The latent representations of the encoded images/videos. If `return_dict` is True, a + [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. + """ + assert len(x.shape) == 5, "The input tensor should have 5 dimensions." 
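+        # Dispatch order: temporal tiling (frame axis) first, then spatial tiling (H/W), then per-sample
+        # slicing, and finally a plain single-pass encode.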
+ + if self.use_temporal_tiling and x.shape[2] > self.tile_sample_min_tsize: + return self.temporal_tiled_encode(x, return_dict=return_dict) + + if self.use_spatial_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): + return self.spatial_tiled_encode(x, return_dict=return_dict) + + if self.use_slicing and x.shape[0] > 1: + encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)] + h = torch.cat(encoded_slices) + else: + h = self.encoder(x) + + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + assert len(z.shape) == 5, "The input tensor should have 5 dimensions." + + if self.use_temporal_tiling and z.shape[2] > self.tile_latent_min_tsize: + return self.temporal_tiled_decode(z, return_dict=return_dict) + + if self.use_spatial_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): + return self.spatial_tiled_decode(z, return_dict=return_dict) + + z = self.post_quant_conv(z) + dec = self.decoder(z) + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + @apply_forward_hook + def decode(self, z: torch.FloatTensor, return_dict: bool = True, generator=None) -> Union[DecoderOutput, torch.FloatTensor]: + """ + Decode a batch of images/videos. + + Args: + z (`torch.FloatTensor`): Input batch of latent vectors. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. + + """ + if self.use_slicing and z.shape[0] > 1: + decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] + decoded = torch.cat(decoded_slices) + else: + decoded = self._decode(z).sample + + if not return_dict: + return (decoded,) + + return DecoderOutput(sample=decoded) + + def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[-2], b.shape[-2], blend_extent) + for y in range(blend_extent): + b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (y / blend_extent) + return b + + def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[-1], b.shape[-1], blend_extent) + for x in range(blend_extent): + b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (x / blend_extent) + return b + + def blend_t(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: + blend_extent = min(a.shape[-3], b.shape[-3], blend_extent) + for x in range(blend_extent): + b[:, :, x, :, :] = a[:, :, -blend_extent + x, :, :] * (1 - x / blend_extent) + b[:, :, x, :, :] * (x / blend_extent) + return b + + def spatial_tiled_encode( + self, x: torch.FloatTensor, return_dict: bool = True, return_moments: bool = False + ) -> AutoencoderKLOutput: + r"""Encode a batch of images/videos using a tiled encoder. + + When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several + steps. This is useful to keep memory use constant regardless of image/videos size. 
The end result of tiled encoding is + different from non-tiled encoding because each tile uses a different encoder. To avoid tiling artifacts, the + tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the + output, but they should be much less noticeable. + + Args: + x (`torch.FloatTensor`): Input batch of images/videos. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. + + Returns: + [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`: + If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain + `tuple` is returned. + """ + overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) + row_limit = self.tile_latent_min_size - blend_extent + + # Split video into tiles and encode them separately. + rows = [] + for i in range(0, x.shape[-2], overlap_size): + row = [] + for j in range(0, x.shape[-1], overlap_size): + tile = x[:, :, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] + tile = self.encoder(tile) + tile = self.quant_conv(tile) + row.append(tile) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=-1)) + + moments = torch.cat(result_rows, dim=-2) + if return_moments: + return moments + + posterior = DiagonalGaussianDistribution(moments) + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def spatial_tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + r""" + Decode a batch of images/videos using a tiled decoder. + + Args: + z (`torch.FloatTensor`): Input batch of latent vectors. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. + + Returns: + [`~models.vae.DecoderOutput`] or `tuple`: + If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is + returned. + """ + overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) + row_limit = self.tile_sample_min_size - blend_extent + + # Split z into overlapping tiles and decode them separately. + # The tiles have an overlap to avoid seams between tiles. 
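+        # Rough numbers, purely illustrative (assuming sample_size=256 and four block_out_channels entries,
+        # i.e. an 8x spatial factor, so tile_latent_min_size=32 and tile_sample_min_size=256):
+        # overlap_size = int(32 * 0.75) = 24, blend_extent = int(256 * 0.25) = 64, row_limit = 192,
+        # so latent tiles are read every 24 positions and decoded tiles are cross-faded over 64 pixels.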
+ rows = [] + for i in range(0, z.shape[-2], overlap_size): + row = [] + for j in range(0, z.shape[-1], overlap_size): + tile = z[:, :, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] + tile = self.post_quant_conv(tile) + decoded = self.decoder(tile) + row.append(decoded) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=-1)) + + dec = torch.cat(result_rows, dim=-2) + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + def temporal_tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput: + + B, C, T, H, W = x.shape + overlap_size = int(self.tile_sample_min_tsize * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_latent_min_tsize * self.tile_overlap_factor) + t_limit = self.tile_latent_min_tsize - blend_extent + + # Split the video into tiles and encode them separately. + row = [] + for i in range(0, T, overlap_size): + tile = x[:, :, i : i + self.tile_sample_min_tsize + 1, :, :] + if self.use_spatial_tiling and ( + tile.shape[-1] > self.tile_sample_min_size or tile.shape[-2] > self.tile_sample_min_size + ): + tile = self.spatial_tiled_encode(tile, return_moments=True) + else: + tile = self.encoder(tile) + tile = self.quant_conv(tile) + if i > 0: + tile = tile[:, :, 1:, :, :] + row.append(tile) + result_row = [] + for i, tile in enumerate(row): + if i > 0: + tile = self.blend_t(row[i - 1], tile, blend_extent) + result_row.append(tile[:, :, :t_limit, :, :]) + else: + result_row.append(tile[:, :, : t_limit + 1, :, :]) + + moments = torch.cat(result_row, dim=2) + posterior = DiagonalGaussianDistribution(moments) + + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def temporal_tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]: + # Split z into overlapping tiles and decode them separately. 
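+        # With the default sample_tsize=64 and time_compression_ratio=4: latent chunks of 16(+1) frames are
+        # taken every overlap_size=12 frames, chunks after the first drop their leading decoded frame, and
+        # neighbouring chunks are cross-faded with blend_t over blend_extent=16 frames before cropping to t_limit=48.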
+ + B, C, T, H, W = z.shape + overlap_size = int(self.tile_latent_min_tsize * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_sample_min_tsize * self.tile_overlap_factor) + t_limit = self.tile_sample_min_tsize - blend_extent + + row = [] + for i in range(0, T, overlap_size): + tile = z[:, :, i : i + self.tile_latent_min_tsize + 1, :, :] + if self.use_spatial_tiling and ( + tile.shape[-1] > self.tile_latent_min_size or tile.shape[-2] > self.tile_latent_min_size + ): + decoded = self.spatial_tiled_decode(tile, return_dict=True).sample + else: + tile = self.post_quant_conv(tile) + decoded = self.decoder(tile) + if i > 0: + decoded = decoded[:, :, 1:, :, :] + row.append(decoded) + result_row = [] + for i, tile in enumerate(row): + if i > 0: + tile = self.blend_t(row[i - 1], tile, blend_extent) + result_row.append(tile[:, :, :t_limit, :, :]) + else: + result_row.append(tile[:, :, : t_limit + 1, :, :]) + + dec = torch.cat(result_row, dim=2) + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) + + def forward( + self, + sample: torch.FloatTensor, + sample_posterior: bool = False, + return_dict: bool = True, + return_posterior: bool = False, + generator: Optional[torch.Generator] = None, + ) -> Union[DecoderOutput2, torch.FloatTensor]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + sample_posterior (`bool`, *optional*, defaults to `False`): + Whether to sample from the posterior. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`DecoderOutput`] instead of a plain tuple. + """ + x = sample + posterior = self.encode(x).latent_dist + if sample_posterior: + z = posterior.sample(generator=generator) + else: + z = posterior.mode() + dec = self.decode(z).sample + + if not return_dict: + if return_posterior: + return (dec, posterior) + else: + return (dec,) + if return_posterior: + return DecoderOutput2(sample=dec, posterior=posterior) + else: + return DecoderOutput2(sample=dec) + + # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections + def fuse_qkv_projections(self): + """ + Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, + key, value) are fused. For cross-attention modules, key and value projection matrices are fused. + + + + This API is 🧪 experimental. + + + """ + self.original_attn_processors = None + + for _, attn_processor in self.attn_processors.items(): + if "Added" in str(attn_processor.__class__.__name__): + raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + + self.original_attn_processors = self.attn_processors + + for module in self.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + + # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections + def unfuse_qkv_projections(self): + """Disables the fused QKV projection if enabled. + + + + This API is 🧪 experimental. 
+ + + + """ + if self.original_attn_processors is not None: + self.set_attn_processor(self.original_attn_processors) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/embed_layers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/embed_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..0d6fa642c4a99c7abbbc4f91ffc13bb183aa7f2c --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/embed_layers.py @@ -0,0 +1,132 @@ +import collections +import math +import torch +import torch.nn as nn +from einops import rearrange, repeat + +from musubi_tuner.hunyuan_model.helpers import to_2tuple + +class PatchEmbed(nn.Module): + """2D Image to Patch Embedding + + Image to Patch Embedding using Conv2d + + A convolution based approach to patchifying a 2D image w/ embedding projection. + + Based on the impl in https://github.com/google-research/vision_transformer + + Hacked together by / Copyright 2020 Ross Wightman + + Remove the _assert function in forward function to be compatible with multi-resolution images. + """ + + def __init__( + self, + patch_size=16, + in_chans=3, + embed_dim=768, + norm_layer=None, + flatten=True, + bias=True, + dtype=None, + device=None, + ): + factory_kwargs = {"dtype": dtype, "device": device} + super().__init__() + patch_size = to_2tuple(patch_size) + self.patch_size = patch_size + self.flatten = flatten + + self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias, **factory_kwargs) + nn.init.xavier_uniform_(self.proj.weight.view(self.proj.weight.size(0), -1)) + if bias: + nn.init.zeros_(self.proj.bias) + + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCHW -> BNC + x = self.norm(x) + return x + + +class TextProjection(nn.Module): + """ + Projects text embeddings. Also handles dropout for classifier-free guidance. + + Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/nets/PixArt_blocks.py + """ + + def __init__(self, in_channels, hidden_size, act_layer, dtype=None, device=None): + factory_kwargs = {"dtype": dtype, "device": device} + super().__init__() + self.linear_1 = nn.Linear(in_features=in_channels, out_features=hidden_size, bias=True, **factory_kwargs) + self.act_1 = act_layer() + self.linear_2 = nn.Linear(in_features=hidden_size, out_features=hidden_size, bias=True, **factory_kwargs) + + def forward(self, caption): + hidden_states = self.linear_1(caption) + hidden_states = self.act_1(hidden_states) + hidden_states = self.linear_2(hidden_states) + return hidden_states + + +def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + + Args: + t (torch.Tensor): a 1-D Tensor of N indices, one per batch element. These may be fractional. + dim (int): the dimension of the output. + max_period (int): controls the minimum frequency of the embeddings. + + Returns: + embedding (torch.Tensor): An (N, D) Tensor of positional embeddings. + + .. 
ref_link: https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + """ + half = dim // 2 + freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(device=t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + + def __init__( + self, + hidden_size, + act_layer, + frequency_embedding_size=256, + max_period=10000, + out_size=None, + dtype=None, + device=None, + ): + factory_kwargs = {"dtype": dtype, "device": device} + super().__init__() + self.frequency_embedding_size = frequency_embedding_size + self.max_period = max_period + if out_size is None: + out_size = hidden_size + + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True, **factory_kwargs), + act_layer(), + nn.Linear(hidden_size, out_size, bias=True, **factory_kwargs), + ) + nn.init.normal_(self.mlp[0].weight, std=0.02) + nn.init.normal_(self.mlp[2].weight, std=0.02) + + def forward(self, t): + t_freq = timestep_embedding(t, self.frequency_embedding_size, self.max_period).type(self.mlp[0].weight.dtype) + t_emb = self.mlp(t_freq) + return t_emb diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/fp8_optimization.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/fp8_optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..90b978baca8cd9a3401b8b66a6575c0c3c29c991 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/fp8_optimization.py @@ -0,0 +1,39 @@ +#based on ComfyUI's and MinusZoneAI's fp8_linear optimization +#further borrowed from HunyuanVideoWrapper for Musubi Tuner +import torch +import torch.nn as nn + +def fp8_linear_forward(cls, original_dtype, input): + weight_dtype = cls.weight.dtype + if weight_dtype in [torch.float8_e4m3fn, torch.float8_e5m2]: + if len(input.shape) == 3: + target_dtype = torch.float8_e5m2 if weight_dtype == torch.float8_e4m3fn else torch.float8_e4m3fn + inn = input.reshape(-1, input.shape[2]).to(target_dtype) + w = cls.weight.t() + + scale = torch.ones((1), device=input.device, dtype=torch.float32) + bias = cls.bias.to(original_dtype) if cls.bias is not None else None + + if bias is not None: + o = torch._scaled_mm(inn, w, out_dtype=original_dtype, bias=bias, scale_a=scale, scale_b=scale) + else: + o = torch._scaled_mm(inn, w, out_dtype=original_dtype, scale_a=scale, scale_b=scale) + + if isinstance(o, tuple): + o = o[0] + + return o.reshape((-1, input.shape[1], cls.weight.shape[0])) + else: + return cls.original_forward(input.to(original_dtype)) + else: + return cls.original_forward(input) + +def convert_fp8_linear(module, original_dtype, params_to_keep={}): + setattr(module, "fp8_matmul_enabled", True) + + for name, module in module.named_modules(): + if not any(keyword in name for keyword in params_to_keep): + if isinstance(module, nn.Linear): + original_forward = module.forward + setattr(module, "original_forward", original_forward) + setattr(module, "forward", lambda input, m=module: fp8_linear_forward(m, original_dtype, input)) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/helpers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/helpers.py new file mode 100644 
index 0000000000000000000000000000000000000000..72ab8cb1feba4ce7782f1ea841fd42c71be7b0d1 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/helpers.py @@ -0,0 +1,40 @@ +import collections.abc + +from itertools import repeat + + +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + x = tuple(x) + if len(x) == 1: + x = tuple(repeat(x[0], n)) + return x + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) + + +def as_tuple(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return tuple(x) + if x is None or isinstance(x, (int, float, str)): + return (x,) + else: + raise ValueError(f"Unknown type {type(x)}") + + +def as_list_of_2tuple(x): + x = as_tuple(x) + if len(x) == 1: + x = (x[0], x[0]) + assert len(x) % 2 == 0, f"Expect even length, got {len(x)}." + lst = [] + for i in range(0, len(x), 2): + lst.append((x[i], x[i + 1])) + return lst diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/mlp_layers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/mlp_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..2606773de650b434413d48b8f319e9f997928812 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/mlp_layers.py @@ -0,0 +1,118 @@ +# Modified from timm library: +# https://github.com/huggingface/pytorch-image-models/blob/648aaa41233ba83eb38faf5ba9d415d574823241/timm/layers/mlp.py#L13 + +from functools import partial + +import torch +import torch.nn as nn + +from musubi_tuner.hunyuan_model.modulate_layers import modulate +from musubi_tuner.hunyuan_model.helpers import to_2tuple + + +class MLP(nn.Module): + """MLP as used in Vision Transformer, MLP-Mixer and related networks""" + + def __init__( + self, + in_channels, + hidden_channels=None, + out_features=None, + act_layer=nn.GELU, + norm_layer=None, + bias=True, + drop=0.0, + use_conv=False, + device=None, + dtype=None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + out_features = out_features or in_channels + hidden_channels = hidden_channels or in_channels + bias = to_2tuple(bias) + drop_probs = to_2tuple(drop) + linear_layer = partial(nn.Conv2d, kernel_size=1) if use_conv else nn.Linear + + self.fc1 = linear_layer( + in_channels, hidden_channels, bias=bias[0], **factory_kwargs + ) + self.act = act_layer() + self.drop1 = nn.Dropout(drop_probs[0]) + self.norm = ( + norm_layer(hidden_channels, **factory_kwargs) + if norm_layer is not None + else nn.Identity() + ) + self.fc2 = linear_layer( + hidden_channels, out_features, bias=bias[1], **factory_kwargs + ) + self.drop2 = nn.Dropout(drop_probs[1]) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop1(x) + x = self.norm(x) + x = self.fc2(x) + x = self.drop2(x) + return x + + +# +class MLPEmbedder(nn.Module): + """copied from https://github.com/black-forest-labs/flux/blob/main/src/flux/modules/layers.py""" + def __init__(self, in_dim: int, hidden_dim: int, device=None, dtype=None): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True, **factory_kwargs) + self.silu = nn.SiLU() + self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True, **factory_kwargs) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return 
self.out_layer(self.silu(self.in_layer(x))) + + +class FinalLayer(nn.Module): + """The final layer of DiT.""" + + def __init__( + self, hidden_size, patch_size, out_channels, act_layer, device=None, dtype=None + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + + # Just use LayerNorm for the final layer + self.norm_final = nn.LayerNorm( + hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs + ) + if isinstance(patch_size, int): + self.linear = nn.Linear( + hidden_size, + patch_size * patch_size * out_channels, + bias=True, + **factory_kwargs + ) + else: + self.linear = nn.Linear( + hidden_size, + patch_size[0] * patch_size[1] * patch_size[2] * out_channels, + bias=True, + ) + nn.init.zeros_(self.linear.weight) + nn.init.zeros_(self.linear.bias) + + # Here we don't distinguish between the modulate types. Just use the simple one. + self.adaLN_modulation = nn.Sequential( + act_layer(), + nn.Linear(hidden_size, 2 * hidden_size, bias=True, **factory_kwargs), + ) + # Zero-initialize the modulation + nn.init.zeros_(self.adaLN_modulation[1].weight) + nn.init.zeros_(self.adaLN_modulation[1].bias) + + def forward(self, x, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=1) + x = modulate(self.norm_final(x), shift=shift, scale=scale) + x = self.linear(x) + return x diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/models.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/models.py new file mode 100644 index 0000000000000000000000000000000000000000..90c081efa5ea600e5574f5276eb512cbed128b44 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/models.py @@ -0,0 +1,1044 @@ +import os +from typing import Any, List, Tuple, Optional, Union, Dict +import accelerate +from einops import rearrange + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from musubi_tuner.hunyuan_model.activation_layers import get_activation_layer +from musubi_tuner.hunyuan_model.norm_layers import get_norm_layer +from musubi_tuner.hunyuan_model.embed_layers import TimestepEmbedder, PatchEmbed, TextProjection +from musubi_tuner.hunyuan_model.attention import attention, parallel_attention, get_cu_seqlens +from musubi_tuner.hunyuan_model.posemb_layers import apply_rotary_emb +from musubi_tuner.hunyuan_model.mlp_layers import MLP, MLPEmbedder, FinalLayer +from musubi_tuner.hunyuan_model.modulate_layers import ModulateDiT, modulate, apply_gate +from musubi_tuner.hunyuan_model.token_refiner import SingleTokenRefiner +from musubi_tuner.modules.custom_offloading_utils import ModelOffloader, synchronize_device, clean_memory_on_device +from musubi_tuner.hunyuan_model.posemb_layers import get_nd_rotary_pos_embed + +from musubi_tuner.utils.safetensors_utils import MemoryEfficientSafeOpen + + +class MMDoubleStreamBlock(nn.Module): + """ + A multimodal dit block with seperate modulation for + text and image/video, see more details (SD3): https://arxiv.org/abs/2403.03206 + (Flux.1): https://github.com/black-forest-labs/flux + """ + + def __init__( + self, + hidden_size: int, + heads_num: int, + mlp_width_ratio: float, + mlp_act_type: str = "gelu_tanh", + qk_norm: bool = True, + qk_norm_type: str = "rms", + qkv_bias: bool = False, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + attn_mode: str = "flash", + split_attn: bool = False, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.attn_mode = attn_mode + 
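+        # Double-stream layout: image and text tokens keep separate modulation, QKV, QK-norm, projection and
+        # MLP weights (the img_* / txt_* modules below); only the attention itself is computed jointly over
+        # the concatenated sequences.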
self.split_attn = split_attn + + self.deterministic = False + self.heads_num = heads_num + head_dim = hidden_size // heads_num + mlp_hidden_dim = int(hidden_size * mlp_width_ratio) + + self.img_mod = ModulateDiT( + hidden_size, + factor=6, + act_layer=get_activation_layer("silu"), + **factory_kwargs, + ) + self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + + self.img_attn_qkv = nn.Linear(hidden_size, hidden_size * 3, bias=qkv_bias, **factory_kwargs) + qk_norm_layer = get_norm_layer(qk_norm_type) + self.img_attn_q_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.img_attn_k_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.img_attn_proj = nn.Linear(hidden_size, hidden_size, bias=qkv_bias, **factory_kwargs) + + self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + self.img_mlp = MLP( + hidden_size, + mlp_hidden_dim, + act_layer=get_activation_layer(mlp_act_type), + bias=True, + **factory_kwargs, + ) + + self.txt_mod = ModulateDiT( + hidden_size, + factor=6, + act_layer=get_activation_layer("silu"), + **factory_kwargs, + ) + self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + + self.txt_attn_qkv = nn.Linear(hidden_size, hidden_size * 3, bias=qkv_bias, **factory_kwargs) + self.txt_attn_q_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.txt_attn_k_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.txt_attn_proj = nn.Linear(hidden_size, hidden_size, bias=qkv_bias, **factory_kwargs) + + self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + self.txt_mlp = MLP( + hidden_size, + mlp_hidden_dim, + act_layer=get_activation_layer(mlp_act_type), + bias=True, + **factory_kwargs, + ) + self.hybrid_seq_parallel_attn = None + + self.gradient_checkpointing = False + + def enable_deterministic(self): + self.deterministic = True + + def disable_deterministic(self): + self.deterministic = False + + def enable_gradient_checkpointing(self): + self.gradient_checkpointing = True + + def disable_gradient_checkpointing(self): + self.gradient_checkpointing = False + + def _forward( + self, + img: torch.Tensor, + txt: torch.Tensor, + vec: torch.Tensor, + attn_mask: Optional[torch.Tensor] = None, + total_len: Optional[torch.Tensor] = None, + cu_seqlens_q: Optional[torch.Tensor] = None, + cu_seqlens_kv: Optional[torch.Tensor] = None, + max_seqlen_q: Optional[int] = None, + max_seqlen_kv: Optional[int] = None, + freqs_cis: tuple = None, + ) -> Tuple[torch.Tensor, torch.Tensor]: + (img_mod1_shift, img_mod1_scale, img_mod1_gate, img_mod2_shift, img_mod2_scale, img_mod2_gate) = self.img_mod(vec).chunk( + 6, dim=-1 + ) + (txt_mod1_shift, txt_mod1_scale, txt_mod1_gate, txt_mod2_shift, txt_mod2_scale, txt_mod2_gate) = self.txt_mod(vec).chunk( + 6, dim=-1 + ) + + # Prepare image for attention. 
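+        # Rough shapes: img is [B, L_img, hidden_size]; the fused QKV output is rearranged into three
+        # [B, L_img, heads_num, head_dim] tensors, QK-norm is applied per head, and RoPE (freqs_cis) is
+        # applied to the image tokens only; the text stream below skips it.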
+ img_modulated = self.img_norm1(img) + img_modulated = modulate(img_modulated, shift=img_mod1_shift, scale=img_mod1_scale) + img_qkv = self.img_attn_qkv(img_modulated) + img_modulated = None + img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num) + img_qkv = None + # Apply QK-Norm if needed + img_q = self.img_attn_q_norm(img_q).to(img_v) + img_k = self.img_attn_k_norm(img_k).to(img_v) + + # Apply RoPE if needed. + if freqs_cis is not None: + img_q_shape = img_q.shape + img_k_shape = img_k.shape + img_q, img_k = apply_rotary_emb(img_q, img_k, freqs_cis, head_first=False) + assert ( + img_q.shape == img_q_shape and img_k.shape == img_k_shape + ), f"img_kk: {img_q.shape}, img_q: {img_q_shape}, img_kk: {img_k.shape}, img_k: {img_k_shape}" + # img_q, img_k = img_qq, img_kk + + # Prepare txt for attention. + txt_modulated = self.txt_norm1(txt) + txt_modulated = modulate(txt_modulated, shift=txt_mod1_shift, scale=txt_mod1_scale) + txt_qkv = self.txt_attn_qkv(txt_modulated) + txt_modulated = None + txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num) + txt_qkv = None + # Apply QK-Norm if needed. + txt_q = self.txt_attn_q_norm(txt_q).to(txt_v) + txt_k = self.txt_attn_k_norm(txt_k).to(txt_v) + + # Run actual attention. + img_q_len = img_q.shape[1] + img_kv_len = img_k.shape[1] + batch_size = img_k.shape[0] + q = torch.cat((img_q, txt_q), dim=1) + img_q = txt_q = None + k = torch.cat((img_k, txt_k), dim=1) + img_k = txt_k = None + v = torch.cat((img_v, txt_v), dim=1) + img_v = txt_v = None + + assert ( + cu_seqlens_q.shape[0] == 2 * img.shape[0] + 1 + ), f"cu_seqlens_q.shape:{cu_seqlens_q.shape}, img.shape[0]:{img.shape[0]}" + + # attention computation start + if not self.hybrid_seq_parallel_attn: + l = [q, k, v] + q = k = v = None + attn = attention( + l, + mode=self.attn_mode, + attn_mask=attn_mask, + total_len=total_len, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + max_seqlen_q=max_seqlen_q, + max_seqlen_kv=max_seqlen_kv, + batch_size=batch_size, + ) + else: + attn = parallel_attention( + self.hybrid_seq_parallel_attn, + q, + k, + v, + img_q_len=img_q_len, + img_kv_len=img_kv_len, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + ) + + # attention computation end + + img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1] :] + attn = None + + # Calculate the img bloks. + img = img + apply_gate(self.img_attn_proj(img_attn), gate=img_mod1_gate) + img_attn = None + img = img + apply_gate( + self.img_mlp(modulate(self.img_norm2(img), shift=img_mod2_shift, scale=img_mod2_scale)), + gate=img_mod2_gate, + ) + + # Calculate the txt bloks. 
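+        # Same gated residual pattern as the image stream: txt = txt + gate * proj(attn), then
+        # txt = txt + gate * MLP(modulate(norm(txt))), with shifts/scales/gates taken from txt_mod(vec).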
+ txt = txt + apply_gate(self.txt_attn_proj(txt_attn), gate=txt_mod1_gate) + txt_attn = None + txt = txt + apply_gate( + self.txt_mlp(modulate(self.txt_norm2(txt), shift=txt_mod2_shift, scale=txt_mod2_scale)), + gate=txt_mod2_gate, + ) + + return img, txt + + # def forward( + # self, + # img: torch.Tensor, + # txt: torch.Tensor, + # vec: torch.Tensor, + # attn_mask: Optional[torch.Tensor] = None, + # cu_seqlens_q: Optional[torch.Tensor] = None, + # cu_seqlens_kv: Optional[torch.Tensor] = None, + # max_seqlen_q: Optional[int] = None, + # max_seqlen_kv: Optional[int] = None, + # freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None, + # ) -> Tuple[torch.Tensor, torch.Tensor]: + def forward(self, *args, **kwargs): + if self.training and self.gradient_checkpointing: + return checkpoint(self._forward, *args, use_reentrant=False, **kwargs) + else: + return self._forward(*args, **kwargs) + + +class MMSingleStreamBlock(nn.Module): + """ + A DiT block with parallel linear layers as described in + https://arxiv.org/abs/2302.05442 and adapted modulation interface. + Also refer to (SD3): https://arxiv.org/abs/2403.03206 + (Flux.1): https://github.com/black-forest-labs/flux + """ + + def __init__( + self, + hidden_size: int, + heads_num: int, + mlp_width_ratio: float = 4.0, + mlp_act_type: str = "gelu_tanh", + qk_norm: bool = True, + qk_norm_type: str = "rms", + qk_scale: float = None, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + attn_mode: str = "flash", + split_attn: bool = False, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.attn_mode = attn_mode + self.split_attn = split_attn + + self.deterministic = False + self.hidden_size = hidden_size + self.heads_num = heads_num + head_dim = hidden_size // heads_num + mlp_hidden_dim = int(hidden_size * mlp_width_ratio) + self.mlp_hidden_dim = mlp_hidden_dim + self.scale = qk_scale or head_dim**-0.5 + + # qkv and mlp_in + self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + mlp_hidden_dim, **factory_kwargs) + # proj and mlp_out + self.linear2 = nn.Linear(hidden_size + mlp_hidden_dim, hidden_size, **factory_kwargs) + + qk_norm_layer = get_norm_layer(qk_norm_type) + self.q_norm = qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + self.k_norm = qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + + self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6, **factory_kwargs) + + self.mlp_act = get_activation_layer(mlp_act_type)() + self.modulation = ModulateDiT(hidden_size, factor=3, act_layer=get_activation_layer("silu"), **factory_kwargs) + self.hybrid_seq_parallel_attn = None + + self.gradient_checkpointing = False + + def enable_deterministic(self): + self.deterministic = True + + def disable_deterministic(self): + self.deterministic = False + + def enable_gradient_checkpointing(self): + self.gradient_checkpointing = True + + def disable_gradient_checkpointing(self): + self.gradient_checkpointing = False + + def _forward( + self, + x: torch.Tensor, + vec: torch.Tensor, + txt_len: int, + attn_mask: Optional[torch.Tensor] = None, + total_len: Optional[torch.Tensor] = None, + cu_seqlens_q: Optional[torch.Tensor] = None, + cu_seqlens_kv: Optional[torch.Tensor] = None, + max_seqlen_q: Optional[int] = None, + max_seqlen_kv: Optional[int] = None, + freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None, + ) -> torch.Tensor: + mod_shift, mod_scale, mod_gate = 
self.modulation(vec).chunk(3, dim=-1) + x_mod = modulate(self.pre_norm(x), shift=mod_shift, scale=mod_scale) + qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1) + x_mod = None + # mlp = mlp.to("cpu", non_blocking=True) + # clean_memory_on_device(x.device) + + q, k, v = rearrange(qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num) + qkv = None + + # Apply QK-Norm if needed. + q = self.q_norm(q).to(v) + k = self.k_norm(k).to(v) + + # Apply RoPE if needed. + if freqs_cis is not None: + img_q, txt_q = q[:, :-txt_len, :, :], q[:, -txt_len:, :, :] + img_k, txt_k = k[:, :-txt_len, :, :], k[:, -txt_len:, :, :] + q = k = None + img_q_shape = img_q.shape + img_k_shape = img_k.shape + img_q, img_k = apply_rotary_emb(img_q, img_k, freqs_cis, head_first=False) + assert ( + img_q.shape == img_q_shape and img_k_shape == img_k.shape + ), f"img_kk: {img_q.shape}, img_q: {img_q.shape}, img_kk: {img_k.shape}, img_k: {img_k.shape}" + # img_q, img_k = img_qq, img_kk + # del img_qq, img_kk + q = torch.cat((img_q, txt_q), dim=1) + k = torch.cat((img_k, txt_k), dim=1) + del img_q, txt_q, img_k, txt_k + + # Compute attention. + assert cu_seqlens_q.shape[0] == 2 * x.shape[0] + 1, f"cu_seqlens_q.shape:{cu_seqlens_q.shape}, x.shape[0]:{x.shape[0]}" + + # attention computation start + if not self.hybrid_seq_parallel_attn: + l = [q, k, v] + q = k = v = None + attn = attention( + l, + mode=self.attn_mode, + attn_mask=attn_mask, + total_len=total_len, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + max_seqlen_q=max_seqlen_q, + max_seqlen_kv=max_seqlen_kv, + batch_size=x.shape[0], + ) + else: + attn = parallel_attention( + self.hybrid_seq_parallel_attn, + q, + k, + v, + img_q_len=img_q.shape[1], + img_kv_len=img_k.shape[1], + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_kv=cu_seqlens_kv, + ) + # attention computation end + + # Compute activation in mlp stream, cat again and run second linear layer. + # mlp = mlp.to(x.device) + mlp = self.mlp_act(mlp) + attn_mlp = torch.cat((attn, mlp), 2) + attn = None + mlp = None + output = self.linear2(attn_mlp) + attn_mlp = None + return x + apply_gate(output, gate=mod_gate) + + # def forward( + # self, + # x: torch.Tensor, + # vec: torch.Tensor, + # txt_len: int, + # attn_mask: Optional[torch.Tensor] = None, + # cu_seqlens_q: Optional[torch.Tensor] = None, + # cu_seqlens_kv: Optional[torch.Tensor] = None, + # max_seqlen_q: Optional[int] = None, + # max_seqlen_kv: Optional[int] = None, + # freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None, + # ) -> torch.Tensor: + def forward(self, *args, **kwargs): + if self.training and self.gradient_checkpointing: + return checkpoint(self._forward, *args, use_reentrant=False, **kwargs) + else: + return self._forward(*args, **kwargs) + + +class HYVideoDiffusionTransformer(nn.Module): # ModelMixin, ConfigMixin): + """ + HunyuanVideo Transformer backbone + + Inherited from ModelMixin and ConfigMixin for compatibility with diffusers' sampler StableDiffusionPipeline. + + Reference: + [1] Flux.1: https://github.com/black-forest-labs/flux + [2] MMDiT: http://arxiv.org/abs/2403.03206 + + Parameters + ---------- + args: argparse.Namespace + The arguments parsed by argparse. + patch_size: list + The size of the patch. + in_channels: int + The number of input channels. + out_channels: int + The number of output channels. + hidden_size: int + The hidden size of the transformer backbone. + heads_num: int + The number of attention heads. 
+ mlp_width_ratio: float + The ratio of the hidden size of the MLP in the transformer block. + mlp_act_type: str + The activation function of the MLP in the transformer block. + depth_double_blocks: int + The number of transformer blocks in the double blocks. + depth_single_blocks: int + The number of transformer blocks in the single blocks. + rope_dim_list: list + The dimension of the rotary embedding for t, h, w. + qkv_bias: bool + Whether to use bias in the qkv linear layer. + qk_norm: bool + Whether to use qk norm. + qk_norm_type: str + The type of qk norm. + guidance_embed: bool + Whether to use guidance embedding for distillation. + text_projection: str + The type of the text projection, default is single_refiner. + use_attention_mask: bool + Whether to use attention mask for text encoder. + dtype: torch.dtype + The dtype of the model. + device: torch.device + The device of the model. + attn_mode: str + The mode of the attention, default is flash. + split_attn: bool + Whether to use split attention (make attention as batch size 1). + """ + + # @register_to_config + def __init__( + self, + text_states_dim: int, + text_states_dim_2: int, + patch_size: list = [1, 2, 2], + in_channels: int = 4, # Should be VAE.config.latent_channels. + out_channels: int = None, + hidden_size: int = 3072, + heads_num: int = 24, + mlp_width_ratio: float = 4.0, + mlp_act_type: str = "gelu_tanh", + mm_double_blocks_depth: int = 20, + mm_single_blocks_depth: int = 40, + rope_dim_list: List[int] = [16, 56, 56], + qkv_bias: bool = True, + qk_norm: bool = True, + qk_norm_type: str = "rms", + guidance_embed: bool = False, # For modulation. + text_projection: str = "single_refiner", + use_attention_mask: bool = True, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + attn_mode: str = "flash", + split_attn: bool = False, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + + self.patch_size = patch_size + self.in_channels = in_channels + self.out_channels = in_channels if out_channels is None else out_channels + self.unpatchify_channels = self.out_channels + self.guidance_embed = guidance_embed + self.rope_dim_list = rope_dim_list + + # Text projection. Default to linear projection. + # Alternative: TokenRefiner. 
See more details (LI-DiT): http://arxiv.org/abs/2406.11831 + self.use_attention_mask = use_attention_mask + self.text_projection = text_projection + + self.text_states_dim = text_states_dim + self.text_states_dim_2 = text_states_dim_2 + + if hidden_size % heads_num != 0: + raise ValueError(f"Hidden size {hidden_size} must be divisible by heads_num {heads_num}") + pe_dim = hidden_size // heads_num + if sum(rope_dim_list) != pe_dim: + raise ValueError(f"Got {rope_dim_list} but expected positional dim {pe_dim}") + self.hidden_size = hidden_size + self.heads_num = heads_num + + self.attn_mode = attn_mode + self.split_attn = split_attn + print(f"Using {self.attn_mode} attention mode, split_attn: {self.split_attn}") + + # image projection + self.img_in = PatchEmbed(self.patch_size, self.in_channels, self.hidden_size, **factory_kwargs) + + # text projection + if self.text_projection == "linear": + self.txt_in = TextProjection( + self.text_states_dim, + self.hidden_size, + get_activation_layer("silu"), + **factory_kwargs, + ) + elif self.text_projection == "single_refiner": + self.txt_in = SingleTokenRefiner(self.text_states_dim, hidden_size, heads_num, depth=2, **factory_kwargs) + else: + raise NotImplementedError(f"Unsupported text_projection: {self.text_projection}") + + # time modulation + self.time_in = TimestepEmbedder(self.hidden_size, get_activation_layer("silu"), **factory_kwargs) + + # text modulation + self.vector_in = MLPEmbedder(self.text_states_dim_2, self.hidden_size, **factory_kwargs) + + # guidance modulation + self.guidance_in = ( + TimestepEmbedder(self.hidden_size, get_activation_layer("silu"), **factory_kwargs) if guidance_embed else None + ) + + # double blocks + self.double_blocks = nn.ModuleList( + [ + MMDoubleStreamBlock( + self.hidden_size, + self.heads_num, + mlp_width_ratio=mlp_width_ratio, + mlp_act_type=mlp_act_type, + qk_norm=qk_norm, + qk_norm_type=qk_norm_type, + qkv_bias=qkv_bias, + attn_mode=attn_mode, + split_attn=split_attn, + **factory_kwargs, + ) + for _ in range(mm_double_blocks_depth) + ] + ) + + # single blocks + self.single_blocks = nn.ModuleList( + [ + MMSingleStreamBlock( + self.hidden_size, + self.heads_num, + mlp_width_ratio=mlp_width_ratio, + mlp_act_type=mlp_act_type, + qk_norm=qk_norm, + qk_norm_type=qk_norm_type, + attn_mode=attn_mode, + split_attn=split_attn, + **factory_kwargs, + ) + for _ in range(mm_single_blocks_depth) + ] + ) + + self.final_layer = FinalLayer( + self.hidden_size, + self.patch_size, + self.out_channels, + get_activation_layer("silu"), + **factory_kwargs, + ) + + self.gradient_checkpointing = False + self.blocks_to_swap = None + self.offloader_double = None + self.offloader_single = None + self._enable_img_in_txt_in_offloading = False + + @property + def device(self): + return next(self.parameters()).device + + @property + def dtype(self): + return next(self.parameters()).dtype + + def enable_gradient_checkpointing(self): + self.gradient_checkpointing = True + + self.txt_in.enable_gradient_checkpointing() + + for block in self.double_blocks + self.single_blocks: + block.enable_gradient_checkpointing() + + print(f"HYVideoDiffusionTransformer: Gradient checkpointing enabled.") + + def disable_gradient_checkpointing(self): + self.gradient_checkpointing = False + + self.txt_in.disable_gradient_checkpointing() + + for block in self.double_blocks + self.single_blocks: + block.disable_gradient_checkpointing() + + print(f"HYVideoDiffusionTransformer: Gradient checkpointing disabled.") + + def enable_img_in_txt_in_offloading(self): + 
self._enable_img_in_txt_in_offloading = True + + def enable_block_swap(self, num_blocks: int, device: torch.device, supports_backward: bool): + self.blocks_to_swap = num_blocks + self.num_double_blocks = len(self.double_blocks) + self.num_single_blocks = len(self.single_blocks) + double_blocks_to_swap = num_blocks // 2 + single_blocks_to_swap = (num_blocks - double_blocks_to_swap) * 2 + 1 + + assert double_blocks_to_swap <= self.num_double_blocks - 1 and single_blocks_to_swap <= self.num_single_blocks - 1, ( + f"Cannot swap more than {self.num_double_blocks - 1} double blocks and {self.num_single_blocks - 1} single blocks. " + f"Requested {double_blocks_to_swap} double blocks and {single_blocks_to_swap} single blocks." + ) + + self.offloader_double = ModelOffloader( + "double", self.double_blocks, self.num_double_blocks, double_blocks_to_swap, supports_backward, device # , debug=True + ) + self.offloader_single = ModelOffloader( + "single", self.single_blocks, self.num_single_blocks, single_blocks_to_swap, supports_backward, device # , debug=True + ) + print( + f"HYVideoDiffusionTransformer: Block swap enabled. Swapping {num_blocks} blocks, double blocks: {double_blocks_to_swap}, single blocks: {single_blocks_to_swap}." + ) + + def switch_block_swap_for_inference(self): + if self.blocks_to_swap: + self.offloader_double.set_forward_only(True) + self.offloader_single.set_forward_only(True) + self.prepare_block_swap_before_forward() + print(f"HYVideoDiffusionTransformer: Block swap set to forward only.") + + def switch_block_swap_for_training(self): + if self.blocks_to_swap: + self.offloader_double.set_forward_only(False) + self.offloader_single.set_forward_only(False) + self.prepare_block_swap_before_forward() + print(f"HYVideoDiffusionTransformer: Block swap set to forward and backward.") + + def move_to_device_except_swap_blocks(self, device: torch.device): + # assume model is on cpu. do not move blocks to device to reduce temporary memory usage + if self.blocks_to_swap: + save_double_blocks = self.double_blocks + save_single_blocks = self.single_blocks + self.double_blocks = None + self.single_blocks = None + + self.to(device) + + if self.blocks_to_swap: + self.double_blocks = save_double_blocks + self.single_blocks = save_single_blocks + + def prepare_block_swap_before_forward(self): + if self.blocks_to_swap is None or self.blocks_to_swap == 0: + return + self.offloader_double.prepare_block_devices_before_forward(self.double_blocks) + self.offloader_single.prepare_block_devices_before_forward(self.single_blocks) + + def enable_deterministic(self): + for block in self.double_blocks: + block.enable_deterministic() + for block in self.single_blocks: + block.enable_deterministic() + + def disable_deterministic(self): + for block in self.double_blocks: + block.disable_deterministic() + for block in self.single_blocks: + block.disable_deterministic() + + def forward( + self, + x: torch.Tensor, + t: torch.Tensor, # Should be in range(0, 1000). + text_states: torch.Tensor = None, + text_mask: torch.Tensor = None, # Now we don't use it. + text_states_2: Optional[torch.Tensor] = None, # Text embedding for modulation. + freqs_cos: Optional[torch.Tensor] = None, + freqs_sin: Optional[torch.Tensor] = None, + guidance: torch.Tensor = None, # Guidance for modulation, should be cfg_scale x 1000. 
+ return_dict: bool = True, + ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]: + out = {} + img = x + txt = text_states + _, _, ot, oh, ow = x.shape + tt, th, tw = ( + ot // self.patch_size[0], + oh // self.patch_size[1], + ow // self.patch_size[2], + ) + + # Prepare modulation vectors. + vec = self.time_in(t) + + # text modulation + vec = vec + self.vector_in(text_states_2) + + # guidance modulation + if self.guidance_embed: + if guidance is None: + raise ValueError("Didn't get guidance strength for guidance distilled model.") + + # our timestep_embedding is merged into guidance_in(TimestepEmbedder) + vec = vec + self.guidance_in(guidance) + + # Embed image and text. + if self._enable_img_in_txt_in_offloading: + self.img_in.to(x.device, non_blocking=True) + self.txt_in.to(x.device, non_blocking=True) + synchronize_device(x.device) + + img = self.img_in(img) + if self.text_projection == "linear": + txt = self.txt_in(txt) + elif self.text_projection == "single_refiner": + txt = self.txt_in(txt, t, text_mask if self.use_attention_mask else None) + else: + raise NotImplementedError(f"Unsupported text_projection: {self.text_projection}") + + if self._enable_img_in_txt_in_offloading: + self.img_in.to(torch.device("cpu"), non_blocking=True) + self.txt_in.to(torch.device("cpu"), non_blocking=True) + synchronize_device(x.device) + clean_memory_on_device(x.device) + + txt_seq_len = txt.shape[1] + img_seq_len = img.shape[1] + + # Compute cu_squlens and max_seqlen for flash attention + cu_seqlens_q = get_cu_seqlens(text_mask, img_seq_len) + cu_seqlens_kv = cu_seqlens_q + max_seqlen_q = img_seq_len + txt_seq_len + max_seqlen_kv = max_seqlen_q + + attn_mask = total_len = None + if self.split_attn or self.attn_mode == "torch": + # calculate text length and total length + text_len = text_mask.sum(dim=1) # (bs, ) + total_len = img_seq_len + text_len # (bs, ) + if self.attn_mode == "torch" and not self.split_attn: + # initialize attention mask: bool tensor for sdpa, (b, 1, n, n) + bs = img.shape[0] + attn_mask = torch.zeros((bs, 1, max_seqlen_q, max_seqlen_q), dtype=torch.bool, device=text_mask.device) + + # set attention mask with total_len + for i in range(bs): + attn_mask[i, :, : total_len[i], : total_len[i]] = True + total_len = None # means we don't use split_attn + + freqs_cis = (freqs_cos, freqs_sin) if freqs_cos is not None else None + # --------------------- Pass through DiT blocks ------------------------ + for block_idx, block in enumerate(self.double_blocks): + double_block_args = [ + img, + txt, + vec, + attn_mask, + total_len, + cu_seqlens_q, + cu_seqlens_kv, + max_seqlen_q, + max_seqlen_kv, + freqs_cis, + ] + + if self.blocks_to_swap: + self.offloader_double.wait_for_block(block_idx) + + img, txt = block(*double_block_args) + + if self.blocks_to_swap: + self.offloader_double.submit_move_blocks_forward(self.double_blocks, block_idx) + + # Merge txt and img to pass through single stream blocks. 
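# [Editor's aside -- illustrative sketch, not part of the original patch.]
# After the double-stream blocks, image and text tokens are concatenated along the
# sequence axis for the single-stream blocks (the cat right below) and the image part is
# recovered afterwards by slicing with img_seq_len. Shape sketch with toy sizes:
import torch

B, img_seq_len, txt_seq_len, hidden = 2, 12, 5, 16
img = torch.randn(B, img_seq_len, hidden)
txt = torch.randn(B, txt_seq_len, hidden)
x = torch.cat((img, txt), dim=1)        # (B, img_seq_len + txt_seq_len, hidden)
img_out = x[:, :img_seq_len, ...]       # what the final layer consumes after the single blocks
assert img_out.shape == (B, img_seq_len, hidden)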
+ x = torch.cat((img, txt), 1) + if self.blocks_to_swap: + # delete img, txt to reduce memory usage + del img, txt + clean_memory_on_device(x.device) + + if len(self.single_blocks) > 0: + for block_idx, block in enumerate(self.single_blocks): + single_block_args = [ + x, + vec, + txt_seq_len, + attn_mask, + total_len, + cu_seqlens_q, + cu_seqlens_kv, + max_seqlen_q, + max_seqlen_kv, + freqs_cis, + ] + if self.blocks_to_swap: + self.offloader_single.wait_for_block(block_idx) + + x = block(*single_block_args) + + if self.blocks_to_swap: + self.offloader_single.submit_move_blocks_forward(self.single_blocks, block_idx) + + img = x[:, :img_seq_len, ...] + x = None + + # ---------------------------- Final layer ------------------------------ + img = self.final_layer(img, vec) # (N, T, patch_size ** 2 * out_channels) + + img = self.unpatchify(img, tt, th, tw) + if return_dict: + out["x"] = img + return out + return img + + def unpatchify(self, x, t, h, w): + """ + x: (N, T, patch_size**2 * C) + imgs: (N, H, W, C) + """ + c = self.unpatchify_channels + pt, ph, pw = self.patch_size + assert t * h * w == x.shape[1] + + x = x.reshape(shape=(x.shape[0], t, h, w, c, pt, ph, pw)) + x = torch.einsum("nthwcopq->nctohpwq", x) + imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw)) + + return imgs + + def params_count(self): + counts = { + "double": sum( + [ + sum(p.numel() for p in block.img_attn_qkv.parameters()) + + sum(p.numel() for p in block.img_attn_proj.parameters()) + + sum(p.numel() for p in block.img_mlp.parameters()) + + sum(p.numel() for p in block.txt_attn_qkv.parameters()) + + sum(p.numel() for p in block.txt_attn_proj.parameters()) + + sum(p.numel() for p in block.txt_mlp.parameters()) + for block in self.double_blocks + ] + ), + "single": sum( + [ + sum(p.numel() for p in block.linear1.parameters()) + sum(p.numel() for p in block.linear2.parameters()) + for block in self.single_blocks + ] + ), + "total": sum(p.numel() for p in self.parameters()), + } + counts["attn+mlp"] = counts["double"] + counts["single"] + return counts + + +################################################################################# +# HunyuanVideo Configs # +################################################################################# + +HUNYUAN_VIDEO_CONFIG = { + "HYVideo-T/2": { + "mm_double_blocks_depth": 20, + "mm_single_blocks_depth": 40, + "rope_dim_list": [16, 56, 56], + "hidden_size": 3072, + "heads_num": 24, + "mlp_width_ratio": 4, + }, + "HYVideo-T/2-cfgdistill": { + "mm_double_blocks_depth": 20, + "mm_single_blocks_depth": 40, + "rope_dim_list": [16, 56, 56], + "hidden_size": 3072, + "heads_num": 24, + "mlp_width_ratio": 4, + "guidance_embed": True, + }, +} + + +def load_dit_model(text_states_dim, text_states_dim_2, in_channels, out_channels, factor_kwargs): + """load hunyuan video model + + NOTE: Only support HYVideo-T/2-cfgdistill now. 
+ + Args: + text_state_dim (int): text state dimension + text_state_dim_2 (int): text state dimension 2 + in_channels (int): input channels number + out_channels (int): output channels number + factor_kwargs (dict): factor kwargs + + Returns: + model (nn.Module): The hunyuan video model + """ + # if args.model in HUNYUAN_VIDEO_CONFIG.keys(): + model = HYVideoDiffusionTransformer( + text_states_dim=text_states_dim, + text_states_dim_2=text_states_dim_2, + in_channels=in_channels, + out_channels=out_channels, + **HUNYUAN_VIDEO_CONFIG["HYVideo-T/2-cfgdistill"], + **factor_kwargs, + ) + return model + # else: + # raise NotImplementedError() + + +def load_state_dict(model, model_path): + state_dict = torch.load(model_path, map_location=lambda storage, loc: storage, weights_only=True) + + load_key = "module" + if load_key in state_dict: + state_dict = state_dict[load_key] + else: + raise KeyError( + f"Missing key: `{load_key}` in the checkpoint: {model_path}. The keys in the checkpoint " + f"are: {list(state_dict.keys())}." + ) + model.load_state_dict(state_dict, strict=True, assign=True) + return model + + +def load_transformer(dit_path, attn_mode, split_attn, device, dtype, in_channels=16) -> HYVideoDiffusionTransformer: + # =========================== Build main model =========================== + factor_kwargs = {"device": device, "dtype": dtype, "attn_mode": attn_mode, "split_attn": split_attn} + latent_channels = 16 + out_channels = latent_channels + + with accelerate.init_empty_weights(): + transformer = load_dit_model( + text_states_dim=4096, + text_states_dim_2=768, + in_channels=in_channels, + out_channels=out_channels, + factor_kwargs=factor_kwargs, + ) + + if os.path.splitext(dit_path)[-1] == ".safetensors": + # loading safetensors: may be already fp8 + with MemoryEfficientSafeOpen(dit_path) as f: + state_dict = {} + for k in f.keys(): + tensor = f.get_tensor(k) + tensor = tensor.to(device=device, dtype=dtype) + # TODO support comfy model + # if k.startswith("model.model."): + # k = convert_comfy_model_key(k) + state_dict[k] = tensor + transformer.load_state_dict(state_dict, strict=True, assign=True) + else: + transformer = load_state_dict(transformer, dit_path) + + return transformer + + +def get_rotary_pos_embed_by_shape(model, latents_size): + target_ndim = 3 + ndim = 5 - 2 + + if isinstance(model.patch_size, int): + assert all(s % model.patch_size == 0 for s in latents_size), ( + f"Latent size(last {ndim} dimensions) should be divisible by patch size({model.patch_size}), " + f"but got {latents_size}." + ) + rope_sizes = [s // model.patch_size for s in latents_size] + elif isinstance(model.patch_size, list): + assert all(s % model.patch_size[idx] == 0 for idx, s in enumerate(latents_size)), ( + f"Latent size(last {ndim} dimensions) should be divisible by patch size({model.patch_size}), " + f"but got {latents_size}." 
+ ) + rope_sizes = [s // model.patch_size[idx] for idx, s in enumerate(latents_size)] + + if len(rope_sizes) != target_ndim: + rope_sizes = [1] * (target_ndim - len(rope_sizes)) + rope_sizes # time axis + head_dim = model.hidden_size // model.heads_num + rope_dim_list = model.rope_dim_list + if rope_dim_list is None: + rope_dim_list = [head_dim // target_ndim for _ in range(target_ndim)] + assert sum(rope_dim_list) == head_dim, "sum(rope_dim_list) should equal to head_dim of attention layer" + + rope_theta = 256 + freqs_cos, freqs_sin = get_nd_rotary_pos_embed( + rope_dim_list, rope_sizes, theta=rope_theta, use_real=True, theta_rescale_factor=1 + ) + return freqs_cos, freqs_sin + + +def get_rotary_pos_embed(vae_name, model, video_length, height, width): + # 884 + if "884" in vae_name: + latents_size = [(video_length - 1) // 4 + 1, height // 8, width // 8] + elif "888" in vae_name: + latents_size = [(video_length - 1) // 8 + 1, height // 8, width // 8] + else: + latents_size = [video_length, height // 8, width // 8] + + return get_rotary_pos_embed_by_shape(model, latents_size) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/modulate_layers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/modulate_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..93a57c6d2fdc0fca9bf44aeee6996bf1d8a05901 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/modulate_layers.py @@ -0,0 +1,76 @@ +from typing import Callable + +import torch +import torch.nn as nn + + +class ModulateDiT(nn.Module): + """Modulation layer for DiT.""" + def __init__( + self, + hidden_size: int, + factor: int, + act_layer: Callable, + dtype=None, + device=None, + ): + factory_kwargs = {"dtype": dtype, "device": device} + super().__init__() + self.act = act_layer() + self.linear = nn.Linear( + hidden_size, factor * hidden_size, bias=True, **factory_kwargs + ) + # Zero-initialize the modulation + nn.init.zeros_(self.linear.weight) + nn.init.zeros_(self.linear.bias) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.linear(self.act(x)) + + +def modulate(x, shift=None, scale=None): + """modulate by shift and scale + + Args: + x (torch.Tensor): input tensor. + shift (torch.Tensor, optional): shift tensor. Defaults to None. + scale (torch.Tensor, optional): scale tensor. Defaults to None. + + Returns: + torch.Tensor: the output tensor after modulate. + """ + if scale is None and shift is None: + return x + elif shift is None: + return x * (1 + scale.unsqueeze(1)) + elif scale is None: + return x + shift.unsqueeze(1) + else: + return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1) + + +def apply_gate(x, gate=None, tanh=False): + """AI is creating summary for apply_gate + + Args: + x (torch.Tensor): input tensor. + gate (torch.Tensor, optional): gate tensor. Defaults to None. + tanh (bool, optional): whether to use tanh function. Defaults to False. + + Returns: + torch.Tensor: the output tensor after apply gate. 
+ """ + if gate is None: + return x + if tanh: + return x * gate.unsqueeze(1).tanh() + else: + return x * gate.unsqueeze(1) + + +def ckpt_wrapper(module): + def ckpt_forward(*inputs): + outputs = module(*inputs) + return outputs + + return ckpt_forward diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/norm_layers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/norm_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..a53d167436b6971d3aabf5cfe51c0b9d6dfc022f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/norm_layers.py @@ -0,0 +1,79 @@ +import torch +import torch.nn as nn + + +class RMSNorm(nn.Module): + def __init__( + self, + dim: int, + elementwise_affine=True, + eps: float = 1e-6, + device=None, + dtype=None, + ): + """ + Initialize the RMSNorm normalization layer. + + Args: + dim (int): The dimension of the input tensor. + eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6. + + Attributes: + eps (float): A small value added to the denominator for numerical stability. + weight (nn.Parameter): Learnable scaling parameter. + + """ + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + if elementwise_affine: + self.weight = nn.Parameter(torch.ones(dim, **factory_kwargs)) + + def _norm(self, x): + """ + Apply the RMSNorm normalization to the input tensor. + + Args: + x (torch.Tensor): The input tensor. + + Returns: + torch.Tensor: The normalized tensor. + + """ + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + """ + Forward pass through the RMSNorm layer. + + Args: + x (torch.Tensor): The input tensor. + + Returns: + torch.Tensor: The output tensor after applying RMSNorm. + + """ + output = self._norm(x.float()).type_as(x) + if hasattr(self, "weight"): + # output = output * self.weight + # support fp8 + output = output * self.weight.to(output.dtype) + return output + + +def get_norm_layer(norm_layer): + """ + Get the normalization layer. + + Args: + norm_layer (str): The type of normalization layer. + + Returns: + norm_layer (nn.Module): The normalization layer. + """ + if norm_layer == "layer": + return nn.LayerNorm + elif norm_layer == "rms": + return RMSNorm + else: + raise NotImplementedError(f"Norm layer {norm_layer} is not implemented") diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/pipeline_hunyuan_video.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/pipeline_hunyuan_video.py new file mode 100644 index 0000000000000000000000000000000000000000..c1293161e13a47ae7dcedfef2c55e3baefc655f4 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/pipeline_hunyuan_video.py @@ -0,0 +1,1100 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +# +# Modified from diffusers==0.29.2 +# +# ============================================================================== +import inspect +from typing import Any, Callable, Dict, List, Optional, Union, Tuple +import torch +import torch.distributed as dist +import numpy as np +from dataclasses import dataclass +from packaging import version + +from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback +from diffusers.configuration_utils import FrozenDict +from diffusers.image_processor import VaeImageProcessor +from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin +from diffusers.models import AutoencoderKL +from diffusers.models.lora import adjust_lora_scale_text_encoder +from diffusers.schedulers import KarrasDiffusionSchedulers +from diffusers.utils import ( + USE_PEFT_BACKEND, + deprecate, + logging, + replace_example_docstring, + scale_lora_layers, + unscale_lora_layers, +) +from diffusers.utils.torch_utils import randn_tensor +from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.utils import BaseOutput + +from ...constants import PRECISION_TO_TYPE +from ...vae.autoencoder_kl_causal_3d import AutoencoderKLCausal3D +from ...text_encoder import TextEncoder +from ...modules import HYVideoDiffusionTransformer + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +EXAMPLE_DOC_STRING = """""" + + +def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0): + """ + Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4 + """ + std_text = noise_pred_text.std( + dim=list(range(1, noise_pred_text.ndim)), keepdim=True + ) + std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True) + # rescale the results from guidance (fixes overexposure) + noise_pred_rescaled = noise_cfg * (std_text / std_cfg) + # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images + noise_cfg = ( + guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg + ) + return noise_cfg + + +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + sigmas: Optional[List[float]] = None, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` + must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, + `num_inference_steps` and `sigmas` must be `None`. + sigmas (`List[float]`, *optional*): + Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, + `num_inference_steps` and `timesteps` must be `None`. 
+ + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None and sigmas is not None: + raise ValueError( + "Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values" + ) + if timesteps is not None: + accepts_timesteps = "timesteps" in set( + inspect.signature(scheduler.set_timesteps).parameters.keys() + ) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + elif sigmas is not None: + accept_sigmas = "sigmas" in set( + inspect.signature(scheduler.set_timesteps).parameters.keys() + ) + if not accept_sigmas: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" sigmas schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + return timesteps, num_inference_steps + + +@dataclass +class HunyuanVideoPipelineOutput(BaseOutput): + videos: Union[torch.Tensor, np.ndarray] + + +class HunyuanVideoPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-video generation using HunyuanVideo. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods + implemented for all pipelines (downloading, saving, running on a particular device, etc.). + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. + text_encoder ([`TextEncoder`]): + Frozen text-encoder. + text_encoder_2 ([`TextEncoder`]): + Frozen text-encoder_2. + transformer ([`HYVideoDiffusionTransformer`]): + A `HYVideoDiffusionTransformer` to denoise the encoded video latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `unet` to denoise the encoded image latents. 
+ """ + + model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae" + _optional_components = ["text_encoder_2"] + _exclude_from_cpu_offload = ["transformer"] + _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"] + + def __init__( + self, + vae: AutoencoderKL, + text_encoder: TextEncoder, + transformer: HYVideoDiffusionTransformer, + scheduler: KarrasDiffusionSchedulers, + text_encoder_2: Optional[TextEncoder] = None, + progress_bar_config: Dict[str, Any] = None, + args=None, + ): + super().__init__() + + # ========================================================================================== + if progress_bar_config is None: + progress_bar_config = {} + if not hasattr(self, "_progress_bar_config"): + self._progress_bar_config = {} + self._progress_bar_config.update(progress_bar_config) + + self.args = args + # ========================================================================================== + + if ( + hasattr(scheduler.config, "steps_offset") + and scheduler.config.steps_offset != 1 + ): + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" + f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " + "to update the config accordingly as leaving `steps_offset` might led to incorrect results" + " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," + " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" + " file" + ) + deprecate( + "steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False + ) + new_config = dict(scheduler.config) + new_config["steps_offset"] = 1 + scheduler._internal_dict = FrozenDict(new_config) + + if ( + hasattr(scheduler.config, "clip_sample") + and scheduler.config.clip_sample is True + ): + deprecation_message = ( + f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." + " `clip_sample` should be set to False in the configuration file. Please make sure to update the" + " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" + " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" + " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" + ) + deprecate( + "clip_sample not set", "1.0.0", deprecation_message, standard_warn=False + ) + new_config = dict(scheduler.config) + new_config["clip_sample"] = False + scheduler._internal_dict = FrozenDict(new_config) + + self.register_modules( + vae=vae, + text_encoder=text_encoder, + transformer=transformer, + scheduler=scheduler, + text_encoder_2=text_encoder_2, + ) + self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + def encode_prompt( + self, + prompt, + device, + num_videos_per_prompt, + do_classifier_free_guidance, + negative_prompt=None, + prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_attention_mask: Optional[torch.Tensor] = None, + lora_scale: Optional[float] = None, + clip_skip: Optional[int] = None, + text_encoder: Optional[TextEncoder] = None, + data_type: Optional[str] = "image", + ): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + device: (`torch.device`): + torch device + num_videos_per_prompt (`int`): + number of videos that should be generated per prompt + do_classifier_free_guidance (`bool`): + whether to use classifier free guidance or not + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the video generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + attention_mask (`torch.Tensor`, *optional*): + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_attention_mask (`torch.Tensor`, *optional*): + lora_scale (`float`, *optional*): + A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + text_encoder (TextEncoder, *optional*): + data_type (`str`, *optional*): + """ + if text_encoder is None: + text_encoder = self.text_encoder + + # set lora scale so that monkey patched LoRA + # function of text encoder can correctly access it + if lora_scale is not None and isinstance(self, LoraLoaderMixin): + self._lora_scale = lora_scale + + # dynamically adjust the LoRA scale + if not USE_PEFT_BACKEND: + adjust_lora_scale_text_encoder(text_encoder.model, lora_scale) + else: + scale_lora_layers(text_encoder.model, lora_scale) + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + if prompt_embeds is None: + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + prompt = self.maybe_convert_prompt(prompt, text_encoder.tokenizer) + + text_inputs = text_encoder.text2tokens(prompt, data_type=data_type) + + if clip_skip is None: + prompt_outputs = text_encoder.encode( + text_inputs, data_type=data_type, device=device + ) + prompt_embeds = prompt_outputs.hidden_state + else: + prompt_outputs = text_encoder.encode( + text_inputs, + output_hidden_states=True, + data_type=data_type, + device=device, + ) + # Access the `hidden_states` first, that contains a tuple of + # all the hidden states from the encoder layers. Then index into + # the tuple to access the hidden states from the desired layer. + prompt_embeds = prompt_outputs.hidden_states_list[-(clip_skip + 1)] + # We also need to apply the final LayerNorm here to not mess with the + # representations. The `last_hidden_states` that we typically use for + # obtaining the final prompt representations passes through the LayerNorm + # layer. 
+ prompt_embeds = text_encoder.model.text_model.final_layer_norm( + prompt_embeds + ) + + attention_mask = prompt_outputs.attention_mask + if attention_mask is not None: + attention_mask = attention_mask.to(device) + bs_embed, seq_len = attention_mask.shape + attention_mask = attention_mask.repeat(1, num_videos_per_prompt) + attention_mask = attention_mask.view( + bs_embed * num_videos_per_prompt, seq_len + ) + + if text_encoder is not None: + prompt_embeds_dtype = text_encoder.dtype + elif self.transformer is not None: + prompt_embeds_dtype = self.transformer.dtype + else: + prompt_embeds_dtype = prompt_embeds.dtype + + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + if prompt_embeds.ndim == 2: + bs_embed, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, -1) + else: + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view( + bs_embed * num_videos_per_prompt, seq_len, -1 + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens: List[str] + if negative_prompt is None: + uncond_tokens = [""] * batch_size + elif prompt is not None and type(prompt) is not type(negative_prompt): + raise TypeError( + f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" + f" {type(prompt)}." + ) + elif isinstance(negative_prompt, str): + uncond_tokens = [negative_prompt] + elif batch_size != len(negative_prompt): + raise ValueError( + f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" + f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" + " the batch size of `prompt`." 
+ ) + else: + uncond_tokens = negative_prompt + + # textual inversion: process multi-vector tokens if necessary + if isinstance(self, TextualInversionLoaderMixin): + uncond_tokens = self.maybe_convert_prompt( + uncond_tokens, text_encoder.tokenizer + ) + + # max_length = prompt_embeds.shape[1] + uncond_input = text_encoder.text2tokens(uncond_tokens, data_type=data_type) + + negative_prompt_outputs = text_encoder.encode( + uncond_input, data_type=data_type, device=device + ) + negative_prompt_embeds = negative_prompt_outputs.hidden_state + + negative_attention_mask = negative_prompt_outputs.attention_mask + if negative_attention_mask is not None: + negative_attention_mask = negative_attention_mask.to(device) + _, seq_len = negative_attention_mask.shape + negative_attention_mask = negative_attention_mask.repeat( + 1, num_videos_per_prompt + ) + negative_attention_mask = negative_attention_mask.view( + batch_size * num_videos_per_prompt, seq_len + ) + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to( + dtype=prompt_embeds_dtype, device=device + ) + + if negative_prompt_embeds.ndim == 2: + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_videos_per_prompt + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_videos_per_prompt, -1 + ) + else: + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_videos_per_prompt, 1 + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_videos_per_prompt, seq_len, -1 + ) + + if text_encoder is not None: + if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: + # Retrieve the original scale by scaling back the LoRA layers + unscale_lora_layers(text_encoder.model, lora_scale) + + return ( + prompt_embeds, + negative_prompt_embeds, + attention_mask, + negative_attention_mask, + ) + + def decode_latents(self, latents, enable_tiling=True): + deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead" + deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False) + + latents = 1 / self.vae.config.scaling_factor * latents + if enable_tiling: + self.vae.enable_tiling() + image = self.vae.decode(latents, return_dict=False)[0] + else: + image = self.vae.decode(latents, return_dict=False)[0] + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 + if image.ndim == 4: + image = image.cpu().permute(0, 2, 3, 1).float() + else: + image = image.cpu().float() + return image + + def prepare_extra_func_kwargs(self, func, kwargs): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + extra_step_kwargs = {} + + for k, v in kwargs.items(): + accepts = k in set(inspect.signature(func).parameters.keys()) + if accepts: + extra_step_kwargs[k] = v + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + video_length, + callback_steps, + negative_prompt=None, + prompt_embeds=None, + negative_prompt_embeds=None, + callback_on_step_end_tensor_inputs=None, + vae_ver="88-4c-sd", + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError( + f"`height` and `width` have to be divisible by 8 but are {height} and {width}." + ) + + if video_length is not None: + if "884" in vae_ver: + if video_length != 1 and (video_length - 1) % 4 != 0: + raise ValueError( + f"`video_length` has to be 1 or a multiple of 4 but is {video_length}." + ) + elif "888" in vae_ver: + if video_length != 1 and (video_length - 1) % 8 != 0: + raise ValueError( + f"`video_length` has to be 1 or a multiple of 8 but is {video_length}." + ) + + if callback_steps is not None and ( + not isinstance(callback_steps, int) or callback_steps <= 0 + ): + raise ValueError( + f"`callback_steps` has to be a positive integer but is {callback_steps} of type" + f" {type(callback_steps)}." + ) + if callback_on_step_end_tensor_inputs is not None and not all( + k in self._callback_tensor_inputs + for k in callback_on_step_end_tensor_inputs + ): + raise ValueError( + f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and ( + not isinstance(prompt, str) and not isinstance(prompt, list) + ): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + + + def prepare_latents( + self, + batch_size, + num_channels_latents, + height, + width, + video_length, + dtype, + device, + generator, + latents=None, + ): + shape = ( + batch_size, + num_channels_latents, + video_length, + int(height) // self.vae_scale_factor, + int(width) // self.vae_scale_factor, + ) + if isinstance(generator, list) and len(generator) != batch_size: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
+ ) + + if latents is None: + latents = randn_tensor( + shape, generator=generator, device=device, dtype=dtype + ) + else: + latents = latents.to(device) + + # Check existence to make it compatible with FlowMatchEulerDiscreteScheduler + if hasattr(self.scheduler, "init_noise_sigma"): + # scale the initial noise by the standard deviation required by the scheduler + latents = latents * self.scheduler.init_noise_sigma + return latents + + # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding + def get_guidance_scale_embedding( + self, + w: torch.Tensor, + embedding_dim: int = 512, + dtype: torch.dtype = torch.float32, + ) -> torch.Tensor: + """ + See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 + + Args: + w (`torch.Tensor`): + Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings. + embedding_dim (`int`, *optional*, defaults to 512): + Dimension of the embeddings to generate. + dtype (`torch.dtype`, *optional*, defaults to `torch.float32`): + Data type of the generated embeddings. + + Returns: + `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`. + """ + assert len(w.shape) == 1 + w = w * 1000.0 + + half_dim = embedding_dim // 2 + emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) + emb = w.to(dtype)[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0, 1)) + assert emb.shape == (w.shape[0], embedding_dim) + return emb + + @property + def guidance_scale(self): + return self._guidance_scale + + @property + def guidance_rescale(self): + return self._guidance_rescale + + @property + def clip_skip(self): + return self._clip_skip + + # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) + # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` + # corresponds to doing no classifier free guidance. 
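# [Editor's aside -- illustrative sketch, not part of the original patch.]
# `guidance_scale` here is the classifier-free-guidance weight: the denoiser is evaluated on
# both unconditional and text-conditional inputs and the two predictions are then combined
# (in diffusers-style pipelines) as noise_uncond + guidance_scale * (noise_text - noise_uncond).
# A scale of 1 reduces to the conditional prediction alone, which is why the property below
# only enables CFG for guidance_scale > 1. Sketch of the combination plus the optional
# rescaling implemented by rescale_noise_cfg() earlier in this file, using toy tensors:
import torch

guidance_scale, guidance_rescale = 7.5, 0.7
noise_uncond = torch.randn(1, 4, 8, 8)
noise_text = torch.randn(1, 4, 8, 8)

noise_pred = noise_uncond + guidance_scale * (noise_text - noise_uncond)

# same maths as rescale_noise_cfg(noise_pred, noise_text, guidance_rescale)
std_text = noise_text.std(dim=list(range(1, noise_text.ndim)), keepdim=True)
std_cfg = noise_pred.std(dim=list(range(1, noise_pred.ndim)), keepdim=True)
noise_pred = guidance_rescale * (noise_pred * (std_text / std_cfg)) + (1 - guidance_rescale) * noise_pred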
+ @property + def do_classifier_free_guidance(self): + # return self._guidance_scale > 1 and self.transformer.config.time_cond_proj_dim is None + return self._guidance_scale > 1 + + @property + def cross_attention_kwargs(self): + return self._cross_attention_kwargs + + @property + def num_timesteps(self): + return self._num_timesteps + + @property + def interrupt(self): + return self._interrupt + + @torch.no_grad() + @replace_example_docstring(EXAMPLE_DOC_STRING) + def __call__( + self, + prompt: Union[str, List[str]], + height: int, + width: int, + video_length: int, + data_type: str = "video", + num_inference_steps: int = 50, + timesteps: List[int] = None, + sigmas: List[float] = None, + guidance_scale: float = 7.5, + negative_prompt: Optional[Union[str, List[str]]] = None, + num_videos_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.Tensor] = None, + prompt_embeds: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + negative_prompt_embeds: Optional[torch.Tensor] = None, + negative_attention_mask: Optional[torch.Tensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + cross_attention_kwargs: Optional[Dict[str, Any]] = None, + guidance_rescale: float = 0.0, + clip_skip: Optional[int] = None, + callback_on_step_end: Optional[ + Union[ + Callable[[int, int, Dict], None], + PipelineCallback, + MultiPipelineCallbacks, + ] + ] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + freqs_cis: Tuple[torch.Tensor, torch.Tensor] = None, + vae_ver: str = "88-4c-sd", + enable_tiling: bool = False, + n_tokens: Optional[int] = None, + embedded_guidance_scale: Optional[float] = None, + **kwargs, + ): + r""" + The call function to the pipeline for generation. + + Args: + prompt (`str` or `List[str]`): + The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. + height (`int`): + The height in pixels of the generated image. + width (`int`): + The width in pixels of the generated image. + video_length (`int`): + The number of frames in the generated video. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument + in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is + passed will be used. Must be in descending order. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. + guidance_scale (`float`, *optional*, defaults to 7.5): + A higher guidance scale value encourages the model to generate images closely linked to the text + `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide what to not include in image generation. If not defined, you need to + pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). 
+ num_videos_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies + to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make + generation deterministic. + latents (`torch.Tensor`, *optional*): + Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor is generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not + provided, text embeddings are generated from the `prompt` input argument. + negative_prompt_embeds (`torch.Tensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If + not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument. + + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generated image. Choose between `PIL.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`HunyuanVideoPipelineOutput`] instead of a + plain tuple. + cross_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in + [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + guidance_rescale (`float`, *optional*, defaults to 0.0): + Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are + Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when + using zero terminal SNR. + clip_skip (`int`, *optional*): + Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that + the output of the pre-final layer will be used for computing the prompt embeddings. + callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): + A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of + each denoising step during the inference. with the following arguments: `callback_on_step_end(self: + DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a + list of all tensors as specified by `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
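            freqs_cis (`Tuple[torch.Tensor, torch.Tensor]`, *optional*):
                Precomputed rotary position embedding tensors `(freqs_cos, freqs_sin)` that are forwarded to the
                transformer at every denoising step.
            vae_ver (`str`, *optional*, defaults to `"88-4c-sd"`):
                VAE version tag. A tag containing `"884"` or `"888"` selects a temporal compression factor of 4 or
                8 when the latent video length is computed.
            enable_tiling (`bool`, *optional*, defaults to `False`):
                Whether to enable VAE tiling during decoding to reduce peak memory usage.
            n_tokens (`int`, *optional*):
                Token count forwarded to the scheduler's `set_timesteps` method when that method accepts an
                `n_tokens` argument.
            embedded_guidance_scale (`float`, *optional*):
                Embedded guidance value passed to the transformer's `guidance` input (scaled by 1000).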
+ + Examples: + + Returns: + [`~HunyuanVideoPipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`HunyuanVideoPipelineOutput`] is returned, + otherwise a `tuple` is returned where the first element is a list with the generated images and the + second element is a list of `bool`s indicating whether the corresponding generated image contains + "not-safe-for-work" (nsfw) content. + """ + callback = kwargs.pop("callback", None) + callback_steps = kwargs.pop("callback_steps", None) + + if callback is not None: + deprecate( + "callback", + "1.0.0", + "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + if callback_steps is not None: + deprecate( + "callback_steps", + "1.0.0", + "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", + ) + + if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): + callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs + + # 0. Default height and width to unet + # height = height or self.transformer.config.sample_size * self.vae_scale_factor + # width = width or self.transformer.config.sample_size * self.vae_scale_factor + # to deal with lora scaling and other possible forward hooks + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + height, + width, + video_length, + callback_steps, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + callback_on_step_end_tensor_inputs, + vae_ver=vae_ver, + ) + + self._guidance_scale = guidance_scale + self._guidance_rescale = guidance_rescale + self._clip_skip = clip_skip + self._cross_attention_kwargs = cross_attention_kwargs + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = torch.device(f"cuda:{dist.get_rank()}") if dist.is_initialized() else self._execution_device + + # 3. Encode input prompt + lora_scale = ( + self.cross_attention_kwargs.get("scale", None) + if self.cross_attention_kwargs is not None + else None + ) + + ( + prompt_embeds, + negative_prompt_embeds, + prompt_mask, + negative_prompt_mask, + ) = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=prompt_embeds, + attention_mask=attention_mask, + negative_prompt_embeds=negative_prompt_embeds, + negative_attention_mask=negative_attention_mask, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + data_type=data_type, + ) + if self.text_encoder_2 is not None: + ( + prompt_embeds_2, + negative_prompt_embeds_2, + prompt_mask_2, + negative_prompt_mask_2, + ) = self.encode_prompt( + prompt, + device, + num_videos_per_prompt, + self.do_classifier_free_guidance, + negative_prompt, + prompt_embeds=None, + attention_mask=None, + negative_prompt_embeds=None, + negative_attention_mask=None, + lora_scale=lora_scale, + clip_skip=self.clip_skip, + text_encoder=self.text_encoder_2, + data_type=data_type, + ) + else: + prompt_embeds_2 = None + negative_prompt_embeds_2 = None + prompt_mask_2 = None + negative_prompt_mask_2 = None + + # For classifier free guidance, we need to do two forward passes. 
+ # Here we concatenate the unconditional and text embeddings into a single batch + # to avoid doing two forward passes + if self.do_classifier_free_guidance: + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) + if prompt_mask is not None: + prompt_mask = torch.cat([negative_prompt_mask, prompt_mask]) + if prompt_embeds_2 is not None: + prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2]) + if prompt_mask_2 is not None: + prompt_mask_2 = torch.cat([negative_prompt_mask_2, prompt_mask_2]) + + + # 4. Prepare timesteps + extra_set_timesteps_kwargs = self.prepare_extra_func_kwargs( + self.scheduler.set_timesteps, {"n_tokens": n_tokens} + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + sigmas, + **extra_set_timesteps_kwargs, + ) + + if "884" in vae_ver: + video_length = (video_length - 1) // 4 + 1 + elif "888" in vae_ver: + video_length = (video_length - 1) // 8 + 1 + else: + video_length = video_length + + # 5. Prepare latent variables + num_channels_latents = self.transformer.config.in_channels + latents = self.prepare_latents( + batch_size * num_videos_per_prompt, + num_channels_latents, + height, + width, + video_length, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_func_kwargs( + self.scheduler.step, + {"generator": generator, "eta": eta}, + ) + + target_dtype = PRECISION_TO_TYPE[self.args.precision] + autocast_enabled = ( + target_dtype != torch.float32 + ) and not self.args.disable_autocast + vae_dtype = PRECISION_TO_TYPE[self.args.vae_precision] + vae_autocast_enabled = ( + vae_dtype != torch.float32 + ) and not self.args.disable_autocast + + # 7. Denoising loop + num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order + self._num_timesteps = len(timesteps) + + # if is_progress_bar: + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + # expand the latents if we are doing classifier free guidance + latent_model_input = ( + torch.cat([latents] * 2) + if self.do_classifier_free_guidance + else latents + ) + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t + ) + + t_expand = t.repeat(latent_model_input.shape[0]) + guidance_expand = ( + torch.tensor( + [embedded_guidance_scale] * latent_model_input.shape[0], + dtype=torch.float32, + device=device, + ).to(target_dtype) + * 1000.0 + if embedded_guidance_scale is not None + else None + ) + + # predict the noise residual + with torch.autocast( + device_type="cuda", dtype=target_dtype, enabled=autocast_enabled + ): + noise_pred = self.transformer( # For an input image (129, 192, 336) (1, 256, 256) + latent_model_input, # [2, 16, 33, 24, 42] + t_expand, # [2] + text_states=prompt_embeds, # [2, 256, 4096] + text_mask=prompt_mask, # [2, 256] + text_states_2=prompt_embeds_2, # [2, 768] + freqs_cos=freqs_cis[0], # [seqlen, head_dim] + freqs_sin=freqs_cis[1], # [seqlen, head_dim] + guidance=guidance_expand, + return_dict=True, + )[ + "x" + ] + + # perform guidance + if self.do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + self.guidance_scale * ( + noise_pred_text - noise_pred_uncond + ) + + if self.do_classifier_free_guidance and self.guidance_rescale > 0.0: + # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + noise_pred = rescale_noise_cfg( + noise_pred, + noise_pred_text, + guidance_rescale=self.guidance_rescale, + ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = self.scheduler.step( + noise_pred, t, latents, **extra_step_kwargs, return_dict=False + )[0] + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + negative_prompt_embeds = callback_outputs.pop( + "negative_prompt_embeds", negative_prompt_embeds + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + if progress_bar is not None: + progress_bar.update() + if callback is not None and i % callback_steps == 0: + step_idx = i // getattr(self.scheduler, "order", 1) + callback(step_idx, t, latents) + + if not output_type == "latent": + expand_temporal_dim = False + if len(latents.shape) == 4: + if isinstance(self.vae, AutoencoderKLCausal3D): + latents = latents.unsqueeze(2) + expand_temporal_dim = True + elif len(latents.shape) == 5: + pass + else: + raise ValueError( + f"Only support latents with shape (b, c, h, w) or (b, c, f, h, w), but got {latents.shape}." + ) + + if ( + hasattr(self.vae.config, "shift_factor") + and self.vae.config.shift_factor + ): + latents = ( + latents / self.vae.config.scaling_factor + + self.vae.config.shift_factor + ) + else: + latents = latents / self.vae.config.scaling_factor + + with torch.autocast( + device_type="cuda", dtype=vae_dtype, enabled=vae_autocast_enabled + ): + if enable_tiling: + self.vae.enable_tiling() + image = self.vae.decode( + latents, return_dict=False, generator=generator + )[0] + else: + image = self.vae.decode( + latents, return_dict=False, generator=generator + )[0] + + if expand_temporal_dim or image.shape[2] == 1: + image = image.squeeze(2) + + else: + image = latents + + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 + image = image.cpu().float() + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return image + + return HunyuanVideoPipelineOutput(videos=image) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/posemb_layers.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/posemb_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..dfce82c690540d17a55a51b7997ee7ceb0bdbf44 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/posemb_layers.py @@ -0,0 +1,310 @@ +import torch +from typing import Union, Tuple, List + + +def _to_tuple(x, dim=2): + if isinstance(x, int): + return (x,) * dim + elif len(x) == dim: + return x + else: + raise ValueError(f"Expected length {dim} or int, but got {x}") + + +def get_meshgrid_nd(start, *args, dim=2): + """ + Get n-D meshgrid with start, stop and num. + + Args: + start (int or tuple): If len(args) == 0, start is num; If len(args) == 1, start is start, args[0] is stop, + step is 1; If len(args) == 2, start is start, args[0] is stop, args[1] is num. For n-dim, start/stop/num + should be int or n-tuple. 
If n-tuple is provided, the meshgrid will be stacked following the dim order in + n-tuples. + *args: See above. + dim (int): Dimension of the meshgrid. Defaults to 2. + + Returns: + grid (np.ndarray): [dim, ...] + """ + if len(args) == 0: + # start is grid_size + num = _to_tuple(start, dim=dim) + start = (0,) * dim + stop = num + elif len(args) == 1: + # start is start, args[0] is stop, step is 1 + start = _to_tuple(start, dim=dim) + stop = _to_tuple(args[0], dim=dim) + num = [stop[i] - start[i] for i in range(dim)] + elif len(args) == 2: + # start is start, args[0] is stop, args[1] is num + start = _to_tuple(start, dim=dim) # Left-Top eg: 12,0 + stop = _to_tuple(args[0], dim=dim) # Right-Bottom eg: 20,32 + num = _to_tuple(args[1], dim=dim) # Target Size eg: 32,124 + else: + raise ValueError(f"len(args) should be 0, 1 or 2, but got {len(args)}") + + # PyTorch implement of np.linspace(start[i], stop[i], num[i], endpoint=False) + axis_grid = [] + for i in range(dim): + a, b, n = start[i], stop[i], num[i] + g = torch.linspace(a, b, n + 1, dtype=torch.float32)[:n] + axis_grid.append(g) + grid = torch.meshgrid(*axis_grid, indexing="ij") # dim x [W, H, D] + grid = torch.stack(grid, dim=0) # [dim, W, H, D] + + return grid + + +################################################################################# +# Rotary Positional Embedding Functions # +################################################################################# +# https://github.com/meta-llama/llama/blob/be327c427cc5e89cc1d3ab3d3fec4484df771245/llama/model.py#L80 + + +def reshape_for_broadcast( + freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], + x: torch.Tensor, + head_first=False, +): + """ + Reshape frequency tensor for broadcasting it with another tensor. + + This function reshapes the frequency tensor to have the same shape as the target tensor 'x' + for the purpose of broadcasting the frequency tensor during element-wise operations. + + Notes: + When using FlashMHAModified, head_first should be False. + When using Attention, head_first should be True. + + Args: + freqs_cis (Union[torch.Tensor, Tuple[torch.Tensor]]): Frequency tensor to be reshaped. + x (torch.Tensor): Target tensor for broadcasting compatibility. + head_first (bool): head dimension first (except batch dim) or not. + + Returns: + torch.Tensor: Reshaped frequency tensor. + + Raises: + AssertionError: If the frequency tensor doesn't match the expected shape. + AssertionError: If the target tensor 'x' doesn't have the expected number of dimensions. 
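    Example (illustrative, shapes assumed):
        For `x` of shape [B, S, H, D] with `head_first=False` and real-space `freqs_cis = (cos, sin)`, each of
        shape [S, D], the returned cos/sin are reshaped to [1, S, 1, D] so they broadcast over the batch and
        head dimensions.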
+ """ + ndim = x.ndim + assert 0 <= 1 < ndim + + if isinstance(freqs_cis, tuple): + # freqs_cis: (cos, sin) in real space + if head_first: + assert freqs_cis[0].shape == ( + x.shape[-2], + x.shape[-1], + ), f"freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}" + shape = [ + d if i == ndim - 2 or i == ndim - 1 else 1 + for i, d in enumerate(x.shape) + ] + else: + assert freqs_cis[0].shape == ( + x.shape[1], + x.shape[-1], + ), f"freqs_cis shape {freqs_cis[0].shape} does not match x shape {x.shape}" + shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] + return freqs_cis[0].view(*shape), freqs_cis[1].view(*shape) + else: + # freqs_cis: values in complex space + if head_first: + assert freqs_cis.shape == ( + x.shape[-2], + x.shape[-1], + ), f"freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}" + shape = [ + d if i == ndim - 2 or i == ndim - 1 else 1 + for i, d in enumerate(x.shape) + ] + else: + assert freqs_cis.shape == ( + x.shape[1], + x.shape[-1], + ), f"freqs_cis shape {freqs_cis.shape} does not match x shape {x.shape}" + shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def rotate_half(x): + x_real, x_imag = ( + x.float().reshape(*x.shape[:-1], -1, 2).unbind(-1) + ) # [B, S, H, D//2] + return torch.stack([-x_imag, x_real], dim=-1).flatten(3) + + +def apply_rotary_emb( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]], + head_first: bool = False, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Apply rotary embeddings to input tensors using the given frequency tensor. + + This function applies rotary embeddings to the given query 'xq' and key 'xk' tensors using the provided + frequency tensor 'freqs_cis'. The input tensors are reshaped as complex numbers, and the frequency tensor + is reshaped for broadcasting compatibility. The resulting tensors contain rotary embeddings and are + returned as real tensors. + + Args: + xq (torch.Tensor): Query tensor to apply rotary embeddings. [B, S, H, D] + xk (torch.Tensor): Key tensor to apply rotary embeddings. [B, S, H, D] + freqs_cis (torch.Tensor or tuple): Precomputed frequency tensor for complex exponential. + head_first (bool): head dimension first (except batch dim) or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. 
+ + """ + xk_out = None + if isinstance(freqs_cis, tuple): + cos, sin = reshape_for_broadcast(freqs_cis, xq, head_first) # [S, D] + cos, sin = cos.to(xq.device), sin.to(xq.device) + # real * cos - imag * sin + # imag * cos + real * sin + xq_out = (xq.float() * cos + rotate_half(xq.float()) * sin).type_as(xq) + xk_out = (xk.float() * cos + rotate_half(xk.float()) * sin).type_as(xk) + else: + # view_as_complex will pack [..., D/2, 2](real) to [..., D/2](complex) + xq_ = torch.view_as_complex( + xq.float().reshape(*xq.shape[:-1], -1, 2) + ) # [B, S, H, D//2] + freqs_cis = reshape_for_broadcast(freqs_cis, xq_, head_first).to( + xq.device + ) # [S, D//2] --> [1, S, 1, D//2] + # (real, imag) * (cos, sin) = (real * cos - imag * sin, imag * cos + real * sin) + # view_as_real will expand [..., D/2](complex) to [..., D/2, 2](real) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3).type_as(xq) + xk_ = torch.view_as_complex( + xk.float().reshape(*xk.shape[:-1], -1, 2) + ) # [B, S, H, D//2] + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3).type_as(xk) + + return xq_out, xk_out + + +def get_nd_rotary_pos_embed( + rope_dim_list, + start, + *args, + theta=10000.0, + use_real=False, + theta_rescale_factor: Union[float, List[float]] = 1.0, + interpolation_factor: Union[float, List[float]] = 1.0, +): + """ + This is a n-d version of precompute_freqs_cis, which is a RoPE for tokens with n-d structure. + + Args: + rope_dim_list (list of int): Dimension of each rope. len(rope_dim_list) should equal to n. + sum(rope_dim_list) should equal to head_dim of attention layer. + start (int | tuple of int | list of int): If len(args) == 0, start is num; If len(args) == 1, start is start, + args[0] is stop, step is 1; If len(args) == 2, start is start, args[0] is stop, args[1] is num. + *args: See above. + theta (float): Scaling factor for frequency computation. Defaults to 10000.0. + use_real (bool): If True, return real part and imaginary part separately. Otherwise, return complex numbers. + Some libraries such as TensorRT does not support complex64 data type. So it is useful to provide a real + part and an imaginary part separately. + theta_rescale_factor (float): Rescale factor for theta. Defaults to 1.0. 
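        interpolation_factor (float): Position interpolation factor; positions are multiplied by it before the
            outer product with the frequencies. Defaults to 1.0.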
+ + Returns: + pos_embed (torch.Tensor): [HW, D/2] + """ + + grid = get_meshgrid_nd( + start, *args, dim=len(rope_dim_list) + ) # [3, W, H, D] / [2, W, H] + + if isinstance(theta_rescale_factor, int) or isinstance(theta_rescale_factor, float): + theta_rescale_factor = [theta_rescale_factor] * len(rope_dim_list) + elif isinstance(theta_rescale_factor, list) and len(theta_rescale_factor) == 1: + theta_rescale_factor = [theta_rescale_factor[0]] * len(rope_dim_list) + assert len(theta_rescale_factor) == len( + rope_dim_list + ), "len(theta_rescale_factor) should equal to len(rope_dim_list)" + + if isinstance(interpolation_factor, int) or isinstance(interpolation_factor, float): + interpolation_factor = [interpolation_factor] * len(rope_dim_list) + elif isinstance(interpolation_factor, list) and len(interpolation_factor) == 1: + interpolation_factor = [interpolation_factor[0]] * len(rope_dim_list) + assert len(interpolation_factor) == len( + rope_dim_list + ), "len(interpolation_factor) should equal to len(rope_dim_list)" + + # use 1/ndim of dimensions to encode grid_axis + embs = [] + for i in range(len(rope_dim_list)): + emb = get_1d_rotary_pos_embed( + rope_dim_list[i], + grid[i].reshape(-1), + theta, + use_real=use_real, + theta_rescale_factor=theta_rescale_factor[i], + interpolation_factor=interpolation_factor[i], + ) # 2 x [WHD, rope_dim_list[i]] + embs.append(emb) + + if use_real: + cos = torch.cat([emb[0] for emb in embs], dim=1) # (WHD, D/2) + sin = torch.cat([emb[1] for emb in embs], dim=1) # (WHD, D/2) + return cos, sin + else: + emb = torch.cat(embs, dim=1) # (WHD, D/2) + return emb + + +def get_1d_rotary_pos_embed( + dim: int, + pos: Union[torch.FloatTensor, int], + theta: float = 10000.0, + use_real: bool = False, + theta_rescale_factor: float = 1.0, + interpolation_factor: float = 1.0, +) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + Precompute the frequency tensor for complex exponential (cis) with given dimensions. + (Note: `cis` means `cos + i * sin`, where i is the imaginary unit.) + + This function calculates a frequency tensor with complex exponential using the given dimension 'dim' + and the end index 'end'. The 'theta' parameter scales the frequencies. + The returned tensor contains complex values in complex64 data type. + + Args: + dim (int): Dimension of the frequency tensor. + pos (int or torch.FloatTensor): Position indices for the frequency tensor. [S] or scalar + theta (float, optional): Scaling factor for frequency computation. Defaults to 10000.0. + use_real (bool, optional): If True, return real part and imaginary part separately. + Otherwise, return complex numbers. + theta_rescale_factor (float, optional): Rescale factor for theta. Defaults to 1.0. + + Returns: + freqs_cis: Precomputed frequency tensor with complex exponential. [S, D/2] + freqs_cos, freqs_sin: Precomputed frequency tensor with real and imaginary parts separately. 
[S, D] + """ + if isinstance(pos, int): + pos = torch.arange(pos).float() + + # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning + # has some connection to NTK literature + if theta_rescale_factor != 1.0: + theta *= theta_rescale_factor ** (dim / (dim - 2)) + + freqs = 1.0 / ( + theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim) + ) # [D/2] + # assert interpolation_factor == 1.0, f"interpolation_factor: {interpolation_factor}" + freqs = torch.outer(pos * interpolation_factor, freqs) # [S, D/2] + if use_real: + freqs_cos = freqs.cos().repeat_interleave(2, dim=1) # [S, D] + freqs_sin = freqs.sin().repeat_interleave(2, dim=1) # [S, D] + return freqs_cos, freqs_sin + else: + freqs_cis = torch.polar( + torch.ones_like(freqs), freqs + ) # complex64 # [S, D/2] + return freqs_cis diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/text_encoder.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/text_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f143ddc7b0a44503d9facfb4f7cc0feaa2a9866b --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/text_encoder.py @@ -0,0 +1,710 @@ +from dataclasses import dataclass +import json +import os +from typing import Optional, Tuple, Union +from copy import deepcopy + +import torch +import torch.nn as nn +from transformers import ( + CLIPTextModel, + CLIPTokenizer, + AutoTokenizer, + AutoModel, + CLIPConfig, + LlamaForCausalLM, + LlamaConfig, +) +from transformers.utils import ModelOutput +from transformers.models.llama import LlamaModel +from safetensors.torch import load_file +from accelerate import init_empty_weights + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +CLIP_L_HUGGINGFACE_MODEL_ID = "openai/clip-vit-large-patch14" +LLAVA_HUGGINGFACE_MODEL_ID = "xtuner/llava-llama-3-8b-v1_1-transformers" + +CLIP_CONFIG = { + "_name_or_path": "clip-vit-large-patch14/", + "architectures": ["CLIPModel"], + "initializer_factor": 1.0, + "logit_scale_init_value": 2.6592, + "model_type": "clip", + "projection_dim": 768, + # "text_config": { + "_name_or_path": "", + "add_cross_attention": False, + "architectures": None, + "attention_dropout": 0.0, + "bad_words_ids": None, + "bos_token_id": 0, + "chunk_size_feed_forward": 0, + "cross_attention_hidden_size": None, + "decoder_start_token_id": None, + "diversity_penalty": 0.0, + "do_sample": False, + "dropout": 0.0, + "early_stopping": False, + "encoder_no_repeat_ngram_size": 0, + "eos_token_id": 2, + "finetuning_task": None, + "forced_bos_token_id": None, + "forced_eos_token_id": None, + "hidden_act": "quick_gelu", + "hidden_size": 768, + "id2label": {"0": "LABEL_0", "1": "LABEL_1"}, + "initializer_factor": 1.0, + "initializer_range": 0.02, + "intermediate_size": 3072, + "is_decoder": False, + "is_encoder_decoder": False, + "label2id": {"LABEL_0": 0, "LABEL_1": 1}, + "layer_norm_eps": 1e-05, + "length_penalty": 1.0, + "max_length": 20, + "max_position_embeddings": 77, + "min_length": 0, + "model_type": "clip_text_model", + "no_repeat_ngram_size": 0, + "num_attention_heads": 12, + "num_beam_groups": 1, + "num_beams": 1, + "num_hidden_layers": 12, + "num_return_sequences": 1, + "output_attentions": False, + "output_hidden_states": False, + "output_scores": False, + "pad_token_id": 1, + "prefix": None, + "problem_type": None, + "projection_dim": 768, + "pruned_heads": {}, + "remove_invalid_values": 
False, + "repetition_penalty": 1.0, + "return_dict": True, + "return_dict_in_generate": False, + "sep_token_id": None, + "task_specific_params": None, + "temperature": 1.0, + "tie_encoder_decoder": False, + "tie_word_embeddings": True, + "tokenizer_class": None, + "top_k": 50, + "top_p": 1.0, + "torch_dtype": None, + "torchscript": False, + "transformers_version": "4.16.0.dev0", + "use_bfloat16": False, + "vocab_size": 49408, + # }, + # "text_config_dict": { + "hidden_size": 768, + "intermediate_size": 3072, + "num_attention_heads": 12, + "num_hidden_layers": 12, + "projection_dim": 768, + # }, + # "torch_dtype": "float32", + # "transformers_version": null +} + +LLAMA_CONFIG = { + "architectures": ["LlamaForCausalLM"], + "attention_bias": False, + "attention_dropout": 0.0, + "bos_token_id": 128000, + "eos_token_id": 128001, + "head_dim": 128, + "hidden_act": "silu", + "hidden_size": 4096, + "initializer_range": 0.02, + "intermediate_size": 14336, + "max_position_embeddings": 8192, + "mlp_bias": False, + "model_type": "llama", + "num_attention_heads": 32, + "num_hidden_layers": 32, + "num_key_value_heads": 8, + "pretraining_tp": 1, + "rms_norm_eps": 1e-05, + "rope_scaling": None, + "rope_theta": 500000.0, + "tie_word_embeddings": False, + "torch_dtype": "float16", + "transformers_version": "4.46.3", + "use_cache": True, + "vocab_size": 128320, +} + +# When using decoder-only models, we must provide a prompt template to instruct the text encoder +# on how to generate the text. +# -------------------------------------------------------------------- +PROMPT_TEMPLATE_ENCODE = ( + "<|start_header_id|>system<|end_header_id|>\n\nDescribe the image by detailing the color, shape, size, texture, " + "quantity, text, spatial relationships of the objects and background:<|eot_id|>" + "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>" +) +PROMPT_TEMPLATE_ENCODE_VIDEO = ( + "<|start_header_id|>system<|end_header_id|>\n\nDescribe the video by detailing the following aspects: " + "1. The main content and theme of the video." + "2. The color, shape, size, texture, quantity, text, and spatial relationships of the objects." + "3. Actions, events, behaviors temporal relationships, physical movement changes of the objects." + "4. background environment, light, style and atmosphere." + "5. 
camera angles, movements, and transitions used in the video:<|eot_id|>" + "<|start_header_id|>user<|end_header_id|>\n\n{}<|eot_id|>" +) + +NEGATIVE_PROMPT = "Aerial view, aerial view, overexposed, low quality, deformation, a poor composition, bad hands, bad teeth, bad eyes, bad limbs, distortion" + +PROMPT_TEMPLATE = { + "dit-llm-encode": { + "template": PROMPT_TEMPLATE_ENCODE, + "crop_start": 36, + }, + "dit-llm-encode-video": { + "template": PROMPT_TEMPLATE_ENCODE_VIDEO, + "crop_start": 95, + }, +} + + +def use_default(value, default): + return value if value is not None else default + + +def load_clip_l(text_encoder_path: str, dtype: Optional[Union[str, torch.dtype]] = None): + if os.path.isdir(text_encoder_path): + # load from directory, configs are in the directory + text_encoder = CLIPTextModel.from_pretrained(text_encoder_path, torch_dtype=dtype) + else: + # load from file, we create the model with the appropriate config + config = CLIPConfig(**CLIP_CONFIG) + with init_empty_weights(): + text_encoder = CLIPTextModel._from_config(config, torch_dtype=dtype) + + state_dict = load_file(text_encoder_path) + + text_encoder.load_state_dict(state_dict, strict=True, assign=True) + # if dtype is not None: + # text_encoder.to(dtype=dtype) + + return text_encoder + + +def load_clip_l_tokenizer(tokenizer_path: str): + if os.path.isdir(tokenizer_path): + tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path, max_length=77) + else: + # load from Hugging Face + logger.info(f"Loading tokenizer from Hugging Face: {CLIP_L_HUGGINGFACE_MODEL_ID}") + tokenizer = CLIPTokenizer.from_pretrained(CLIP_L_HUGGINGFACE_MODEL_ID, max_length=77) + + return tokenizer + + +def load_llm(text_encoder_path: str, dtype: Optional[Union[str, torch.dtype]] = None): + if os.path.isdir(text_encoder_path): + # load from directory, configs are in the directory + text_encoder = AutoModel.from_pretrained(text_encoder_path, low_cpu_mem_usage=True, torch_dtype=dtype) + else: + # load from file, we create the model with the appropriate config + config = LlamaConfig(**LLAMA_CONFIG) + with init_empty_weights(): + text_encoder = LlamaForCausalLM._from_config(config, torch_dtype=dtype) + + state_dict = load_file(text_encoder_path) + + # support weights from ComfyUI + if "tokenizer" in state_dict: + state_dict.pop("tokenizer") + + text_encoder.load_state_dict(state_dict, strict=True, assign=True) + + return text_encoder + + +def load_llm_tokenizer(tokenizer_path: str, padding_side="right"): + if os.path.isdir(tokenizer_path): + tokenizer = AutoTokenizer.from_pretrained(tokenizer_path) + else: + # load from Hugging Face + logger.info(f"Loading tokenizer from Hugging Face: {LLAVA_HUGGINGFACE_MODEL_ID}") + tokenizer = AutoTokenizer.from_pretrained(LLAVA_HUGGINGFACE_MODEL_ID, padding_side=padding_side) + + return tokenizer + + +def load_text_encoder( + text_encoder_type: str, + text_encoder_path: str, + text_encoder_dtype: Optional[Union[str, torch.dtype]] = None, +): + logger.info(f"Loading text encoder model ({text_encoder_type}) from: {text_encoder_path}") + + # reduce peak memory usage by specifying the dtype of the model + dtype = text_encoder_dtype + if text_encoder_type == "clipL": + text_encoder = load_clip_l(text_encoder_path, dtype=dtype) + text_encoder.final_layer_norm = text_encoder.text_model.final_layer_norm + elif text_encoder_type == "llm": + text_encoder = load_llm(text_encoder_path, dtype=dtype) + if hasattr(text_encoder, "norm"): + text_encoder.final_layer_norm = text_encoder.norm # by from_pretrained + else: + 
text_encoder.final_layer_norm = text_encoder.model.norm # by _from_config + else: + raise ValueError(f"Unsupported text encoder type: {text_encoder_type}") + # from_pretrained will ensure that the model is in eval mode. + + if dtype is not None: + text_encoder = text_encoder.to(dtype=dtype) + + text_encoder.requires_grad_(False) + + logger.info(f"Text encoder to dtype: {text_encoder.dtype}") + return text_encoder, text_encoder_path + + +def load_tokenizer(tokenizer_type, tokenizer_path=None, padding_side="right"): + logger.info(f"Loading tokenizer ({tokenizer_type}) from: {tokenizer_path}") + + if tokenizer_type == "clipL": + tokenizer = load_clip_l_tokenizer(tokenizer_path) + elif tokenizer_type == "llm": + tokenizer = load_llm_tokenizer(tokenizer_path, padding_side=padding_side) + else: + raise ValueError(f"Unsupported tokenizer type: {tokenizer_type}") + + return tokenizer, tokenizer_path + + +@dataclass +class TextEncoderModelOutput(ModelOutput): + """ + Base class for model's outputs that also contains a pooling of the last hidden states. + + Args: + hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: + hidden_states_list (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + text_outputs (`list`, *optional*, returned when `return_texts=True` is passed): + List of decoded texts. + """ + + hidden_state: torch.FloatTensor = None + attention_mask: Optional[torch.LongTensor] = None + hidden_states_list: Optional[Tuple[torch.FloatTensor, ...]] = None + text_outputs: Optional[list] = None + + +class TextEncoder(nn.Module): + def __init__( + self, + text_encoder_type: str, + max_length: int, + text_encoder_dtype: Optional[Union[str, torch.dtype]] = None, + text_encoder_path: Optional[str] = None, + tokenizer_type: Optional[str] = None, + tokenizer_path: Optional[str] = None, + output_key: Optional[str] = None, + use_attention_mask: bool = True, + input_max_length: Optional[int] = None, + prompt_template: Optional[dict] = None, + prompt_template_video: Optional[dict] = None, + hidden_state_skip_layer: Optional[int] = None, + apply_final_norm: bool = False, + reproduce: bool = False, + ): + super().__init__() + self.text_encoder_type = text_encoder_type + self.max_length = max_length + # self.precision = text_encoder_precision + self.model_path = text_encoder_path + self.tokenizer_type = tokenizer_type if tokenizer_type is not None else text_encoder_type + self.tokenizer_path = tokenizer_path if tokenizer_path is not None else text_encoder_path + self.use_attention_mask = use_attention_mask + if prompt_template_video is not None: + assert use_attention_mask is True, "Attention mask is True required when training videos." 
+ self.input_max_length = input_max_length if input_max_length is not None else max_length + self.prompt_template = prompt_template + self.prompt_template_video = prompt_template_video + self.hidden_state_skip_layer = hidden_state_skip_layer + self.apply_final_norm = apply_final_norm + self.reproduce = reproduce + + self.use_template = self.prompt_template is not None + if self.use_template: + assert ( + isinstance(self.prompt_template, dict) and "template" in self.prompt_template + ), f"`prompt_template` must be a dictionary with a key 'template', got {self.prompt_template}" + assert "{}" in str(self.prompt_template["template"]), ( + "`prompt_template['template']` must contain a placeholder `{}` for the input text, " + f"got {self.prompt_template['template']}" + ) + + self.use_video_template = self.prompt_template_video is not None + if self.use_video_template: + if self.prompt_template_video is not None: + assert ( + isinstance(self.prompt_template_video, dict) and "template" in self.prompt_template_video + ), f"`prompt_template_video` must be a dictionary with a key 'template', got {self.prompt_template_video}" + assert "{}" in str(self.prompt_template_video["template"]), ( + "`prompt_template_video['template']` must contain a placeholder `{}` for the input text, " + f"got {self.prompt_template_video['template']}" + ) + + if "t5" in text_encoder_type: + self.output_key = output_key or "last_hidden_state" + elif "clip" in text_encoder_type: + self.output_key = output_key or "pooler_output" + elif "llm" in text_encoder_type or "glm" in text_encoder_type: + self.output_key = output_key or "last_hidden_state" + else: + raise ValueError(f"Unsupported text encoder type: {text_encoder_type}") + + self.model, self.model_path = load_text_encoder( + text_encoder_type=self.text_encoder_type, text_encoder_path=self.model_path, text_encoder_dtype=text_encoder_dtype + ) + self.dtype = self.model.dtype + + self.tokenizer, self.tokenizer_path = load_tokenizer( + tokenizer_type=self.tokenizer_type, tokenizer_path=self.tokenizer_path, padding_side="right" + ) + + def __repr__(self): + return f"{self.text_encoder_type} ({self.precision} - {self.model_path})" + + @property + def device(self): + return self.model.device + + @staticmethod + def apply_text_to_template(text, template, prevent_empty_text=True): + """ + Apply text to template. + + Args: + text (str): Input text. + template (str or list): Template string or list of chat conversation. + prevent_empty_text (bool): If Ture, we will prevent the user text from being empty + by adding a space. Defaults to True. + """ + if isinstance(template, str): + # Will send string to tokenizer. Used for llm + return template.format(text) + else: + raise TypeError(f"Unsupported template type: {type(template)}") + + def text2tokens(self, text, data_type="image"): + """ + Tokenize the input text. + + Args: + text (str or list): Input text. 
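            data_type (str): "image" or "video"; selects which prompt template is applied when a template is
                configured. Defaults to "image".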
+ """ + tokenize_input_type = "str" + if self.use_template: + if data_type == "image": + prompt_template = self.prompt_template["template"] + elif data_type == "video": + prompt_template = self.prompt_template_video["template"] + else: + raise ValueError(f"Unsupported data type: {data_type}") + if isinstance(text, (list, tuple)): + text = [self.apply_text_to_template(one_text, prompt_template) for one_text in text] + if isinstance(text[0], list): + tokenize_input_type = "list" + elif isinstance(text, str): + text = self.apply_text_to_template(text, prompt_template) + if isinstance(text, list): + tokenize_input_type = "list" + else: + raise TypeError(f"Unsupported text type: {type(text)}") + + kwargs = dict( + truncation=True, + max_length=self.max_length, + padding="max_length", + return_tensors="pt", + ) + if tokenize_input_type == "str": + return self.tokenizer( + text, + return_length=False, + return_overflowing_tokens=False, + return_attention_mask=True, + **kwargs, + ) + elif tokenize_input_type == "list": + return self.tokenizer.apply_chat_template( + text, + add_generation_prompt=True, + tokenize=True, + return_dict=True, + **kwargs, + ) + else: + raise ValueError(f"Unsupported tokenize_input_type: {tokenize_input_type}") + + def encode( + self, + batch_encoding, + use_attention_mask=None, + output_hidden_states=False, + do_sample=None, + hidden_state_skip_layer=None, + return_texts=False, + data_type="image", + device=None, + ): + """ + Args: + batch_encoding (dict): Batch encoding from tokenizer. + use_attention_mask (bool): Whether to use attention mask. If None, use self.use_attention_mask. + Defaults to None. + output_hidden_states (bool): Whether to output hidden states. If False, return the value of + self.output_key. If True, return the entire output. If set self.hidden_state_skip_layer, + output_hidden_states will be set True. Defaults to False. + do_sample (bool): Whether to sample from the model. Used for Decoder-Only LLMs. Defaults to None. + When self.produce is False, do_sample is set to True by default. + hidden_state_skip_layer (int): Number of hidden states to hidden_state_skip_layer. 0 means the last layer. + If None, self.output_key will be used. Defaults to None. + return_texts (bool): Whether to return the decoded texts. Defaults to False. + """ + device = self.model.device if device is None else device + use_attention_mask = use_default(use_attention_mask, self.use_attention_mask) + hidden_state_skip_layer = use_default(hidden_state_skip_layer, self.hidden_state_skip_layer) + do_sample = use_default(do_sample, not self.reproduce) + attention_mask = batch_encoding["attention_mask"].to(device) if use_attention_mask else None + outputs = self.model( + input_ids=batch_encoding["input_ids"].to(device), + attention_mask=attention_mask, + output_hidden_states=output_hidden_states or hidden_state_skip_layer is not None, + ) + if hidden_state_skip_layer is not None: + last_hidden_state = outputs.hidden_states[-(hidden_state_skip_layer + 1)] + # Real last hidden state already has layer norm applied. So here we only apply it + # for intermediate layers. + if hidden_state_skip_layer > 0 and self.apply_final_norm: + last_hidden_state = self.model.final_layer_norm(last_hidden_state) + else: + last_hidden_state = outputs[self.output_key] + + # Remove hidden states of instruction tokens, only keep prompt tokens. 
+ if self.use_template: + if data_type == "image": + crop_start = self.prompt_template.get("crop_start", -1) + elif data_type == "video": + crop_start = self.prompt_template_video.get("crop_start", -1) + else: + raise ValueError(f"Unsupported data type: {data_type}") + if crop_start > 0: + last_hidden_state = last_hidden_state[:, crop_start:] + attention_mask = attention_mask[:, crop_start:] if use_attention_mask else None + + if output_hidden_states: + return TextEncoderModelOutput(last_hidden_state, attention_mask, outputs.hidden_states) + return TextEncoderModelOutput(last_hidden_state, attention_mask) + + def forward( + self, + text, + use_attention_mask=None, + output_hidden_states=False, + do_sample=False, + hidden_state_skip_layer=None, + return_texts=False, + ): + batch_encoding = self.text2tokens(text) + return self.encode( + batch_encoding, + use_attention_mask=use_attention_mask, + output_hidden_states=output_hidden_states, + do_sample=do_sample, + hidden_state_skip_layer=hidden_state_skip_layer, + return_texts=return_texts, + ) + + +# region HunyanVideo architecture + + +def load_text_encoder_1( + text_encoder_dir: str, device: torch.device, fp8_llm: bool, dtype: Optional[Union[str, torch.dtype]] = None +) -> TextEncoder: + text_encoder_dtype = dtype or torch.float16 + text_encoder_type = "llm" + text_len = 256 + hidden_state_skip_layer = 2 + apply_final_norm = False + reproduce = False + + prompt_template = "dit-llm-encode" + prompt_template = PROMPT_TEMPLATE[prompt_template] + prompt_template_video = "dit-llm-encode-video" + prompt_template_video = PROMPT_TEMPLATE[prompt_template_video] + + crop_start = prompt_template_video["crop_start"] # .get("crop_start", 0) + max_length = text_len + crop_start + + text_encoder_1 = TextEncoder( + text_encoder_type=text_encoder_type, + max_length=max_length, + text_encoder_dtype=text_encoder_dtype, + text_encoder_path=text_encoder_dir, + tokenizer_type=text_encoder_type, + prompt_template=prompt_template, + prompt_template_video=prompt_template_video, + hidden_state_skip_layer=hidden_state_skip_layer, + apply_final_norm=apply_final_norm, + reproduce=reproduce, + ) + text_encoder_1.eval() + + if fp8_llm: + org_dtype = text_encoder_1.dtype + logger.info(f"Moving and casting text encoder to {device} and torch.float8_e4m3fn") + text_encoder_1.to(device=device, dtype=torch.float8_e4m3fn) + + # prepare LLM for fp8 + def prepare_fp8(llama_model: LlamaModel, target_dtype): + def forward_hook(module): + def forward(hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + module.variance_epsilon) + return module.weight.to(input_dtype) * hidden_states.to(input_dtype) + + return forward + + for module in llama_model.modules(): + if module.__class__.__name__ in ["Embedding"]: + # print("set", module.__class__.__name__, "to", target_dtype) + module.to(target_dtype) + if module.__class__.__name__ in ["LlamaRMSNorm"]: + # print("set", module.__class__.__name__, "hooks") + module.forward = forward_hook(module) + + prepare_fp8(text_encoder_1.model, org_dtype) + else: + text_encoder_1.to(device=device) + + return text_encoder_1 + + +def load_text_encoder_2( + text_encoder_dir: str, device: torch.device, dtype: Optional[Union[str, torch.dtype]] = None +) -> TextEncoder: + text_encoder_dtype = dtype or torch.float16 + reproduce = False + + text_encoder_2_type = "clipL" + text_len_2 = 77 + + 
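    # CLIP-L provides the pooled global text embedding: 77-token context and `pooler_output` as the output key
    # (see TextEncoder.__init__ above).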
text_encoder_2 = TextEncoder( + text_encoder_type=text_encoder_2_type, + max_length=text_len_2, + text_encoder_dtype=text_encoder_dtype, + text_encoder_path=text_encoder_dir, + tokenizer_type=text_encoder_2_type, + reproduce=reproduce, + ) + text_encoder_2.eval() + + text_encoder_2.to(device=device) + + return text_encoder_2 + + +# endregion + + +if __name__ == "__main__": + import argparse + from musubi_tuner.utils.model_utils import str_to_dtype + + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + + parser = argparse.ArgumentParser() + parser.add_argument("type", type=str, help="Text Encoder type") + parser.add_argument("path1", type=str, help="Text Encoder directory or file 1") + parser.add_argument("path2", type=str, help="Text Encoder directory or file 2") + parser.add_argument("--dtype", type=str, default=None, help="Data type for Text Encoder") + args = parser.parse_args() + + dtype = str_to_dtype(args.dtype) if args.dtype is not None else torch.float16 + + """ + if args.type == "clipL": + text_encoder_1st = load_clip_l(args.path1, dtype=dtype) + tokenizer_1st = load_clip_l_tokenizer(args.path1) + text_encoder_2nd = load_clip_l(args.path2, dtype=dtype) + tokenizer_2nd = load_clip_l_tokenizer(args.path2) + elif args.type == "llm": + text_encoder_1st = load_llm(args.path1, dtype=dtype) + tokenizer_1st = load_llm_tokenizer(args.path1) + text_encoder_2nd = load_llm(args.path2, dtype=dtype) + tokenizer_2nd = load_llm_tokenizer(args.path2) + + print(f"1st Text Encoder dtype: {text_encoder_1st.dtype}") + print(f"2nd Text Encoder dtype: {text_encoder_2nd.dtype}") + + text_encoder_1st.to(device=device) + text_encoder_2nd.to(device=device) + + test_text = "A cat sitting on a table" + token_ids_1st = tokenizer_1st(test_text, return_tensors="pt")["input_ids"] + token_ids_2nd = tokenizer_2nd(test_text, return_tensors="pt")["input_ids"] + assert torch.allclose(token_ids_1st, token_ids_2nd) + print(f"Token IDs are the same: {token_ids_1st}") + + with torch.no_grad(): + text_encoder_1st_output = text_encoder_1st(token_ids_1st.to(device), output_hidden_states=True) + text_encoder_2nd_output = text_encoder_2nd(token_ids_2nd.to(device), output_hidden_states=True) + print(f"1st Text Encoder output keys: {text_encoder_1st_output.keys()}") + print(f"2nd Text Encoder output keys: {text_encoder_2nd_output.keys()}") + for key in text_encoder_1st_output: + print(f"Checking output: {key}") + assert key in text_encoder_2nd_output, f"Key {key} not in 2nd Text Encoder output" + assert torch.allclose(text_encoder_1st_output[key], text_encoder_2nd_output[key]) + print(f"Outputs are the same: {key}") + print("All outputs are the same.") + """ + + if args.type == "clipL": + text_encoder_1st = load_text_encoder_2(args.path1, device, dtype) + text_encoder_2nd = load_text_encoder_2(args.path2, device, dtype) + elif args.type == "llm": + text_encoder_1st = load_text_encoder_1(args.path1, device, False, dtype) + text_encoder_2nd = load_text_encoder_1(args.path2, device, False, dtype) + print(f"1st Text Encoder dtype: {text_encoder_1st.dtype}") + print(f"2nd Text Encoder dtype: {text_encoder_2nd.dtype}") + + prompt = "A cat sitting on a table" + data_type = "video" # video only, image is not supported + text_inputs_1st = text_encoder_1st.text2tokens(prompt, data_type=data_type) + text_inputs_2nd = text_encoder_2nd.text2tokens(prompt, data_type=data_type) + print(text_inputs_1st) + assert torch.allclose(text_inputs_1st["input_ids"], text_inputs_2nd["input_ids"]) + + with torch.no_grad(): + 
prompt_outputs_1st = text_encoder_1st.encode(text_inputs_1st, data_type=data_type) + prompt_outputs_2nd = text_encoder_2nd.encode(text_inputs_1st, data_type=data_type) + + # prompt_outputs.hidden_state, prompt_outputs.attention_mask + assert torch.allclose(prompt_outputs_1st.hidden_state, prompt_outputs_2nd.hidden_state) + print("Hidden states are the same.") + assert torch.allclose(prompt_outputs_1st.attention_mask, prompt_outputs_2nd.attention_mask) + print("Attention masks are the same.") + print("All outputs are the same.") diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/token_refiner.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/token_refiner.py new file mode 100644 index 0000000000000000000000000000000000000000..9ca576932305e75169153b461312012364b1534b --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/token_refiner.py @@ -0,0 +1,245 @@ +from typing import Optional + +from einops import rearrange +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from musubi_tuner.hunyuan_model.activation_layers import get_activation_layer +from musubi_tuner.hunyuan_model.attention import attention +from musubi_tuner.hunyuan_model.norm_layers import get_norm_layer +from musubi_tuner.hunyuan_model.embed_layers import TimestepEmbedder, TextProjection +from musubi_tuner.hunyuan_model.mlp_layers import MLP +from musubi_tuner.hunyuan_model.modulate_layers import modulate, apply_gate + + +class IndividualTokenRefinerBlock(nn.Module): + def __init__( + self, + hidden_size, + heads_num, + mlp_width_ratio: str = 4.0, + mlp_drop_rate: float = 0.0, + act_type: str = "silu", + qk_norm: bool = False, + qk_norm_type: str = "layer", + qkv_bias: bool = True, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.heads_num = heads_num + head_dim = hidden_size // heads_num + mlp_hidden_dim = int(hidden_size * mlp_width_ratio) + + self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6, **factory_kwargs) + self.self_attn_qkv = nn.Linear(hidden_size, hidden_size * 3, bias=qkv_bias, **factory_kwargs) + qk_norm_layer = get_norm_layer(qk_norm_type) + self.self_attn_q_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.self_attn_k_norm = ( + qk_norm_layer(head_dim, elementwise_affine=True, eps=1e-6, **factory_kwargs) if qk_norm else nn.Identity() + ) + self.self_attn_proj = nn.Linear(hidden_size, hidden_size, bias=qkv_bias, **factory_kwargs) + + self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=True, eps=1e-6, **factory_kwargs) + act_layer = get_activation_layer(act_type) + self.mlp = MLP( + in_channels=hidden_size, + hidden_channels=mlp_hidden_dim, + act_layer=act_layer, + drop=mlp_drop_rate, + **factory_kwargs, + ) + + self.adaLN_modulation = nn.Sequential( + act_layer(), + nn.Linear(hidden_size, 2 * hidden_size, bias=True, **factory_kwargs), + ) + # Zero-initialize the modulation + nn.init.zeros_(self.adaLN_modulation[1].weight) + nn.init.zeros_(self.adaLN_modulation[1].bias) + + self.gradient_checkpointing = False + + def enable_gradient_checkpointing(self): + self.gradient_checkpointing = True + + def disable_gradient_checkpointing(self): + self.gradient_checkpointing = False + + def _forward( + self, + x: torch.Tensor, + c: torch.Tensor, # timestep_aware_representations + 
context_aware_representations + attn_mask: torch.Tensor = None, + ): + gate_msa, gate_mlp = self.adaLN_modulation(c).chunk(2, dim=1) + + norm_x = self.norm1(x) + qkv = self.self_attn_qkv(norm_x) + q, k, v = rearrange(qkv, "B L (K H D) -> K B L H D", K=3, H=self.heads_num) + # Apply QK-Norm if needed + q = self.self_attn_q_norm(q).to(v) + k = self.self_attn_k_norm(k).to(v) + + # Self-Attention + attn = attention(q, k, v, mode="torch", attn_mask=attn_mask) + + x = x + apply_gate(self.self_attn_proj(attn), gate_msa) + + # FFN Layer + x = x + apply_gate(self.mlp(self.norm2(x)), gate_mlp) + + return x + + def forward(self, *args, **kwargs): + if self.training and self.gradient_checkpointing: + return checkpoint(self._forward, *args, use_reentrant=False, **kwargs) + else: + return self._forward(*args, **kwargs) + + +class IndividualTokenRefiner(nn.Module): + def __init__( + self, + hidden_size, + heads_num, + depth, + mlp_width_ratio: float = 4.0, + mlp_drop_rate: float = 0.0, + act_type: str = "silu", + qk_norm: bool = False, + qk_norm_type: str = "layer", + qkv_bias: bool = True, + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.blocks = nn.ModuleList( + [ + IndividualTokenRefinerBlock( + hidden_size=hidden_size, + heads_num=heads_num, + mlp_width_ratio=mlp_width_ratio, + mlp_drop_rate=mlp_drop_rate, + act_type=act_type, + qk_norm=qk_norm, + qk_norm_type=qk_norm_type, + qkv_bias=qkv_bias, + **factory_kwargs, + ) + for _ in range(depth) + ] + ) + + def enable_gradient_checkpointing(self): + for block in self.blocks: + block.enable_gradient_checkpointing() + + def disable_gradient_checkpointing(self): + for block in self.blocks: + block.disable_gradient_checkpointing() + + def forward( + self, + x: torch.Tensor, + c: torch.LongTensor, + mask: Optional[torch.Tensor] = None, + ): + self_attn_mask = None + if mask is not None: + batch_size = mask.shape[0] + seq_len = mask.shape[1] + mask = mask.to(x.device) + # batch_size x 1 x seq_len x seq_len + self_attn_mask_1 = mask.view(batch_size, 1, 1, seq_len).repeat(1, 1, seq_len, 1) + # batch_size x 1 x seq_len x seq_len + self_attn_mask_2 = self_attn_mask_1.transpose(2, 3) + # batch_size x 1 x seq_len x seq_len, 1 for broadcasting of heads_num + self_attn_mask = (self_attn_mask_1 & self_attn_mask_2).bool() + # avoids self-attention weight being NaN for padding tokens + self_attn_mask[:, :, :, 0] = True + + for block in self.blocks: + x = block(x, c, self_attn_mask) + return x + + +class SingleTokenRefiner(nn.Module): + """ + A single token refiner block for llm text embedding refine. + """ + + def __init__( + self, + in_channels, + hidden_size, + heads_num, + depth, + mlp_width_ratio: float = 4.0, + mlp_drop_rate: float = 0.0, + act_type: str = "silu", + qk_norm: bool = False, + qk_norm_type: str = "layer", + qkv_bias: bool = True, + attn_mode: str = "torch", + dtype: Optional[torch.dtype] = None, + device: Optional[torch.device] = None, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.attn_mode = attn_mode + assert self.attn_mode == "torch", "Only support 'torch' mode for token refiner." 
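        # Conditioning sketch: every refiner block receives c = t_embedder(t) + c_embedder(pooled_text), where
        # pooled_text is a plain or mask-weighted mean over the token sequence (see forward() below).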
+ + self.input_embedder = nn.Linear(in_channels, hidden_size, bias=True, **factory_kwargs) + + act_layer = get_activation_layer(act_type) + # Build timestep embedding layer + self.t_embedder = TimestepEmbedder(hidden_size, act_layer, **factory_kwargs) + # Build context embedding layer + self.c_embedder = TextProjection(in_channels, hidden_size, act_layer, **factory_kwargs) + + self.individual_token_refiner = IndividualTokenRefiner( + hidden_size=hidden_size, + heads_num=heads_num, + depth=depth, + mlp_width_ratio=mlp_width_ratio, + mlp_drop_rate=mlp_drop_rate, + act_type=act_type, + qk_norm=qk_norm, + qk_norm_type=qk_norm_type, + qkv_bias=qkv_bias, + **factory_kwargs, + ) + + def enable_gradient_checkpointing(self): + self.individual_token_refiner.enable_gradient_checkpointing() + + def disable_gradient_checkpointing(self): + self.individual_token_refiner.disable_gradient_checkpointing() + + def forward( + self, + x: torch.Tensor, + t: torch.LongTensor, + mask: Optional[torch.LongTensor] = None, + ): + timestep_aware_representations = self.t_embedder(t) + + if mask is None: + context_aware_representations = x.mean(dim=1) + else: + mask_float = mask.float().unsqueeze(-1) # [b, s1, 1] + context_aware_representations = (x * mask_float).sum(dim=1) / mask_float.sum(dim=1) + context_aware_representations = self.c_embedder(context_aware_representations) + c = timestep_aware_representations + context_aware_representations + + x = self.input_embedder(x) + + x = self.individual_token_refiner(x, c, mask) + + return x diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/vae.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..93cc83ed1021f0c1ea000acb26a1909f36b54483 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hunyuan_model/vae.py @@ -0,0 +1,446 @@ +from dataclasses import dataclass +import json +from typing import Optional, Tuple, Union +from pathlib import Path + +import numpy as np +import torch +import torch.nn as nn + +from diffusers.utils import BaseOutput, is_torch_version +from diffusers.utils.torch_utils import randn_tensor +from diffusers.models.attention_processor import SpatialNorm +from musubi_tuner.modules.unet_causal_3d_blocks import CausalConv3d, UNetMidBlockCausal3D, get_down_block3d, get_up_block3d + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +SCALING_FACTOR = 0.476986 +VAE_VER = "884-16c-hy" # We don't support other versions currently + + +def load_vae( + vae_type: str = "884-16c-hy", + vae_dtype: Optional[Union[str, torch.dtype]] = None, + sample_size: tuple = None, + vae_path: str = None, + device=None, +): + """the fucntion to load the 3D VAE model + + Args: + vae_type (str): the type of the 3D VAE model. Defaults to "884-16c-hy". + vae_precision (str, optional): the precision to load vae. Defaults to None. + sample_size (tuple, optional): the tiling size. Defaults to None. + vae_path (str, optional): the path to vae. Defaults to None. + logger (_type_, optional): logger. Defaults to None. + device (_type_, optional): device to load vae. Defaults to None. 
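    Returns:
        Tuple of (vae, vae_path, spatial_compression_ratio, time_compression_ratio).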
+ """ + if vae_path is None: + vae_path = VAE_PATH[vae_type] + + logger.info(f"Loading 3D VAE model ({vae_type}) from: {vae_path}") + + # use fixed config for Hunyuan's VAE + CONFIG_JSON = """{ + "_class_name": "AutoencoderKLCausal3D", + "_diffusers_version": "0.4.2", + "act_fn": "silu", + "block_out_channels": [ + 128, + 256, + 512, + 512 + ], + "down_block_types": [ + "DownEncoderBlockCausal3D", + "DownEncoderBlockCausal3D", + "DownEncoderBlockCausal3D", + "DownEncoderBlockCausal3D" + ], + "in_channels": 3, + "latent_channels": 16, + "layers_per_block": 2, + "norm_num_groups": 32, + "out_channels": 3, + "sample_size": 256, + "sample_tsize": 64, + "up_block_types": [ + "UpDecoderBlockCausal3D", + "UpDecoderBlockCausal3D", + "UpDecoderBlockCausal3D", + "UpDecoderBlockCausal3D" + ], + "scaling_factor": 0.476986, + "time_compression_ratio": 4, + "mid_block_add_attention": true + }""" + + # config = AutoencoderKLCausal3D.load_config(vae_path) + config = json.loads(CONFIG_JSON) + + # import here to avoid circular import + from musubi_tuner.hunyuan_model.autoencoder_kl_causal_3d import AutoencoderKLCausal3D + + if sample_size: + vae = AutoencoderKLCausal3D.from_config(config, sample_size=sample_size) + else: + vae = AutoencoderKLCausal3D.from_config(config) + + # vae_ckpt = Path(vae_path) / "pytorch_model.pt" + # assert vae_ckpt.exists(), f"VAE checkpoint not found: {vae_ckpt}" + + if vae_path.endswith(".safetensors"): + from safetensors.torch import load_file + ckpt = load_file(vae_path) + else: + ckpt = torch.load(vae_path, map_location=vae.device, weights_only=True) + if "state_dict" in ckpt: + ckpt = ckpt["state_dict"] + if any(k.startswith("vae.") for k in ckpt.keys()): + ckpt = {k.replace("vae.", ""): v for k, v in ckpt.items() if k.startswith("vae.")} + vae.load_state_dict(ckpt) + + spatial_compression_ratio = vae.config.spatial_compression_ratio + time_compression_ratio = vae.config.time_compression_ratio + + if vae_dtype is not None: + vae = vae.to(vae_dtype) + + vae.requires_grad_(False) + + logger.info(f"VAE to dtype: {vae.dtype}") + + if device is not None: + vae = vae.to(device) + + vae.eval() + + return vae, vae_path, spatial_compression_ratio, time_compression_ratio + + +@dataclass +class DecoderOutput(BaseOutput): + r""" + Output of decoding method. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + The decoded output sample from the last layer of the model. + """ + + sample: torch.FloatTensor + + +class EncoderCausal3D(nn.Module): + r""" + The `EncoderCausal3D` layer of a variational autoencoder that encodes its input into a latent representation. + """ + + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + down_block_types: Tuple[str, ...] = ("DownEncoderBlockCausal3D",), + block_out_channels: Tuple[int, ...] 
= (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + act_fn: str = "silu", + double_z: bool = True, + mid_block_add_attention=True, + time_compression_ratio: int = 4, + spatial_compression_ratio: int = 8, + ): + super().__init__() + self.layers_per_block = layers_per_block + + self.conv_in = CausalConv3d(in_channels, block_out_channels[0], kernel_size=3, stride=1) + self.mid_block = None + self.down_blocks = nn.ModuleList([]) + + # down + output_channel = block_out_channels[0] + for i, down_block_type in enumerate(down_block_types): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + num_spatial_downsample_layers = int(np.log2(spatial_compression_ratio)) + num_time_downsample_layers = int(np.log2(time_compression_ratio)) + + if time_compression_ratio == 4: + add_spatial_downsample = bool(i < num_spatial_downsample_layers) + add_time_downsample = bool(i >= (len(block_out_channels) - 1 - num_time_downsample_layers) and not is_final_block) + else: + raise ValueError(f"Unsupported time_compression_ratio: {time_compression_ratio}.") + + downsample_stride_HW = (2, 2) if add_spatial_downsample else (1, 1) + downsample_stride_T = (2,) if add_time_downsample else (1,) + downsample_stride = tuple(downsample_stride_T + downsample_stride_HW) + down_block = get_down_block3d( + down_block_type, + num_layers=self.layers_per_block, + in_channels=input_channel, + out_channels=output_channel, + add_downsample=bool(add_spatial_downsample or add_time_downsample), + downsample_stride=downsample_stride, + resnet_eps=1e-6, + downsample_padding=0, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + attention_head_dim=output_channel, + temb_channels=None, + ) + self.down_blocks.append(down_block) + + # mid + self.mid_block = UNetMidBlockCausal3D( + in_channels=block_out_channels[-1], + resnet_eps=1e-6, + resnet_act_fn=act_fn, + output_scale_factor=1, + resnet_time_scale_shift="default", + attention_head_dim=block_out_channels[-1], + resnet_groups=norm_num_groups, + temb_channels=None, + add_attention=mid_block_add_attention, + ) + + # out + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6) + self.conv_act = nn.SiLU() + + conv_out_channels = 2 * out_channels if double_z else out_channels + self.conv_out = CausalConv3d(block_out_channels[-1], conv_out_channels, kernel_size=3) + + def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor: + r"""The forward method of the `EncoderCausal3D` class.""" + assert len(sample.shape) == 5, "The input tensor should have 5 dimensions" + + sample = self.conv_in(sample) + + # down + for down_block in self.down_blocks: + sample = down_block(sample) + + # middle + sample = self.mid_block(sample) + + # post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + return sample + + +class DecoderCausal3D(nn.Module): + r""" + The `DecoderCausal3D` layer of a variational autoencoder that decodes its latent representation into an output sample. + """ + + def __init__( + self, + in_channels: int = 3, + out_channels: int = 3, + up_block_types: Tuple[str, ...] = ("UpDecoderBlockCausal3D",), + block_out_channels: Tuple[int, ...] 
= (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + act_fn: str = "silu", + norm_type: str = "group", # group, spatial + mid_block_add_attention=True, + time_compression_ratio: int = 4, + spatial_compression_ratio: int = 8, + ): + super().__init__() + self.layers_per_block = layers_per_block + + self.conv_in = CausalConv3d(in_channels, block_out_channels[-1], kernel_size=3, stride=1) + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + temb_channels = in_channels if norm_type == "spatial" else None + + # mid + self.mid_block = UNetMidBlockCausal3D( + in_channels=block_out_channels[-1], + resnet_eps=1e-6, + resnet_act_fn=act_fn, + output_scale_factor=1, + resnet_time_scale_shift="default" if norm_type == "group" else norm_type, + attention_head_dim=block_out_channels[-1], + resnet_groups=norm_num_groups, + temb_channels=temb_channels, + add_attention=mid_block_add_attention, + ) + + # up + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i, up_block_type in enumerate(up_block_types): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + num_spatial_upsample_layers = int(np.log2(spatial_compression_ratio)) + num_time_upsample_layers = int(np.log2(time_compression_ratio)) + + if time_compression_ratio == 4: + add_spatial_upsample = bool(i < num_spatial_upsample_layers) + add_time_upsample = bool(i >= len(block_out_channels) - 1 - num_time_upsample_layers and not is_final_block) + else: + raise ValueError(f"Unsupported time_compression_ratio: {time_compression_ratio}.") + + upsample_scale_factor_HW = (2, 2) if add_spatial_upsample else (1, 1) + upsample_scale_factor_T = (2,) if add_time_upsample else (1,) + upsample_scale_factor = tuple(upsample_scale_factor_T + upsample_scale_factor_HW) + up_block = get_up_block3d( + up_block_type, + num_layers=self.layers_per_block + 1, + in_channels=prev_output_channel, + out_channels=output_channel, + prev_output_channel=None, + add_upsample=bool(add_spatial_upsample or add_time_upsample), + upsample_scale_factor=upsample_scale_factor, + resnet_eps=1e-6, + resnet_act_fn=act_fn, + resnet_groups=norm_num_groups, + attention_head_dim=output_channel, + temb_channels=temb_channels, + resnet_time_scale_shift=norm_type, + ) + self.up_blocks.append(up_block) + prev_output_channel = output_channel + + # out + if norm_type == "spatial": + self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels) + else: + self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6) + self.conv_act = nn.SiLU() + self.conv_out = CausalConv3d(block_out_channels[0], out_channels, kernel_size=3) + + self.gradient_checkpointing = False + + def forward( + self, + sample: torch.FloatTensor, + latent_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.FloatTensor: + r"""The forward method of the `DecoderCausal3D` class.""" + assert len(sample.shape) == 5, "The input tensor should have 5 dimensions." 
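Both EncoderCausal3D and DecoderCausal3D place their spatial and temporal resampling with the same index arithmetic: log2 of each compression ratio gives the number of blocks that resample, spatial resampling goes to the first blocks and temporal resampling to the last non-final blocks. A small sketch of how that resolves for the default config (four blocks, 8x spatial and 4x temporal compression):

import numpy as np

block_out_channels = (128, 256, 512, 512)            # default Hunyuan VAE config
spatial_compression_ratio, time_compression_ratio = 8, 4
n_spatial = int(np.log2(spatial_compression_ratio))  # 3 spatial resample stages
n_time = int(np.log2(time_compression_ratio))        # 2 temporal resample stages

for i in range(len(block_out_channels)):
    is_final = i == len(block_out_channels) - 1
    add_spatial = i < n_spatial
    add_time = i >= len(block_out_channels) - 1 - n_time and not is_final
    print(i, add_spatial, add_time)
# blocks 0-2 resample spatially (2**3 = 8x); blocks 1-2 also resample in time (2**2 = 4x)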
+ + sample = self.conv_in(sample) + + upscale_dtype = next(iter(self.up_blocks.parameters())).dtype + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs) + + return custom_forward + + if is_torch_version(">=", "1.11.0"): + # middle + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(self.mid_block), + sample, + latent_embeds, + use_reentrant=False, + ) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = torch.utils.checkpoint.checkpoint( + create_custom_forward(up_block), + sample, + latent_embeds, + use_reentrant=False, + ) + else: + # middle + sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds) + else: + # middle + sample = self.mid_block(sample, latent_embeds) + sample = sample.to(upscale_dtype) + + # up + for up_block in self.up_blocks: + sample = up_block(sample, latent_embeds) + + # post-process + if latent_embeds is None: + sample = self.conv_norm_out(sample) + else: + sample = self.conv_norm_out(sample, latent_embeds) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + return sample + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters: torch.Tensor, deterministic: bool = False): + if parameters.ndim == 3: + dim = 2 # (B, L, C) + elif parameters.ndim == 5 or parameters.ndim == 4: + dim = 1 # (B, C, T, H ,W) / (B, C, H, W) + else: + raise NotImplementedError + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=dim) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean, device=self.parameters.device, dtype=self.parameters.dtype) + + def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor: + # make sure sample is on the same device as the parameters and has same dtype + sample = randn_tensor( + self.mean.shape, + generator=generator, + device=self.parameters.device, + dtype=self.parameters.dtype, + ) + x = self.mean + self.std * sample + return x + + def kl(self, other: "DiagonalGaussianDistribution" = None) -> torch.Tensor: + if self.deterministic: + return torch.Tensor([0.0]) + else: + reduce_dim = list(range(1, self.mean.ndim)) + if other is None: + return 0.5 * torch.sum( + torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, + dim=reduce_dim, + ) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=reduce_dim, + ) + + def nll(self, sample: torch.Tensor, dims: Tuple[int, ...] 
= [1, 2, 3]) -> torch.Tensor: + if self.deterministic: + return torch.Tensor([0.0]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims, + ) + + def mode(self) -> torch.Tensor: + return self.mean diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_generate_video.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_generate_video.py new file mode 100644 index 0000000000000000000000000000000000000000..64b49318dbb5f77e8a5360e66b2c75a1e08ef7a3 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_generate_video.py @@ -0,0 +1,936 @@ +import argparse +from datetime import datetime +from pathlib import Path +import random +import sys +import os +import time +from typing import Optional, Union + +import numpy as np +import torch +import torchvision +import accelerate +from diffusers.utils.torch_utils import randn_tensor +from transformers.models.llama import LlamaModel +from tqdm import tqdm +import av +from einops import rearrange +from safetensors.torch import load_file, save_file +from safetensors import safe_open +from PIL import Image + +from musubi_tuner.hunyuan_model import vae +from musubi_tuner.hunyuan_model.text_encoder import TextEncoder +from musubi_tuner.hunyuan_model.text_encoder import PROMPT_TEMPLATE +from musubi_tuner.hunyuan_model.vae import load_vae +from musubi_tuner.hunyuan_model.models import load_transformer, get_rotary_pos_embed +from musubi_tuner.hunyuan_model.fp8_optimization import convert_fp8_linear +from musubi_tuner.modules.scheduling_flow_match_discrete import FlowMatchDiscreteScheduler +from musubi_tuner.networks import lora + +try: + from lycoris.kohya import create_network_from_weights +except: + pass + +from musubi_tuner.utils.model_utils import str_to_dtype +from musubi_tuner.utils.safetensors_utils import mem_eff_save_file +from musubi_tuner.dataset.image_video_dataset import load_video, glob_images, resize_image_to_bucket + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def clean_memory_on_device(device): + if device.type == "cuda": + torch.cuda.empty_cache() + elif device.type == "cpu": + pass + elif device.type == "mps": # not tested + torch.mps.empty_cache() + + +def synchronize_device(device: torch.device): + if device.type == "cuda": + torch.cuda.synchronize() + elif device.type == "xpu": + torch.xpu.synchronize() + elif device.type == "mps": + torch.mps.synchronize() + + +def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=1, fps=24): + """save videos by video tensor + copy from https://github.com/guoyww/AnimateDiff/blob/e92bd5671ba62c0d774a32951453e328018b7c5b/animatediff/utils/util.py#L61 + + Args: + videos (torch.Tensor): video tensor predicted by the model + path (str): path to save video + rescale (bool, optional): rescale the video tensor from [-1, 1] to . Defaults to False. + n_rows (int, optional): Defaults to 1. + fps (int, optional): video save fps. Defaults to 8. 
+ """ + videos = rearrange(videos, "b c t h w -> t b c h w") + outputs = [] + for x in videos: + x = torchvision.utils.make_grid(x, nrow=n_rows) + x = x.transpose(0, 1).transpose(1, 2).squeeze(-1) + if rescale: + x = (x + 1.0) / 2.0 # -1,1 -> 0,1 + x = torch.clamp(x, 0, 1) + x = (x * 255).numpy().astype(np.uint8) + outputs.append(x) + + os.makedirs(os.path.dirname(path), exist_ok=True) + + # # save video with av + # container = av.open(path, "w") + # stream = container.add_stream("libx264", rate=fps) + # for x in outputs: + # frame = av.VideoFrame.from_ndarray(x, format="rgb24") + # packet = stream.encode(frame) + # container.mux(packet) + # packet = stream.encode(None) + # container.mux(packet) + # container.close() + + height, width, _ = outputs[0].shape + + # create output container + container = av.open(path, mode="w") + + # create video stream + codec = "libx264" + pixel_format = "yuv420p" + stream = container.add_stream(codec, rate=fps) + stream.width = width + stream.height = height + stream.pix_fmt = pixel_format + stream.bit_rate = 4000000 # 4Mbit/s + + for frame_array in outputs: + frame = av.VideoFrame.from_ndarray(frame_array, format="rgb24") + packets = stream.encode(frame) + for packet in packets: + container.mux(packet) + + for packet in stream.encode(): + container.mux(packet) + + container.close() + + +def save_images_grid( + videos: torch.Tensor, parent_dir: str, image_name: str, rescale: bool = False, n_rows: int = 1, create_subdir=True +): + videos = rearrange(videos, "b c t h w -> t b c h w") + outputs = [] + for x in videos: + x = torchvision.utils.make_grid(x, nrow=n_rows) + x = x.transpose(0, 1).transpose(1, 2).squeeze(-1) + if rescale: + x = (x + 1.0) / 2.0 # -1,1 -> 0,1 + x = torch.clamp(x, 0, 1) + x = (x * 255).numpy().astype(np.uint8) + outputs.append(x) + + if create_subdir: + output_dir = os.path.join(parent_dir, image_name) + else: + output_dir = parent_dir + + os.makedirs(output_dir, exist_ok=True) + for i, x in enumerate(outputs): + image_path = os.path.join(output_dir, f"{image_name}_{i:03d}.png") + image = Image.fromarray(x) + image.save(image_path) + + +# region Encoding prompt + + +def encode_prompt(prompt: Union[str, list[str]], device: torch.device, num_videos_per_prompt: int, text_encoder: TextEncoder): + r""" + Encodes the prompt into text encoder hidden states. 
+ + Args: + prompt (`str` or `List[str]`): + prompt to be encoded + device: (`torch.device`): + torch device + num_videos_per_prompt (`int`): + number of videos that should be generated per prompt + text_encoder (TextEncoder): + text encoder to be used for encoding the prompt + """ + # LoRA and Textual Inversion are not supported in this script + # negative prompt and prompt embedding are not supported in this script + # clip_skip is not supported in this script because it is not used in the original script + data_type = "video" # video only, image is not supported + + text_inputs = text_encoder.text2tokens(prompt, data_type=data_type) + + with torch.no_grad(): + prompt_outputs = text_encoder.encode(text_inputs, data_type=data_type, device=device) + prompt_embeds = prompt_outputs.hidden_state + + attention_mask = prompt_outputs.attention_mask + if attention_mask is not None: + attention_mask = attention_mask.to(device) + bs_embed, seq_len = attention_mask.shape + attention_mask = attention_mask.repeat(1, num_videos_per_prompt) + attention_mask = attention_mask.view(bs_embed * num_videos_per_prompt, seq_len) + + prompt_embeds_dtype = text_encoder.dtype + prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) + + if prompt_embeds.ndim == 2: + bs_embed, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt) + prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, -1) + else: + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1) + prompt_embeds = prompt_embeds.view(bs_embed * num_videos_per_prompt, seq_len, -1) + + return prompt_embeds, attention_mask + + +def encode_input_prompt(prompt: Union[str, list[str]], args, device, fp8_llm=False, accelerator=None): + # constants + prompt_template_video = "dit-llm-encode-video" + prompt_template = "dit-llm-encode" + text_encoder_dtype = torch.float16 + text_encoder_type = "llm" + text_len = 256 + hidden_state_skip_layer = 2 + apply_final_norm = False + reproduce = False + + text_encoder_2_type = "clipL" + text_len_2 = 77 + + num_videos = 1 + + # if args.prompt_template_video is not None: + # crop_start = PROMPT_TEMPLATE[args.prompt_template_video].get("crop_start", 0) + # elif args.prompt_template is not None: + # crop_start = PROMPT_TEMPLATE[args.prompt_template].get("crop_start", 0) + # else: + # crop_start = 0 + crop_start = PROMPT_TEMPLATE[prompt_template_video].get("crop_start", 0) + max_length = text_len + crop_start + + # prompt_template + prompt_template = PROMPT_TEMPLATE[prompt_template] + + # prompt_template_video + prompt_template_video = PROMPT_TEMPLATE[prompt_template_video] # if args.prompt_template_video is not None else None + + # load text encoders + logger.info(f"loading text encoder: {args.text_encoder1}") + text_encoder = TextEncoder( + text_encoder_type=text_encoder_type, + max_length=max_length, + text_encoder_dtype=text_encoder_dtype, + text_encoder_path=args.text_encoder1, + tokenizer_type=text_encoder_type, + prompt_template=prompt_template, + prompt_template_video=prompt_template_video, + hidden_state_skip_layer=hidden_state_skip_layer, + apply_final_norm=apply_final_norm, + reproduce=reproduce, + ) + text_encoder.eval() + if fp8_llm: + org_dtype = text_encoder.dtype + logger.info(f"Moving and casting text encoder to {device} and 
torch.float8_e4m3fn") + text_encoder.to(device=device, dtype=torch.float8_e4m3fn) + + # prepare LLM for fp8 + def prepare_fp8(llama_model: LlamaModel, target_dtype): + def forward_hook(module): + def forward(hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + module.variance_epsilon) + return module.weight.to(input_dtype) * hidden_states.to(input_dtype) + + return forward + + for module in llama_model.modules(): + if module.__class__.__name__ in ["Embedding"]: + # print("set", module.__class__.__name__, "to", target_dtype) + module.to(target_dtype) + if module.__class__.__name__ in ["LlamaRMSNorm"]: + # print("set", module.__class__.__name__, "hooks") + module.forward = forward_hook(module) + + prepare_fp8(text_encoder.model, org_dtype) + + logger.info(f"loading text encoder 2: {args.text_encoder2}") + text_encoder_2 = TextEncoder( + text_encoder_type=text_encoder_2_type, + max_length=text_len_2, + text_encoder_dtype=text_encoder_dtype, + text_encoder_path=args.text_encoder2, + tokenizer_type=text_encoder_2_type, + reproduce=reproduce, + ) + text_encoder_2.eval() + + # encode prompt + logger.info(f"Encoding prompt with text encoder 1") + text_encoder.to(device=device) + if fp8_llm: + with accelerator.autocast(): + prompt_embeds, prompt_mask = encode_prompt(prompt, device, num_videos, text_encoder) + else: + prompt_embeds, prompt_mask = encode_prompt(prompt, device, num_videos, text_encoder) + text_encoder = None + clean_memory_on_device(device) + + logger.info(f"Encoding prompt with text encoder 2") + text_encoder_2.to(device=device) + prompt_embeds_2, prompt_mask_2 = encode_prompt(prompt, device, num_videos, text_encoder_2) + + prompt_embeds = prompt_embeds.to("cpu") + prompt_mask = prompt_mask.to("cpu") + prompt_embeds_2 = prompt_embeds_2.to("cpu") + prompt_mask_2 = prompt_mask_2.to("cpu") + + text_encoder_2 = None + clean_memory_on_device(device) + + return prompt_embeds, prompt_mask, prompt_embeds_2, prompt_mask_2 + + +# endregion + + +def prepare_vae(args, device): + vae_dtype = torch.float16 if args.vae_dtype is None else str_to_dtype(args.vae_dtype) + vae, _, s_ratio, t_ratio = load_vae(vae_dtype=vae_dtype, device=device, vae_path=args.vae) + vae.eval() + # vae_kwargs = {"s_ratio": s_ratio, "t_ratio": t_ratio} + + # set chunk_size to CausalConv3d recursively + chunk_size = args.vae_chunk_size + if chunk_size is not None: + vae.set_chunk_size_for_causal_conv_3d(chunk_size) + logger.info(f"Set chunk_size to {chunk_size} for CausalConv3d") + + if args.vae_spatial_tile_sample_min_size is not None: + vae.enable_spatial_tiling(True) + vae.tile_sample_min_size = args.vae_spatial_tile_sample_min_size + vae.tile_latent_min_size = args.vae_spatial_tile_sample_min_size // 8 + # elif args.vae_tiling: + else: + vae.enable_spatial_tiling(True) + + return vae, vae_dtype + + +def encode_to_latents(args, video, device): + vae, vae_dtype = prepare_vae(args, device) + + video = video.to(device=device, dtype=vae_dtype) + video = video * 2 - 1 # 0, 1 -> -1, 1 + with torch.no_grad(): + latents = vae.encode(video).latent_dist.sample() + + if hasattr(vae.config, "shift_factor") and vae.config.shift_factor: + latents = (latents - vae.config.shift_factor) * vae.config.scaling_factor + else: + latents = latents * vae.config.scaling_factor + + return latents + + +def decode_latents(args, latents, device): + vae, vae_dtype = prepare_vae(args, device) 
+ + expand_temporal_dim = False + if len(latents.shape) == 4: + latents = latents.unsqueeze(2) + expand_temporal_dim = True + elif len(latents.shape) == 5: + pass + else: + raise ValueError(f"Only support latents with shape (b, c, h, w) or (b, c, f, h, w), but got {latents.shape}.") + + if hasattr(vae.config, "shift_factor") and vae.config.shift_factor: + latents = latents / vae.config.scaling_factor + vae.config.shift_factor + else: + latents = latents / vae.config.scaling_factor + + latents = latents.to(device=device, dtype=vae_dtype) + with torch.no_grad(): + image = vae.decode(latents, return_dict=False)[0] + + if expand_temporal_dim: + image = image.squeeze(2) + + image = (image / 2 + 0.5).clamp(0, 1) + # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 + image = image.cpu().float() + + return image + + +def parse_args(): + parser = argparse.ArgumentParser(description="HunyuanVideo inference script") + + parser.add_argument("--dit", type=str, required=True, help="DiT checkpoint path or directory") + parser.add_argument( + "--dit_in_channels", + type=int, + default=None, + help="input channels for DiT, default is None (automatically detect). 32 for SkyReels-I2V, 16 for others", + ) + parser.add_argument("--vae", type=str, required=True, help="VAE checkpoint path or directory") + parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is float16") + parser.add_argument("--text_encoder1", type=str, required=True, help="Text Encoder 1 directory") + parser.add_argument("--text_encoder2", type=str, required=True, help="Text Encoder 2 directory") + + # LoRA + parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path") + parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier") + parser.add_argument( + "--save_merged_model", + type=str, + default=None, + help="Save merged model to path. If specified, no inference will be performed.", + ) + parser.add_argument("--exclude_single_blocks", action="store_true", help="Exclude single blocks when loading LoRA weights") + + # inference + parser.add_argument("--prompt", type=str, required=True, help="prompt for generation") + parser.add_argument("--negative_prompt", type=str, default=None, help="negative prompt for generation") + parser.add_argument("--video_size", type=int, nargs=2, default=[256, 256], help="video size") + parser.add_argument("--video_length", type=int, default=129, help="video length") + parser.add_argument("--fps", type=int, default=24, help="video fps") + parser.add_argument("--infer_steps", type=int, default=50, help="number of inference steps") + parser.add_argument("--save_path", type=str, required=True, help="path to save generated video") + parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.") + parser.add_argument( + "--guidance_scale", + type=float, + default=1.0, + help="Guidance scale for classifier free guidance. 
Default is 1.0 (means no guidance)", + ) + parser.add_argument("--embedded_cfg_scale", type=float, default=6.0, help="Embeded classifier free guidance scale.") + parser.add_argument("--video_path", type=str, default=None, help="path to video for video2video inference") + parser.add_argument( + "--image_path", type=str, default=None, help="path to image for image2video inference, only works for SkyReels-I2V model" + ) + parser.add_argument( + "--split_uncond", + action="store_true", + help="split unconditional call for classifier free guidance, slower but less memory usage", + ) + parser.add_argument("--strength", type=float, default=0.8, help="strength for video2video inference") + + # Flow Matching + parser.add_argument("--flow_shift", type=float, default=7.0, help="Shift factor for flow matching schedulers.") + + parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model") + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for Text Encoder 1 (LLM)") + parser.add_argument( + "--device", type=str, default=None, help="device to use for inference. If None, use CUDA if available, otherwise use CPU" + ) + parser.add_argument( + "--attn_mode", type=str, default="torch", choices=["flash", "torch", "sageattn", "xformers", "sdpa"], help="attention mode" + ) + parser.add_argument( + "--split_attn", action="store_true", help="use split attention, default is False. if True, --split_uncond becomes True" + ) + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + parser.add_argument("--blocks_to_swap", type=int, default=None, help="number of blocks to swap in the model") + parser.add_argument("--img_in_txt_in_offloading", action="store_true", help="offload img_in and txt_in to cpu") + parser.add_argument( + "--output_type", type=str, default="video", choices=["video", "images", "latent", "both"], help="output type" + ) + parser.add_argument("--no_metadata", action="store_true", help="do not save metadata") + parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. 
no inference") + parser.add_argument("--lycoris", action="store_true", help="use lycoris for inference") + parser.add_argument("--fp8_fast", action="store_true", help="Enable fast FP8 arthimetic(RTX 4XXX+)") + parser.add_argument("--compile", action="store_true", help="Enable torch.compile") + parser.add_argument( + "--compile_args", + nargs=4, + metavar=("BACKEND", "MODE", "DYNAMIC", "FULLGRAPH"), + default=["inductor", "max-autotune-no-cudagraphs", "False", "False"], + help="Torch.compile settings", + ) + + args = parser.parse_args() + + assert (args.latent_path is None or len(args.latent_path) == 0) or ( + args.output_type == "images" or args.output_type == "video" + ), "latent_path is only supported for images or video output" + + # update dit_weight based on model_base if not exists + + if args.fp8_fast and not args.fp8: + raise ValueError("--fp8_fast requires --fp8") + + return args + + +def check_inputs(args): + height = args.video_size[0] + width = args.video_size[1] + video_length = args.video_length + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + return height, width, video_length + + +def main(): + args = parse_args() + + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + dit_dtype = torch.bfloat16 + dit_weight_dtype = torch.float8_e4m3fn if args.fp8 else dit_dtype + logger.info(f"Using device: {device}, DiT precision: {dit_dtype}, weight precision: {dit_weight_dtype}") + + original_base_names = None + if args.latent_path is not None and len(args.latent_path) > 0: + original_base_names = [] + latents_list = [] + seeds = [] + for latent_path in args.latent_path: + original_base_names.append(os.path.splitext(os.path.basename(latent_path))[0]) + seed = 0 + + if os.path.splitext(latent_path)[1] != ".safetensors": + latents = torch.load(latent_path, map_location="cpu") + else: + latents = load_file(latent_path)["latent"] + with safe_open(latent_path, framework="pt") as f: + metadata = f.metadata() + if metadata is None: + metadata = {} + logger.info(f"Loaded metadata: {metadata}") + + if "seeds" in metadata: + seed = int(metadata["seeds"]) + + seeds.append(seed) + latents_list.append(latents) + + logger.info(f"Loaded latent from {latent_path}. 
Shape: {latents.shape}") + latents = torch.stack(latents_list, dim=0) + else: + # prepare accelerator + mixed_precision = "bf16" if dit_dtype == torch.bfloat16 else "fp16" + accelerator = accelerate.Accelerator(mixed_precision=mixed_precision) + + # load prompt + prompt = args.prompt # TODO load prompts from file + assert prompt is not None, "prompt is required" + + # check inputs: may be height, width, video_length etc will be changed for each generation in future + height, width, video_length = check_inputs(args) + + # encode prompt with LLM and Text Encoder + logger.info(f"Encoding prompt: {prompt}") + + do_classifier_free_guidance = args.guidance_scale != 1.0 + if do_classifier_free_guidance: + negative_prompt = args.negative_prompt + if negative_prompt is None: + logger.info("Negative prompt is not provided, using empty prompt") + negative_prompt = "" + logger.info(f"Encoding negative prompt: {negative_prompt}") + prompt = [negative_prompt, prompt] + else: + if args.negative_prompt is not None: + logger.warning("Negative prompt is provided but guidance_scale is 1.0, negative prompt will be ignored.") + + prompt_embeds, prompt_mask, prompt_embeds_2, prompt_mask_2 = encode_input_prompt( + prompt, args, device, args.fp8_llm, accelerator + ) + + # encode latents for video2video inference + video_latents = None + if args.video_path is not None: + # v2v inference + logger.info(f"Video2Video inference: {args.video_path}") + video = load_video(args.video_path, 0, video_length, bucket_reso=(width, height)) # list of frames + if len(video) < video_length: + raise ValueError(f"Video length is less than {video_length}") + video = np.stack(video, axis=0) # F, H, W, C + video = torch.from_numpy(video).permute(3, 0, 1, 2).unsqueeze(0).float() # 1, C, F, H, W + video = video / 255.0 + + logger.info(f"Encoding video to latents") + video_latents = encode_to_latents(args, video, device) + video_latents = video_latents.to(device=device, dtype=dit_dtype) + + clean_memory_on_device(device) + + # encode latents for image2video inference + image_latents = None + if args.image_path is not None: + # i2v inference + logger.info(f"Image2Video inference: {args.image_path}") + + image = Image.open(args.image_path) + image = resize_image_to_bucket(image, (width, height)) # returns a numpy array + image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).unsqueeze(2).float() # 1, C, 1, H, W + image = image / 255.0 + + logger.info(f"Encoding image to latents") + image_latents = encode_to_latents(args, image, device) # 1, C, 1, H, W + image_latents = image_latents.to(device=device, dtype=dit_dtype) + + clean_memory_on_device(device) + + # load DiT model + blocks_to_swap = args.blocks_to_swap if args.blocks_to_swap else 0 + loading_device = "cpu" # if blocks_to_swap > 0 else device + + logger.info(f"Loading DiT model from {args.dit}") + if args.attn_mode == "sdpa": + args.attn_mode = "torch" + + # if image_latents is given, the model should be I2V model, so the in_channels should be 32 + dit_in_channels = args.dit_in_channels if args.dit_in_channels is not None else (32 if image_latents is not None else 16) + + # if we use LoRA, weigths should be bf16 instead of fp8, because merging should be done in bf16 + # the model is too large, so we load the model to cpu. 
in addition, the .pt file is loaded to cpu anyway + # on the fly merging will be a solution for this issue for .safetenors files (not implemented yet) + transformer = load_transformer( + args.dit, args.attn_mode, args.split_attn, loading_device, dit_dtype, in_channels=dit_in_channels + ) + transformer.eval() + + # load LoRA weights + if args.lora_weight is not None and len(args.lora_weight) > 0: + for i, lora_weight in enumerate(args.lora_weight): + if args.lora_multiplier is not None and len(args.lora_multiplier) > i: + lora_multiplier = args.lora_multiplier[i] + else: + lora_multiplier = 1.0 + + logger.info(f"Loading LoRA weights from {lora_weight} with multiplier {lora_multiplier}") + weights_sd = load_file(lora_weight) + + # Filter to exclude keys that are part of single_blocks + if args.exclude_single_blocks: + filtered_weights = {k: v for k, v in weights_sd.items() if "single_blocks" not in k} + weights_sd = filtered_weights + + if args.lycoris: + lycoris_net, _ = create_network_from_weights( + multiplier=lora_multiplier, + file=None, + weights_sd=weights_sd, + unet=transformer, + text_encoder=None, + vae=None, + for_inference=True, + ) + else: + network = lora.create_arch_network_from_weights( + lora_multiplier, weights_sd, unet=transformer, for_inference=True + ) + logger.info("Merging LoRA weights to DiT model") + + # try: + # network.apply_to(None, transformer, apply_text_encoder=False, apply_unet=True) + # info = network.load_state_dict(weights_sd, strict=True) + # logger.info(f"Loaded LoRA weights from {weights_file}: {info}") + # network.eval() + # network.to(device) + # except Exception as e: + if args.lycoris: + lycoris_net.merge_to(None, transformer, weights_sd, dtype=None, device=device) + else: + network.merge_to(None, transformer, weights_sd, device=device, non_blocking=True) + + synchronize_device(device) + + logger.info("LoRA weights loaded") + + # save model here before casting to dit_weight_dtype + if args.save_merged_model: + logger.info(f"Saving merged model to {args.save_merged_model}") + mem_eff_save_file(transformer.state_dict(), args.save_merged_model) # save_file needs a lot of memory + logger.info("Merged model saved") + return + + logger.info(f"Casting model to {dit_weight_dtype}") + transformer.to(dtype=dit_weight_dtype) + + if args.fp8_fast: + logger.info("Enabling FP8 acceleration") + params_to_keep = {"norm", "bias", "time_in", "vector_in", "guidance_in", "txt_in", "img_in"} + for name, param in transformer.named_parameters(): + dtype_to_use = dit_dtype if any(keyword in name for keyword in params_to_keep) else dit_weight_dtype + param.to(dtype=dtype_to_use) + convert_fp8_linear(transformer, dit_dtype, params_to_keep=params_to_keep) + + if args.compile: + compile_backend, compile_mode, compile_dynamic, compile_fullgraph = args.compile_args + logger.info( + f"Torch Compiling[Backend: {compile_backend}; Mode: {compile_mode}; Dynamic: {compile_dynamic}; Fullgraph: {compile_fullgraph}]" + ) + torch._dynamo.config.cache_size_limit = 32 + for i, block in enumerate(transformer.single_blocks): + compiled_block = torch.compile( + block, + backend=compile_backend, + mode=compile_mode, + dynamic=compile_dynamic.lower() in "true", + fullgraph=compile_fullgraph.lower() in "true", + ) + transformer.single_blocks[i] = compiled_block + for i, block in enumerate(transformer.double_blocks): + compiled_block = torch.compile( + block, + backend=compile_backend, + mode=compile_mode, + dynamic=compile_dynamic.lower() in "true", + fullgraph=compile_fullgraph.lower() in "true", 
+ ) + transformer.double_blocks[i] = compiled_block + + if blocks_to_swap > 0: + logger.info(f"Enable swap {blocks_to_swap} blocks to CPU from device: {device}") + transformer.enable_block_swap(blocks_to_swap, device, supports_backward=False) + transformer.move_to_device_except_swap_blocks(device) + transformer.prepare_block_swap_before_forward() + else: + logger.info(f"Moving model to {device}") + transformer.to(device=device) + if args.img_in_txt_in_offloading: + logger.info("Enable offloading img_in and txt_in to CPU") + transformer.enable_img_in_txt_in_offloading() + + # load scheduler + logger.info(f"Loading scheduler") + scheduler = FlowMatchDiscreteScheduler(shift=args.flow_shift, reverse=True, solver="euler") + + # Prepare timesteps + num_inference_steps = args.infer_steps + scheduler.set_timesteps(num_inference_steps, device=device) # n_tokens is not used in FlowMatchDiscreteScheduler + timesteps = scheduler.timesteps + + # Prepare generator + num_videos_per_prompt = 1 # args.num_videos # currently only support 1 video per prompt, this is a batch size + seed = args.seed + if seed is None: + seeds = [random.randint(0, 2**32 - 1) for _ in range(num_videos_per_prompt)] + elif isinstance(seed, int): + seeds = [seed + i for i in range(num_videos_per_prompt)] + else: + raise ValueError(f"Seed must be an integer or None, got {seed}.") + generator = [torch.Generator(device).manual_seed(seed) for seed in seeds] + + # Prepare noisy latents + num_channels_latents = 16 # transformer.config.in_channels + vae_scale_factor = 2 ** (4 - 1) # len(self.vae.config.block_out_channels) == 4 + + vae_ver = vae.VAE_VER + if "884" in vae_ver: + latent_video_length = (video_length - 1) // 4 + 1 + elif "888" in vae_ver: + latent_video_length = (video_length - 1) // 8 + 1 + else: + latent_video_length = video_length + + # shape = ( + # num_videos_per_prompt, + # num_channels_latents, + # latent_video_length, + # height // vae_scale_factor, + # width // vae_scale_factor, + # ) + # latents = randn_tensor(shape, generator=generator, device=device, dtype=dit_dtype) + + # make first N frames to be the same if the given seed is same + shape_of_frame = (num_videos_per_prompt, num_channels_latents, 1, height // vae_scale_factor, width // vae_scale_factor) + latents = [] + for i in range(latent_video_length): + latents.append(randn_tensor(shape_of_frame, generator=generator, device=device, dtype=dit_dtype)) + latents = torch.cat(latents, dim=2) + + # pad image_latents to match the length of video_latents + if image_latents is not None: + zero_latents = torch.zeros_like(latents) + zero_latents[:, :, :1, :, :] = image_latents + image_latents = zero_latents + + if args.video_path is not None: + # v2v inference + noise = latents + assert noise.shape == video_latents.shape, f"noise shape {noise.shape} != video_latents shape {video_latents.shape}" + + num_inference_steps = int(num_inference_steps * args.strength) + timestep_start = scheduler.timesteps[-num_inference_steps] # larger strength, less inference steps and more start time + t = timestep_start / 1000.0 + latents = noise * t + video_latents * (1 - t) + + timesteps = timesteps[-num_inference_steps:] + + logger.info(f"strength: {args.strength}, num_inference_steps: {num_inference_steps}, timestep_start: {timestep_start}") + + # FlowMatchDiscreteScheduler does not have init_noise_sigma + + # Denoising loop + embedded_guidance_scale = args.embedded_cfg_scale + if embedded_guidance_scale is not None: + guidance_expand = torch.tensor([embedded_guidance_scale * 1000.0] * 
latents.shape[0], dtype=torch.float32, device="cpu") + guidance_expand = guidance_expand.to(device=device, dtype=dit_dtype) + if do_classifier_free_guidance: + guidance_expand = torch.cat([guidance_expand, guidance_expand], dim=0) + else: + guidance_expand = None + freqs_cos, freqs_sin = get_rotary_pos_embed(vae_ver, transformer, video_length, height, width) + # n_tokens = freqs_cos.shape[0] + + # move and cast all inputs to the correct device and dtype + prompt_embeds = prompt_embeds.to(device=device, dtype=dit_dtype) + prompt_mask = prompt_mask.to(device=device) + prompt_embeds_2 = prompt_embeds_2.to(device=device, dtype=dit_dtype) + prompt_mask_2 = prompt_mask_2.to(device=device) + + freqs_cos = freqs_cos.to(device=device, dtype=dit_dtype) + freqs_sin = freqs_sin.to(device=device, dtype=dit_dtype) + + num_warmup_steps = len(timesteps) - num_inference_steps * scheduler.order # this should be 0 in v2v inference + + # assert split_uncond and split_attn + if args.split_attn and do_classifier_free_guidance and not args.split_uncond: + logger.warning("split_attn is enabled, split_uncond will be enabled as well.") + args.split_uncond = True + + # with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA]) as p: + with tqdm(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + latents = scheduler.scale_model_input(latents, t) + + # predict the noise residual + with torch.no_grad(), accelerator.autocast(): + latents_input = latents if not do_classifier_free_guidance else torch.cat([latents, latents], dim=0) + if image_latents is not None: + latents_image_input = ( + image_latents if not do_classifier_free_guidance else torch.cat([image_latents, image_latents], dim=0) + ) + latents_input = torch.cat([latents_input, latents_image_input], dim=1) # 1 or 2, C*2, F, H, W + + batch_size = 1 if args.split_uncond else latents_input.shape[0] + + noise_pred_list = [] + for j in range(0, latents_input.shape[0], batch_size): + noise_pred = transformer( # For an input image (129, 192, 336) (1, 256, 256) + latents_input[j : j + batch_size], # [1, 16, 33, 24, 42] + t.repeat(batch_size).to(device=device, dtype=dit_dtype), # [1] + text_states=prompt_embeds[j : j + batch_size], # [1, 256, 4096] + text_mask=prompt_mask[j : j + batch_size], # [1, 256] + text_states_2=prompt_embeds_2[j : j + batch_size], # [1, 768] + freqs_cos=freqs_cos, # [seqlen, head_dim] + freqs_sin=freqs_sin, # [seqlen, head_dim] + guidance=guidance_expand[j : j + batch_size], # [1] + return_dict=True, + )["x"] + noise_pred_list.append(noise_pred) + noise_pred = torch.cat(noise_pred_list, dim=0) + + # perform classifier free guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = noise_pred_uncond + args.guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # # SkyReels' rescale noise config is omitted for now + # if guidance_rescale > 0.0: + # # Based on 3.4. 
in https://arxiv.org/pdf/2305.08891.pdf + # noise_pred = rescale_noise_cfg( + # noise_pred, + # noise_pred_cond, + # guidance_rescale=self.guidance_rescale, + # ) + + # compute the previous noisy sample x_t -> x_t-1 + latents = scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # update progress bar + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % scheduler.order == 0): + if progress_bar is not None: + progress_bar.update() + + # print(p.key_averages().table(sort_by="self_cpu_time_total", row_limit=-1)) + # print(p.key_averages().table(sort_by="self_cuda_time_total", row_limit=-1)) + + latents = latents.detach().cpu() + transformer = None + clean_memory_on_device(device) + + # Save samples + output_type = args.output_type + save_path = args.save_path # if args.save_path_suffix == "" else f"{args.save_path}_{args.save_path_suffix}" + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + if output_type == "latent" or output_type == "both": + # save latent + for i, latent in enumerate(latents): + latent_path = f"{save_path}/{time_flag}_{i}_{seeds[i]}_latent.safetensors" + + if args.no_metadata: + metadata = None + else: + metadata = { + "seeds": f"{seeds[i]}", + "prompt": f"{args.prompt}", + "height": f"{height}", + "width": f"{width}", + "video_length": f"{video_length}", + "infer_steps": f"{num_inference_steps}", + "guidance_scale": f"{args.guidance_scale}", + "embedded_cfg_scale": f"{args.embedded_cfg_scale}", + } + if args.negative_prompt is not None: + metadata["negative_prompt"] = f"{args.negative_prompt}" + sd = {"latent": latent} + save_file(sd, latent_path, metadata=metadata) + + logger.info(f"Latent save to: {latent_path}") + if output_type == "video" or output_type == "both": + # save video + videos = decode_latents(args, latents, device) + for i, sample in enumerate(videos): + original_name = "" if original_base_names is None else f"_{original_base_names[i]}" + sample = sample.unsqueeze(0) + video_path = f"{save_path}/{time_flag}_{i}_{seeds[i]}{original_name}.mp4" + save_videos_grid(sample, video_path, fps=args.fps) + logger.info(f"Sample save to: {video_path}") + elif output_type == "images": + # save images + videos = decode_latents(args, latents, device) + for i, sample in enumerate(videos): + original_name = "" if original_base_names is None else f"_{original_base_names[i]}" + sample = sample.unsqueeze(0) + image_name = f"{time_flag}_{i}_{seeds[i]}{original_name}" + save_images_grid(sample, save_path, image_name) + logger.info(f"Sample images save to: {save_path}/{image_name}") + + logger.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_train.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_train.py new file mode 100644 index 0000000000000000000000000000000000000000..c4fc1ddcd3d177a0d3bdb75efe7fd119140f6f67 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_train.py @@ -0,0 +1,1725 @@ +import ast +import asyncio +from datetime import timedelta +import gc +import importlib +import argparse +import math +import os +import pathlib +import re +import sys +import random +import time +import json +from multiprocessing import Value +from typing import Any, Dict, List, Optional +import accelerate +import numpy as np +from packaging.version import Version + +import huggingface_hub +import toml + +import torch +from tqdm import tqdm +from accelerate.utils import set_seed +from 
accelerate import Accelerator, InitProcessGroupKwargs, DistributedDataParallelKwargs +from safetensors.torch import load_file, save_file +import transformers +from diffusers.optimization import ( + SchedulerType as DiffusersSchedulerType, + TYPE_TO_SCHEDULER_FUNCTION as DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION, +) +from transformers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION + +from musubi_tuner.dataset import config_utils +from musubi_tuner.hunyuan_model.models import load_transformer, get_rotary_pos_embed_by_shape +import musubi_tuner.hunyuan_model.text_encoder as text_encoder_module +from musubi_tuner.hunyuan_model.vae import load_vae +import musubi_tuner.hunyuan_model.vae as vae_module +from musubi_tuner.modules.scheduling_flow_match_discrete import FlowMatchDiscreteScheduler +import musubi_tuner.networks.lora as lora_module +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_HUNYUAN_VIDEO + +import logging + +from musubi_tuner.utils import huggingface_utils, model_utils, train_utils, sai_model_spec + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +BASE_MODEL_VERSION_HUNYUAN_VIDEO = "hunyuan_video" + +# TODO make separate file for some functions to commonize with other scripts + + +def clean_memory_on_device(device: torch.device): + r""" + Clean memory on the specified device, will be called from training scripts. + """ + gc.collect() + + # device may "cuda" or "cuda:0", so we need to check the type of device + if device.type == "cuda": + torch.cuda.empty_cache() + if device.type == "xpu": + torch.xpu.empty_cache() + if device.type == "mps": + torch.mps.empty_cache() + + +# for collate_fn: epoch and step is multiprocessing.Value +class collator_class: + def __init__(self, epoch, step, dataset): + self.current_epoch = epoch + self.current_step = step + self.dataset = dataset # not used if worker_info is not None, in case of multiprocessing + + def __call__(self, examples): + worker_info = torch.utils.data.get_worker_info() + # worker_info is None in the main process + if worker_info is not None: + dataset = worker_info.dataset + else: + dataset = self.dataset + + # set epoch and step + dataset.set_current_epoch(self.current_epoch.value) + dataset.set_current_step(self.current_step.value) + return examples[0] + + +def prepare_accelerator(args: argparse.Namespace) -> Accelerator: + """ + DeepSpeed is not supported in this script currently. 
+ """ + if args.logging_dir is None: + logging_dir = None + else: + log_prefix = "" if args.log_prefix is None else args.log_prefix + logging_dir = args.logging_dir + "/" + log_prefix + time.strftime("%Y%m%d%H%M%S", time.localtime()) + + if args.log_with is None: + if logging_dir is not None: + log_with = "tensorboard" + else: + log_with = None + else: + log_with = args.log_with + if log_with in ["tensorboard", "all"]: + if logging_dir is None: + raise ValueError( + "logging_dir is required when log_with is tensorboard / Tensorboardを使う場合、logging_dirを指定してください" + ) + if log_with in ["wandb", "all"]: + try: + import wandb + except ImportError: + raise ImportError("No wandb / wandb がインストールされていないようです") + if logging_dir is not None: + os.makedirs(logging_dir, exist_ok=True) + os.environ["WANDB_DIR"] = logging_dir + if args.wandb_api_key is not None: + wandb.login(key=args.wandb_api_key) + + kwargs_handlers = [ + ( + InitProcessGroupKwargs( + backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", + init_method=( + "env://?use_libuv=False" if os.name == "nt" and Version(torch.__version__) >= Version("2.4.0") else None + ), + timeout=timedelta(minutes=args.ddp_timeout) if args.ddp_timeout else None, + ) + if torch.cuda.device_count() > 1 + else None + ), + ( + DistributedDataParallelKwargs( + gradient_as_bucket_view=args.ddp_gradient_as_bucket_view, static_graph=args.ddp_static_graph + ) + if args.ddp_gradient_as_bucket_view or args.ddp_static_graph + else None + ), + ] + kwargs_handlers = [i for i in kwargs_handlers if i is not None] + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=log_with, + project_dir=logging_dir, + kwargs_handlers=kwargs_handlers, + ) + print("accelerator device:", accelerator.device) + return accelerator + + +def line_to_prompt_dict(line: str) -> dict: + # subset of gen_img_diffusers + prompt_args = line.split(" --") + prompt_dict = {} + prompt_dict["prompt"] = prompt_args[0] + + for parg in prompt_args: + try: + m = re.match(r"w (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["width"] = int(m.group(1)) + continue + + m = re.match(r"h (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["height"] = int(m.group(1)) + continue + + m = re.match(r"f (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["frame_count"] = int(m.group(1)) + continue + + m = re.match(r"d (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["seed"] = int(m.group(1)) + continue + + m = re.match(r"s (\d+)", parg, re.IGNORECASE) + if m: # steps + prompt_dict["sample_steps"] = max(1, min(1000, int(m.group(1)))) + continue + + # m = re.match(r"l ([\d\.]+)", parg, re.IGNORECASE) + # if m: # scale + # prompt_dict["scale"] = float(m.group(1)) + # continue + # m = re.match(r"n (.+)", parg, re.IGNORECASE) + # if m: # negative prompt + # prompt_dict["negative_prompt"] = m.group(1) + # continue + + except ValueError as ex: + logger.error(f"Exception in parsing / 解析エラー: {parg}") + logger.error(ex) + + return prompt_dict + + +def load_prompts(prompt_file: str) -> list[Dict]: + # read prompts + if prompt_file.endswith(".txt"): + with open(prompt_file, "r", encoding="utf-8") as f: + lines = f.readlines() + prompts = [line.strip() for line in lines if len(line.strip()) > 0 and line[0] != "#"] + elif prompt_file.endswith(".toml"): + with open(prompt_file, "r", encoding="utf-8") as f: + data = toml.load(f) + prompts = [dict(**data["prompt"], **subset) for subset in data["prompt"]["subset"]] + elif 
prompt_file.endswith(".json"): + with open(prompt_file, "r", encoding="utf-8") as f: + prompts = json.load(f) + + # preprocess prompts + for i in range(len(prompts)): + prompt_dict = prompts[i] + if isinstance(prompt_dict, str): + prompt_dict = line_to_prompt_dict(prompt_dict) + prompts[i] = prompt_dict + assert isinstance(prompt_dict, dict) + + # Adds an enumerator to the dict based on prompt position. Used later to name image files. Also cleanup of extra data in original prompt dict. + prompt_dict["enum"] = i + prompt_dict.pop("subset", None) + + return prompts + + +def compute_density_for_timestep_sampling( + weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None +): + """Compute the density for sampling the timesteps when doing SD3 training. + + Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. + + SD3 paper reference: https://arxiv.org/abs/2403.03206v1. + """ + if weighting_scheme == "logit_normal": + # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$). + u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device="cpu") + u = torch.nn.functional.sigmoid(u) + elif weighting_scheme == "mode": + u = torch.rand(size=(batch_size,), device="cpu") + u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u) + else: + u = torch.rand(size=(batch_size,), device="cpu") + return u + + +def get_sigmas(noise_scheduler, timesteps, device, n_dim=4, dtype=torch.float32): + sigmas = noise_scheduler.sigmas.to(device=device, dtype=dtype) + schedule_timesteps = noise_scheduler.timesteps.to(device) + timesteps = timesteps.to(device) + + # if sum([(schedule_timesteps == t) for t in timesteps]) < len(timesteps): + if any([(schedule_timesteps == t).sum() == 0 for t in timesteps]): + # raise ValueError("Some timesteps are not in the schedule / 一部のtimestepsがスケジュールに含まれていません") + # round to nearest timestep + logger.warning("Some timesteps are not in the schedule / 一部のtimestepsがスケジュールに含まれていません") + step_indices = [torch.argmin(torch.abs(schedule_timesteps - t)).item() for t in timesteps] + else: + step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < n_dim: + sigma = sigma.unsqueeze(-1) + return sigma + + +def compute_loss_weighting_for_sd3(weighting_scheme: str, noise_scheduler, timesteps, device, dtype): + """Computes loss weighting scheme for SD3 training. + + Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. + + SD3 paper reference: https://arxiv.org/abs/2403.03206v1. 
+ """ + if weighting_scheme == "sigma_sqrt" or weighting_scheme == "cosmap": + sigmas = get_sigmas(noise_scheduler, timesteps, device, n_dim=5, dtype=dtype) + if weighting_scheme == "sigma_sqrt": + weighting = (sigmas**-2.0).float() + else: + bot = 1 - 2 * sigmas + 2 * sigmas**2 + weighting = 2 / (math.pi * bot) + else: + weighting = None # torch.ones_like(sigmas) + return weighting + + +class FineTuningTrainer: + def __init__(self): + pass + + def process_sample_prompts( + self, + args: argparse.Namespace, + accelerator: Accelerator, + sample_prompts: str, + text_encoder1: str, + text_encoder2: str, + fp8_llm: bool, + ): + logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}") + prompts = load_prompts(sample_prompts) + + def encode_for_text_encoder(text_encoder, is_llm=True): + sample_prompts_te_outputs = {} # (prompt) -> (embeds, mask) + with accelerator.autocast(), torch.no_grad(): + for prompt_dict in prompts: + for p in [prompt_dict.get("prompt", "")]: + if p not in sample_prompts_te_outputs: + logger.info(f"cache Text Encoder outputs for prompt: {p}") + + data_type = "video" + text_inputs = text_encoder.text2tokens(p, data_type=data_type) + + prompt_outputs = text_encoder.encode(text_inputs, data_type=data_type) + sample_prompts_te_outputs[p] = (prompt_outputs.hidden_state, prompt_outputs.attention_mask) + + return sample_prompts_te_outputs + + # Load Text Encoder 1 and encode + text_encoder_dtype = torch.float16 if args.text_encoder_dtype is None else model_utils.str_to_dtype(args.text_encoder_dtype) + logger.info(f"loading text encoder 1: {text_encoder1}") + text_encoder_1 = text_encoder_module.load_text_encoder_1(text_encoder1, accelerator.device, fp8_llm, text_encoder_dtype) + + logger.info("encoding with Text Encoder 1") + te_outputs_1 = encode_for_text_encoder(text_encoder_1) + del text_encoder_1 + + # Load Text Encoder 2 and encode + logger.info(f"loading text encoder 2: {text_encoder2}") + text_encoder_2 = text_encoder_module.load_text_encoder_2(text_encoder2, accelerator.device, text_encoder_dtype) + + logger.info("encoding with Text Encoder 2") + te_outputs_2 = encode_for_text_encoder(text_encoder_2, is_llm=False) + del text_encoder_2 + + # prepare sample parameters + sample_parameters = [] + for prompt_dict in prompts: + prompt_dict_copy = prompt_dict.copy() + p = prompt_dict.get("prompt", "") + prompt_dict_copy["llm_embeds"] = te_outputs_1[p][0] + prompt_dict_copy["llm_mask"] = te_outputs_1[p][1] + prompt_dict_copy["clipL_embeds"] = te_outputs_2[p][0] + prompt_dict_copy["clipL_mask"] = te_outputs_2[p][1] + sample_parameters.append(prompt_dict_copy) + + clean_memory_on_device(accelerator.device) + + return sample_parameters + + def get_optimizer(self, args, trainable_params: list[torch.nn.Parameter]) -> tuple[str, str, torch.optim.Optimizer]: + # adamw, adamw8bit, adafactor + + optimizer_type = args.optimizer_type.lower() + + # split optimizer_type and optimizer_args + optimizer_kwargs = {} + if args.optimizer_args is not None and len(args.optimizer_args) > 0: + for arg in args.optimizer_args: + key, value = arg.split("=") + value = ast.literal_eval(value) + optimizer_kwargs[key] = value + + lr = args.learning_rate + optimizer = None + optimizer_class = None + + if optimizer_type.endswith("8bit".lower()): + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです") + + if optimizer_type == "AdamW8bit".lower(): + logger.info(f"use 8-bit AdamW optimizer | {optimizer_kwargs}") + 
optimizer_class = bnb.optim.AdamW8bit + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + elif optimizer_type == "Adafactor".lower(): + # Adafactor: check relative_step and warmup_init + if "relative_step" not in optimizer_kwargs: + optimizer_kwargs["relative_step"] = True # default + if not optimizer_kwargs["relative_step"] and optimizer_kwargs.get("warmup_init", False): + logger.info( + f"set relative_step to True because warmup_init is True / warmup_initがTrueのためrelative_stepをTrueにします" + ) + optimizer_kwargs["relative_step"] = True + logger.info(f"use Adafactor optimizer | {optimizer_kwargs}") + + if optimizer_kwargs["relative_step"]: + logger.info(f"relative_step is true / relative_stepがtrueです") + if lr != 0.0: + logger.warning(f"learning rate is used as initial_lr / 指定したlearning rateはinitial_lrとして使用されます") + args.learning_rate = None + + if args.lr_scheduler != "adafactor": + logger.info(f"use adafactor_scheduler / スケジューラにadafactor_schedulerを使用します") + args.lr_scheduler = f"adafactor:{lr}" # ちょっと微妙だけど + + lr = None + else: + if args.max_grad_norm != 0.0: + logger.warning( + f"because max_grad_norm is set, clip_grad_norm is enabled. consider set to 0 / max_grad_normが設定されているためclip_grad_normが有効になります。0に設定して無効にしたほうがいいかもしれません" + ) + if args.lr_scheduler != "constant_with_warmup": + logger.warning(f"constant_with_warmup will be good / スケジューラはconstant_with_warmupが良いかもしれません") + if optimizer_kwargs.get("clip_threshold", 1.0) != 1.0: + logger.warning(f"clip_threshold=1.0 will be good / clip_thresholdは1.0が良いかもしれません") + + optimizer_class = transformers.optimization.Adafactor + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + elif optimizer_type == "AdamW".lower(): + logger.info(f"use AdamW optimizer | {optimizer_kwargs}") + optimizer_class = torch.optim.AdamW + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + if optimizer is None: + # 任意のoptimizerを使う + case_sensitive_optimizer_type = args.optimizer_type # not lower + logger.info(f"use {case_sensitive_optimizer_type} | {optimizer_kwargs}") + + if "." not in case_sensitive_optimizer_type: # from torch.optim + optimizer_module = torch.optim + else: # from other library + values = case_sensitive_optimizer_type.split(".") + optimizer_module = importlib.import_module(".".join(values[:-1])) + case_sensitive_optimizer_type = values[-1] + + optimizer_class = getattr(optimizer_module, case_sensitive_optimizer_type) + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + # for logging + optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__ + optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()]) + + # get train and eval functions + if hasattr(optimizer, "train") and callable(optimizer.train): + train_fn = optimizer.train + eval_fn = optimizer.eval + else: + train_fn = lambda: None + eval_fn = lambda: None + + return optimizer_name, optimizer_args, optimizer, train_fn, eval_fn + + def is_schedulefree_optimizer(self, optimizer: torch.optim.Optimizer, args: argparse.Namespace) -> bool: + return args.optimizer_type.lower().endswith("schedulefree".lower()) # or args.optimizer_schedulefree_wrapper + + def get_dummy_scheduler(optimizer: torch.optim.Optimizer) -> Any: + # dummy scheduler for schedulefree optimizer. supports only empty step(), get_last_lr() and optimizers. + # this scheduler is used for logging only. 
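+    # (illustrative note) "schedule-free" optimizers (for example schedulefree.AdamWScheduleFree,
+    # assuming the schedulefree package is installed) keep the learning rate fixed from the
+    # scheduler's point of view and instead expect explicit optimizer.train() / optimizer.eval()
+    # switches, which get_optimizer already returns as train_fn / eval_fn; a real LR scheduler is
+    # therefore unnecessary and this dummy only exists so get_last_lr() can still be logged.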
+ # this isn't be wrapped by accelerator because of this class is not a subclass of torch.optim.lr_scheduler._LRScheduler + class DummyScheduler: + def __init__(self, optimizer: torch.optim.Optimizer): + self.optimizer = optimizer + + def step(self): + pass + + def get_last_lr(self): + return [group["lr"] for group in self.optimizer.param_groups] + + return DummyScheduler(optimizer) + + def get_scheduler(self, args, optimizer: torch.optim.Optimizer, num_processes: int): + """ + Unified API to get any scheduler from its name. + """ + # if schedulefree optimizer, return dummy scheduler + if self.is_schedulefree_optimizer(optimizer, args): + return self.get_dummy_scheduler(optimizer) + + name = args.lr_scheduler + num_training_steps = args.max_train_steps * num_processes # * args.gradient_accumulation_steps + num_warmup_steps: Optional[int] = ( + int(args.lr_warmup_steps * num_training_steps) if isinstance(args.lr_warmup_steps, float) else args.lr_warmup_steps + ) + num_decay_steps: Optional[int] = ( + int(args.lr_decay_steps * num_training_steps) if isinstance(args.lr_decay_steps, float) else args.lr_decay_steps + ) + num_stable_steps = num_training_steps - num_warmup_steps - num_decay_steps + num_cycles = args.lr_scheduler_num_cycles + power = args.lr_scheduler_power + timescale = args.lr_scheduler_timescale + min_lr_ratio = args.lr_scheduler_min_lr_ratio + + lr_scheduler_kwargs = {} # get custom lr_scheduler kwargs + if args.lr_scheduler_args is not None and len(args.lr_scheduler_args) > 0: + for arg in args.lr_scheduler_args: + key, value = arg.split("=") + value = ast.literal_eval(value) + lr_scheduler_kwargs[key] = value + + def wrap_check_needless_num_warmup_steps(return_vals): + if num_warmup_steps is not None and num_warmup_steps != 0: + raise ValueError(f"{name} does not require `num_warmup_steps`. Set None or 0.") + return return_vals + + # using any lr_scheduler from other library + if args.lr_scheduler_type: + lr_scheduler_type = args.lr_scheduler_type + logger.info(f"use {lr_scheduler_type} | {lr_scheduler_kwargs} as lr_scheduler") + if "." 
not in lr_scheduler_type: # default to use torch.optim + lr_scheduler_module = torch.optim.lr_scheduler + else: + values = lr_scheduler_type.split(".") + lr_scheduler_module = importlib.import_module(".".join(values[:-1])) + lr_scheduler_type = values[-1] + lr_scheduler_class = getattr(lr_scheduler_module, lr_scheduler_type) + lr_scheduler = lr_scheduler_class(optimizer, **lr_scheduler_kwargs) + return lr_scheduler + + if name.startswith("adafactor"): + assert ( + type(optimizer) == transformers.optimization.Adafactor + ), f"adafactor scheduler must be used with Adafactor optimizer / adafactor schedulerはAdafactorオプティマイザと同時に使ってください" + initial_lr = float(name.split(":")[1]) + # logger.info(f"adafactor scheduler init lr {initial_lr}") + return wrap_check_needless_num_warmup_steps(transformers.optimization.AdafactorSchedule(optimizer, initial_lr)) + + if name == DiffusersSchedulerType.PIECEWISE_CONSTANT.value: + name = DiffusersSchedulerType(name) + schedule_func = DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION[name] + return schedule_func(optimizer, **lr_scheduler_kwargs) # step_rules and last_epoch are given as kwargs + + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + + if name == SchedulerType.CONSTANT: + return wrap_check_needless_num_warmup_steps(schedule_func(optimizer, **lr_scheduler_kwargs)) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, **lr_scheduler_kwargs) + + if name == SchedulerType.INVERSE_SQRT: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, timescale=timescale, **lr_scheduler_kwargs) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + if name == SchedulerType.COSINE_WITH_RESTARTS: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + **lr_scheduler_kwargs, + ) + + if name == SchedulerType.POLYNOMIAL: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + power=power, + **lr_scheduler_kwargs, + ) + + if name == SchedulerType.COSINE_WITH_MIN_LR: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles / 2, + min_lr_rate=min_lr_ratio, + **lr_scheduler_kwargs, + ) + + # these schedulers do not require `num_decay_steps` + if name == SchedulerType.LINEAR or name == SchedulerType.COSINE: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + **lr_scheduler_kwargs, + ) + + # All other schedulers require `num_decay_steps` + if num_decay_steps is None: + raise ValueError(f"{name} requires `num_decay_steps`, please provide that argument.") + if name == SchedulerType.WARMUP_STABLE_DECAY: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_stable_steps=num_stable_steps, + num_decay_steps=num_decay_steps, + num_cycles=num_cycles / 2, + min_lr_ratio=min_lr_ratio if min_lr_ratio is not None else 0.0, + **lr_scheduler_kwargs, + ) + + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + 
num_decay_steps=num_decay_steps, + **lr_scheduler_kwargs, + ) + + def resume_from_local_or_hf_if_specified(self, accelerator: Accelerator, args: argparse.Namespace) -> bool: + if not args.resume: + return False + + if not args.resume_from_huggingface: + logger.info(f"resume training from local state: {args.resume}") + accelerator.load_state(args.resume) + return True + + logger.info(f"resume training from huggingface state: {args.resume}") + repo_id = args.resume.split("/")[0] + "/" + args.resume.split("/")[1] + path_in_repo = "/".join(args.resume.split("/")[2:]) + revision = None + repo_type = None + if ":" in path_in_repo: + divided = path_in_repo.split(":") + if len(divided) == 2: + path_in_repo, revision = divided + repo_type = "model" + else: + path_in_repo, revision, repo_type = divided + logger.info(f"Downloading state from huggingface: {repo_id}/{path_in_repo}@{revision}") + + list_files = huggingface_utils.list_dir( + repo_id=repo_id, + subfolder=path_in_repo, + revision=revision, + token=args.huggingface_token, + repo_type=repo_type, + ) + + async def download(filename) -> str: + def task(): + return huggingface_hub.hf_hub_download( + repo_id=repo_id, + filename=filename, + revision=revision, + repo_type=repo_type, + token=args.huggingface_token, + ) + + return await asyncio.get_event_loop().run_in_executor(None, task) + + loop = asyncio.get_event_loop() + results = loop.run_until_complete(asyncio.gather(*[download(filename=filename.rfilename) for filename in list_files])) + if len(results) == 0: + raise ValueError( + "No files found in the specified repo id/path/revision / 指定されたリポジトリID/パス/リビジョンにファイルが見つかりませんでした" + ) + dirname = os.path.dirname(results[0]) + accelerator.load_state(dirname) + + return True + + def sample_images(self, accelerator, args, epoch, global_step, device, vae, transformer, sample_parameters): + pass + + def get_noisy_model_input_and_timesteps( + self, + args: argparse.Namespace, + noise: torch.Tensor, + latents: torch.Tensor, + noise_scheduler: FlowMatchDiscreteScheduler, + device: torch.device, + dtype: torch.dtype, + ): + batch_size = noise.shape[0] + + if args.timestep_sampling == "uniform" or args.timestep_sampling == "sigmoid" or args.timestep_sampling == "shift": + if args.timestep_sampling == "uniform" or args.timestep_sampling == "sigmoid": + # Simple random t-based noise sampling + if args.timestep_sampling == "sigmoid": + t = torch.sigmoid(args.sigmoid_scale * torch.randn((batch_size,), device=device)) + else: + t = torch.rand((batch_size,), device=device) + + elif args.timestep_sampling == "shift": + shift = args.discrete_flow_shift + logits_norm = torch.randn(batch_size, device=device) + logits_norm = logits_norm * args.sigmoid_scale # larger scale for more uniform sampling + t = logits_norm.sigmoid() + t = (t * shift) / (1 + (shift - 1) * t) + + t_min = args.min_timestep if args.min_timestep is not None else 0 + t_max = args.max_timestep if args.max_timestep is not None else 1000.0 + t_min /= 1000.0 + t_max /= 1000.0 + t = t * (t_max - t_min) + t_min # scale to [t_min, t_max], default [0, 1] + + timesteps = t * 1000.0 + t = t.view(-1, 1, 1, 1, 1) + noisy_model_input = (1 - t) * latents + t * noise + + timesteps += 1 # 1 to 1000 + else: + # Sample a random timestep for each image + # for weighting schemes where we sample timesteps non-uniformly + u = compute_density_for_timestep_sampling( + weighting_scheme=args.weighting_scheme, + batch_size=batch_size, + logit_mean=args.logit_mean, + logit_std=args.logit_std, + mode_scale=args.mode_scale, + ) 
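+            # (illustrative) u holds per-sample density values in [0, 1]; below they are scaled into
+            # the [min_timestep, max_timestep) range and truncated to integer indices into the
+            # scheduler's timestep table, e.g. u = 0.5 with the default 0..1000 range picks index 500.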
+ # indices = (u * noise_scheduler.config.num_train_timesteps).long() + t_min = args.min_timestep if args.min_timestep is not None else 0 + t_max = args.max_timestep if args.max_timestep is not None else 1000 + indices = (u * (t_max - t_min) + t_min).long() + + timesteps = noise_scheduler.timesteps[indices].to(device=device) # 1 to 1000 + + # Add noise according to flow matching. + sigmas = get_sigmas(noise_scheduler, timesteps, device, n_dim=latents.ndim, dtype=dtype) + noisy_model_input = sigmas * noise + (1.0 - sigmas) * latents + + return noisy_model_input, timesteps + + def train(self, args): + if args.seed is None: + args.seed = random.randint(0, 2**32) + set_seed(args.seed) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_HUNYUAN_VIDEO) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group, training=True) + + current_epoch = Value("i", 0) + current_step = Value("i", 0) + ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None + collator = collator_class(current_epoch, current_step, ds_for_collator) + + # prepare accelerator + logger.info("preparing accelerator") + accelerator = prepare_accelerator(args) + is_main_process = accelerator.is_main_process + + # prepare dtype + weight_dtype = torch.float32 + if args.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif args.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + # HunyuanVideo specific + vae_dtype = torch.float16 if args.vae_dtype is None else model_utils.str_to_dtype(args.vae_dtype) + + # get embedding for sampling images + sample_parameters = vae = None + if args.sample_prompts: + sample_parameters = self.process_sample_prompts( + args, accelerator, args.sample_prompts, args.text_encoder1, args.text_encoder2, args.fp8_llm + ) + + # Load VAE model for sampling images: VAE is loaded to cpu to save gpu memory + vae, _, s_ratio, t_ratio = load_vae(vae_dtype=vae_dtype, device="cpu", vae_path=args.vae) + vae.requires_grad_(False) + vae.eval() + + if args.vae_chunk_size is not None: + vae.set_chunk_size_for_causal_conv_3d(args.vae_chunk_size) + logger.info(f"Set chunk_size to {args.vae_chunk_size} for CausalConv3d in VAE") + if args.vae_spatial_tile_sample_min_size is not None: + vae.enable_spatial_tiling(True) + vae.tile_sample_min_size = args.vae_spatial_tile_sample_min_size + vae.tile_latent_min_size = args.vae_spatial_tile_sample_min_size // 8 + elif args.vae_tiling: + vae.enable_spatial_tiling(True) + + # load DiT model + blocks_to_swap = args.blocks_to_swap if args.blocks_to_swap else 0 + loading_device = "cpu" if blocks_to_swap > 0 else accelerator.device + + logger.info(f"Loading DiT model from {args.dit}") + if args.sdpa: + attn_mode = "torch" + elif args.flash_attn: + attn_mode = "flash" + elif args.sage_attn: + attn_mode = "sageattn" + elif args.xformers: + attn_mode = "xformers" + else: + raise ValueError( + f"either --sdpa, --flash-attn, --sage-attn or --xformers must be specified / --sdpa, --flash-attn, --sage-attn, --xformersのいずれかを指定してください" + ) + transformer = load_transformer( + args.dit, attn_mode, args.split_attn, loading_device, None, in_channels=args.dit_in_channels + ) # load as is + + if blocks_to_swap > 0: + logger.info(f"enable swap {blocks_to_swap} blocks to 
CPU from device: {accelerator.device}") + transformer.enable_block_swap(blocks_to_swap, accelerator.device, supports_backward=True) + transformer.move_to_device_except_swap_blocks(accelerator.device) + if args.img_in_txt_in_offloading: + logger.info("Enable offloading img_in and txt_in to CPU") + transformer.enable_img_in_txt_in_offloading() + + if args.gradient_checkpointing: + transformer.enable_gradient_checkpointing() + + # prepare optimizer, data loader etc. + accelerator.print("prepare optimizer, data loader etc.") + + transformer.requires_grad_(False) + if accelerator.is_main_process: + accelerator.print(f"Trainable modules '{args.trainable_modules}'.") + for name, param in transformer.named_parameters(): + for trainable_module_name in args.trainable_modules: + if trainable_module_name in name: + param.requires_grad = True + break + + total_params = list(transformer.parameters()) + trainable_params = list(filter(lambda p: p.requires_grad, transformer.parameters())) + logger.info( + f"number of trainable parameters: {sum(p.numel() for p in trainable_params) / 1e6} M, total paramters: {sum(p.numel() for p in total_params) / 1e6} M" + ) + optimizer_name, optimizer_args, optimizer, optimizer_train_fn, optimizer_eval_fn = self.get_optimizer( + args, trainable_params + ) + + # prepare dataloader + + # num workers for data loader: if 0, persistent_workers is not available + n_workers = min(args.max_data_loader_n_workers, os.cpu_count()) # cpu_count or max_data_loader_n_workers + + train_dataloader = torch.utils.data.DataLoader( + train_dataset_group, + batch_size=1, + shuffle=True, + collate_fn=collator, + num_workers=n_workers, + persistent_workers=args.persistent_data_loader_workers, + ) + + # calculate max_train_steps + if args.max_train_epochs is not None: + args.max_train_steps = args.max_train_epochs * math.ceil( + len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps + ) + accelerator.print( + f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}" + ) + + # send max_train_steps to train_dataset_group + train_dataset_group.set_max_train_steps(args.max_train_steps) + + # prepare lr_scheduler + lr_scheduler = self.get_scheduler(args, optimizer, accelerator.num_processes) + + # prepare training model. 
accelerator does some magic here + + # experimental feature: train the model with gradients in fp16/bf16 + dit_dtype = torch.float32 + if args.full_fp16: + assert ( + args.mixed_precision == "fp16" + ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。" + accelerator.print("enable full fp16 training.") + dit_weight_dtype = torch.float16 + elif args.full_bf16: + assert ( + args.mixed_precision == "bf16" + ), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。" + accelerator.print("enable full bf16 training.") + dit_weight_dtype = torch.bfloat16 + else: + dit_weight_dtype = torch.float32 + + # TODO add fused optimizer and stochastic rounding + + # cast model to dit_weight_dtype + # if dit_dtype != dit_weight_dtype: + logger.info(f"casting model to {dit_weight_dtype}") + transformer.to(dit_weight_dtype) + + if blocks_to_swap > 0: + transformer = accelerator.prepare(transformer, device_placement=[not blocks_to_swap > 0]) + accelerator.unwrap_model(transformer).move_to_device_except_swap_blocks(accelerator.device) # reduce peak memory usage + accelerator.unwrap_model(transformer).prepare_block_swap_before_forward() + else: + transformer = accelerator.prepare(transformer) + + optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler) + + transformer.train() + + if args.full_fp16: + # patch accelerator for fp16 training + # def patch_accelerator_for_fp16_training(accelerator): + org_unscale_grads = accelerator.scaler._unscale_grads_ + + def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16): + return org_unscale_grads(optimizer, inv_scale, found_inf, True) + + accelerator.scaler._unscale_grads_ = _unscale_grads_replacer + + # resume from local or huggingface. 
accelerator.step is set + self.resume_from_local_or_hf_if_specified(accelerator, args) # accelerator.load_state(args.resume) + + # epoch数を計算する + num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) + num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) + + # 学習する + # total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps + + accelerator.print("running training / 学習開始") + accelerator.print(f" num train items / 学習画像、動画数: {train_dataset_group.num_train_items}") + accelerator.print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}") + accelerator.print(f" num epochs / epoch数: {num_train_epochs}") + accelerator.print( + f" batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}" + ) + # accelerator.print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}") + accelerator.print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}") + accelerator.print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}") + + if accelerator.is_main_process: + init_kwargs = {} + if args.wandb_run_name: + init_kwargs["wandb"] = {"name": args.wandb_run_name} + if args.log_tracker_config is not None: + init_kwargs = toml.load(args.log_tracker_config) + accelerator.init_trackers( + "hunyuan_video_ft" if args.log_tracker_name is None else args.log_tracker_name, + config=train_utils.get_sanitized_config_or_none(args), + init_kwargs=init_kwargs, + ) + + # TODO skip until initial step + progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps") + + epoch_to_start = 0 + global_step = 0 + noise_scheduler = FlowMatchDiscreteScheduler(shift=args.discrete_flow_shift, reverse=True, solver="euler") + + loss_recorder = train_utils.LossRecorder() + del train_dataset_group + + # function for saving/removing + def save_model(ckpt_name: str, unwrapped_nw, steps, epoch_no, force_sync_upload=False): + os.makedirs(args.output_dir, exist_ok=True) + ckpt_file = os.path.join(args.output_dir, ckpt_name) + + accelerator.print(f"\nsaving checkpoint: {ckpt_file}") + + title = args.metadata_title if args.metadata_title is not None else args.output_name + if args.min_timestep is not None or args.max_timestep is not None: + min_time_step = args.min_timestep if args.min_timestep is not None else 0 + max_time_step = args.max_timestep if args.max_timestep is not None else 1000 + md_timesteps = (min_time_step, max_time_step) + else: + md_timesteps = None + + sai_metadata = sai_model_spec.build_metadata( + None, + ARCHITECTURE_HUNYUAN_VIDEO, + time.time(), + title, + None, + args.metadata_author, + args.metadata_description, + args.metadata_license, + args.metadata_tags, + timesteps=md_timesteps, + is_lora=False, + ) + + save_file(unwrapped_nw.state_dict(), ckpt_file, sai_metadata) + if args.huggingface_repo_id is not None: + huggingface_utils.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload) + + def remove_model(old_ckpt_name): + old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name) + if os.path.exists(old_ckpt_file): + accelerator.print(f"removing old checkpoint: {old_ckpt_file}") + os.remove(old_ckpt_file) + + # For --sample_at_first + optimizer_eval_fn() + self.sample_images(accelerator, args, 0, global_step, accelerator.device, vae, transformer, sample_parameters) + 
optimizer_train_fn() + if len(accelerator.trackers) > 0: + # log empty object to commit the sample images to wandb + accelerator.log({}, step=0) + + # training loop + + # log device and dtype for each model + logger.info(f"DiT dtype: {transformer.dtype}, device: {transformer.device}") + + clean_memory_on_device(accelerator.device) + + pos_embed_cache = {} + + for epoch in range(epoch_to_start, num_train_epochs): + accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}") + current_epoch.value = epoch + 1 + + for step, batch in enumerate(train_dataloader): + latents, llm_embeds, llm_mask, clip_embeds = batch + bsz = latents.shape[0] + current_step.value = global_step + + with accelerator.accumulate(transformer): + latents = latents * vae_module.SCALING_FACTOR + + # Sample noise that we'll add to the latents + noise = torch.randn_like(latents) + + # calculate model input and timesteps + noisy_model_input, timesteps = self.get_noisy_model_input_and_timesteps( + args, noise, latents, noise_scheduler, accelerator.device, dit_dtype + ) + + weighting = compute_loss_weighting_for_sd3( + args.weighting_scheme, noise_scheduler, timesteps, accelerator.device, dit_dtype + ) + + # ensure guidance_scale in args is float + guidance_vec = torch.full((bsz,), float(args.guidance_scale), device=accelerator.device) # , dtype=dit_dtype) + + # ensure the hidden state will require grad + if args.gradient_checkpointing: + noisy_model_input.requires_grad_(True) + guidance_vec.requires_grad_(True) + + pos_emb_shape = latents.shape[1:] + if pos_emb_shape not in pos_embed_cache: + freqs_cos, freqs_sin = get_rotary_pos_embed_by_shape( + accelerator.unwrap_model(transformer), latents.shape[2:] + ) + # freqs_cos = freqs_cos.to(device=accelerator.device, dtype=dit_dtype) + # freqs_sin = freqs_sin.to(device=accelerator.device, dtype=dit_dtype) + pos_embed_cache[pos_emb_shape] = (freqs_cos, freqs_sin) + else: + freqs_cos, freqs_sin = pos_embed_cache[pos_emb_shape] + + # call DiT + latents = latents.to(device=accelerator.device, dtype=dit_dtype) + noisy_model_input = noisy_model_input.to(device=accelerator.device, dtype=dit_dtype) + # timesteps = timesteps.to(device=accelerator.device, dtype=dit_dtype) + # llm_embeds = llm_embeds.to(device=accelerator.device, dtype=dit_dtype) + # llm_mask = llm_mask.to(device=accelerator.device) + # clip_embeds = clip_embeds.to(device=accelerator.device, dtype=dit_dtype) + with accelerator.autocast(): + model_pred = transformer( + noisy_model_input, + timesteps, + text_states=llm_embeds, + text_mask=llm_mask, + text_states_2=clip_embeds, + freqs_cos=freqs_cos, + freqs_sin=freqs_sin, + guidance=guidance_vec, + return_dict=False, + ) + + # flow matching loss + target = noise - latents + + loss = torch.nn.functional.mse_loss(model_pred.to(dit_dtype), target, reduction="none") + + if weighting is not None: + loss = loss * weighting + # loss = loss.mean([1, 2, 3]) + # # min snr gamma, scale v pred loss like noise pred, v pred like loss, debiased estimation etc. 
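+                    # (note) the active objective above is plain flow-matching MSE against
+                    # target = noise - latents; the commented-out lines sketch optional per-sample
+                    # reductions / reweightings (e.g. min-SNR) that this script does not enable.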
+ # loss = self.post_process_loss(loss, args, timesteps, noise_scheduler) + + loss = loss.mean() # 平均なのでbatch_sizeで割る必要なし + + accelerator.backward(loss) + if accelerator.sync_gradients: + # self.all_reduce_network(accelerator, network) # sync DDP grad manually + state = accelerate.PartialState() + if state.distributed_type != accelerate.DistributedType.NO: + for param in transformer.parameters(): + if param.grad is not None: + param.grad = accelerator.reduce(param.grad, reduction="mean") + + if args.max_grad_norm != 0.0: + params_to_clip = accelerator.unwrap_model(transformer).parameters() + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) + + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad(set_to_none=True) + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + + optimizer_eval_fn() + self.sample_images( + accelerator, args, None, global_step, accelerator.device, vae, transformer, sample_parameters + ) + + # 指定ステップごとにモデルを保存 + if args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + ckpt_name = train_utils.get_step_ckpt_name(args.output_name, global_step) + save_model(ckpt_name, accelerator.unwrap_model(transformer), global_step, epoch) + + if args.save_state: + train_utils.save_and_remove_state_stepwise(args, accelerator, global_step) + + remove_step_no = train_utils.get_remove_step_no(args, global_step) + if remove_step_no is not None: + remove_ckpt_name = train_utils.get_step_ckpt_name(args.output_name, remove_step_no) + remove_model(remove_ckpt_name) + optimizer_train_fn() + + current_loss = loss.detach().item() + loss_recorder.add(epoch=epoch, step=step, loss=current_loss) + avr_loss: float = loss_recorder.moving_average + logs = {"avr_loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if len(accelerator.trackers) > 0: + logs = {"loss": current_loss, "lr": lr_scheduler.get_last_lr()[0]} + accelerator.log(logs, step=global_step) + + if global_step >= args.max_train_steps: + break + + if len(accelerator.trackers) > 0: + logs = {"loss/epoch": loss_recorder.moving_average} + accelerator.log(logs, step=epoch + 1) + + accelerator.wait_for_everyone() + + # 指定エポックごとにモデルを保存 + optimizer_eval_fn() + if args.save_every_n_epochs is not None: + saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs + if is_main_process and saving: + ckpt_name = train_utils.get_epoch_ckpt_name(args.output_name, epoch + 1) + save_model(ckpt_name, accelerator.unwrap_model(transformer), global_step, epoch + 1) + + remove_epoch_no = train_utils.get_remove_epoch_no(args, epoch + 1) + if remove_epoch_no is not None: + remove_ckpt_name = train_utils.get_epoch_ckpt_name(args.output_name, remove_epoch_no) + remove_model(remove_ckpt_name) + + if args.save_state: + train_utils.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1) + + self.sample_images(accelerator, args, epoch + 1, global_step, accelerator.device, vae, transformer, sample_parameters) + optimizer_train_fn() + + # end of epoch + + if is_main_process: + transformer = accelerator.unwrap_model(transformer) + + accelerator.end_training() + optimizer_eval_fn() + + if args.save_state or args.save_state_on_train_end: + train_utils.save_state_on_train_end(args, accelerator) + + if is_main_process: + ckpt_name = 
train_utils.get_last_ckpt_name(args.output_name) + save_model(ckpt_name, transformer, global_step, num_train_epochs, force_sync_upload=True) + + logger.info("model saved.") + + +def setup_parser() -> argparse.ArgumentParser: + def int_or_float(value): + if value.endswith("%"): + try: + return float(value[:-1]) / 100.0 + except ValueError: + raise argparse.ArgumentTypeError(f"Value '{value}' is not a valid percentage") + try: + float_value = float(value) + if float_value >= 1 and float_value.is_integer(): + return int(value) + return float(value) + except ValueError: + raise argparse.ArgumentTypeError(f"'{value}' is not an int or float") + + parser = argparse.ArgumentParser() + + # general settings + parser.add_argument( + "--config_file", + type=str, + default=None, + help="using .toml instead of args to pass hyperparameter / ハイパーパラメータを引数ではなく.tomlファイルで渡す", + ) + parser.add_argument( + "--dataset_config", + type=pathlib.Path, + default=None, + required=True, + help="config file for dataset / データセットの設定ファイル", + ) + + # training settings + parser.add_argument( + "--sdpa", + action="store_true", + help="use sdpa for CrossAttention (requires PyTorch 2.0) / CrossAttentionにsdpaを使う(PyTorch 2.0が必要)", + ) + parser.add_argument( + "--flash_attn", + action="store_true", + help="use FlashAttention for CrossAttention, requires FlashAttention / CrossAttentionにFlashAttentionを使う、FlashAttentionが必要", + ) + parser.add_argument( + "--sage_attn", + action="store_true", + help="use SageAttention. requires SageAttention / SageAttentionを使う。SageAttentionが必要", + ) + parser.add_argument( + "--xformers", + action="store_true", + help="use xformers for CrossAttention, requires xformers / CrossAttentionにxformersを使う、xformersが必要", + ) + parser.add_argument( + "--split_attn", + action="store_true", + help="use split attention for attention calculation (split batch size=1, affects memory usage and speed)" + " / attentionを分割して計算する(バッチサイズ=1に分割、メモリ使用量と速度に影響)", + ) + + parser.add_argument("--max_train_steps", type=int, default=1600, help="training steps / 学習ステップ数") + parser.add_argument( + "--max_train_epochs", + type=int, + default=None, + help="training epochs (overrides max_train_steps) / 学習エポック数(max_train_stepsを上書きします)", + ) + parser.add_argument( + "--max_data_loader_n_workers", + type=int, + default=8, + help="max num workers for DataLoader (lower is less main RAM usage, faster epoch start and slower data loading) / DataLoaderの最大プロセス数(小さい値ではメインメモリの使用量が減りエポック間の待ち時間が減りますが、データ読み込みは遅くなります)", + ) + parser.add_argument( + "--persistent_data_loader_workers", + action="store_true", + help="persistent DataLoader workers (useful for reduce time gap between epoch, but may use more memory) / DataLoader のワーカーを持続させる (エポック間の時間差を少なくするのに有効だが、より多くのメモリを消費する可能性がある)", + ) + parser.add_argument("--seed", type=int, default=None, help="random seed for training / 学習時の乱数のseed") + parser.add_argument( + "--gradient_checkpointing", action="store_true", help="enable gradient checkpointing / gradient checkpointingを有効にする" + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数", + ) + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="use mixed precision / 混合精度を使う場合、その精度", + ) + parser.add_argument("--trainable_modules", nargs="+", default=".", help="Enter a list of trainable modules") + + parser.add_argument( + "--logging_dir", + type=str, + 
default=None, + help="enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する", + ) + parser.add_argument( + "--log_with", + type=str, + default=None, + choices=["tensorboard", "wandb", "all"], + help="what logging tool(s) to use (if 'all', TensorBoard and WandB are both used) / ログ出力に使用するツール (allを指定するとTensorBoardとWandBの両方が使用される)", + ) + parser.add_argument( + "--log_prefix", type=str, default=None, help="add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列" + ) + parser.add_argument( + "--log_tracker_name", + type=str, + default=None, + help="name of tracker to use for logging, default is script-specific default name / ログ出力に使用するtrackerの名前、省略時はスクリプトごとのデフォルト名", + ) + parser.add_argument( + "--wandb_run_name", + type=str, + default=None, + help="The name of the specific wandb session / wandb ログに表示される特定の実行の名前", + ) + parser.add_argument( + "--log_tracker_config", + type=str, + default=None, + help="path to tracker config file to use for logging / ログ出力に使用するtrackerの設定ファイルのパス", + ) + parser.add_argument( + "--wandb_api_key", + type=str, + default=None, + help="specify WandB API key to log in before starting training (optional). / WandB APIキーを指定して学習開始前にログインする(オプション)", + ) + parser.add_argument("--log_config", action="store_true", help="log training configuration / 学習設定をログに出力する") + + parser.add_argument( + "--ddp_timeout", + type=int, + default=None, + help="DDP timeout (min, None for default of accelerate) / DDPのタイムアウト(分、Noneでaccelerateのデフォルト)", + ) + parser.add_argument( + "--ddp_gradient_as_bucket_view", + action="store_true", + help="enable gradient_as_bucket_view for DDP / DDPでgradient_as_bucket_viewを有効にする", + ) + parser.add_argument( + "--ddp_static_graph", + action="store_true", + help="enable static_graph for DDP / DDPでstatic_graphを有効にする", + ) + + parser.add_argument( + "--sample_every_n_steps", + type=int, + default=None, + help="generate sample images every N steps / 学習中のモデルで指定ステップごとにサンプル出力する", + ) + parser.add_argument( + "--sample_at_first", action="store_true", help="generate sample images before training / 学習前にサンプル出力する" + ) + parser.add_argument( + "--sample_every_n_epochs", + type=int, + default=None, + help="generate sample images every N epochs (overwrites n_steps) / 学習中のモデルで指定エポックごとにサンプル出力する(ステップ数指定を上書きします)", + ) + parser.add_argument( + "--sample_prompts", + type=str, + default=None, + help="file for prompts to generate sample images / 学習中モデルのサンプル出力用プロンプトのファイル", + ) + + # optimizer and lr scheduler settings + parser.add_argument( + "--optimizer_type", + type=str, + default="", + help="Optimizer to use / オプティマイザの種類: AdamW (default), AdamW8bit, AdaFactor. " + "Also, you can use any optimizer by specifying the full path to the class, like 'torch.optim.AdamW', 'bitsandbytes.optim.AdEMAMix8bit' or 'bitsandbytes.optim.PagedAdEMAMix8bit' etc. 
/ ", + ) + parser.add_argument( + "--optimizer_args", + type=str, + default=None, + nargs="*", + help='additional arguments for optimizer (like "weight_decay=0.01 betas=0.9,0.999 ...") / オプティマイザの追加引数(例: "weight_decay=0.01 betas=0.9,0.999 ...")', + ) + parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") + parser.add_argument( + "--max_grad_norm", + default=1.0, + type=float, + help="Max gradient norm, 0 for no clipping / 勾配正規化の最大norm、0でclippingを行わない", + ) + + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup, adafactor", + ) + parser.add_argument( + "--lr_warmup_steps", + type=int_or_float, + default=0, + help="Int number of steps for the warmup in the lr scheduler (default is 0) or float with ratio of train steps" + " / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)、または学習ステップの比率(1未満のfloat値の場合)", + ) + parser.add_argument( + "--lr_decay_steps", + type=int_or_float, + default=0, + help="Int number of steps for the decay in the lr scheduler (default is 0) or float (<1) with ratio of train steps" + " / 学習率のスケジューラを減衰させるステップ数(デフォルト0)、または学習ステップの比率(1未満のfloat値の場合)", + ) + parser.add_argument( + "--lr_scheduler_num_cycles", + type=int, + default=1, + help="Number of restarts for cosine scheduler with restarts / cosine with restartsスケジューラでのリスタート回数", + ) + parser.add_argument( + "--lr_scheduler_power", + type=float, + default=1, + help="Polynomial power for polynomial scheduler / polynomialスケジューラでのpolynomial power", + ) + parser.add_argument( + "--lr_scheduler_timescale", + type=int, + default=None, + help="Inverse sqrt timescale for inverse sqrt scheduler,defaults to `num_warmup_steps`" + + " / 逆平方根スケジューラのタイムスケール、デフォルトは`num_warmup_steps`", + ) + parser.add_argument( + "--lr_scheduler_min_lr_ratio", + type=float, + default=None, + help="The minimum learning rate as a ratio of the initial learning rate for cosine with min lr scheduler and warmup decay scheduler" + + " / 初期学習率の比率としての最小学習率を指定する、cosine with min lr と warmup decay スケジューラ で有効", + ) + parser.add_argument("--lr_scheduler_type", type=str, default="", help="custom scheduler module / 使用するスケジューラ") + parser.add_argument( + "--lr_scheduler_args", + type=str, + default=None, + nargs="*", + help='additional arguments for scheduler (like "T_max=100") / スケジューラの追加引数(例: "T_max100")', + ) + + # model settings + parser.add_argument("--dit", type=str, required=True, help="DiT checkpoint path / DiTのチェックポイントのパス") + parser.add_argument("--dit_dtype", type=str, default=None, help="data type for DiT, default is bfloat16") + parser.add_argument("--dit_in_channels", type=int, default=16, help="input channels for DiT, default is 16, skyreels I2V is 32") + parser.add_argument("--vae", type=str, help="VAE checkpoint path / VAEのチェックポイントのパス") + parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is float16") + parser.add_argument( + "--vae_tiling", + action="store_true", + help="enable spatial tiling for VAE, default is False. If vae_spatial_tile_sample_min_size is set, this is automatically enabled." 
+ " / VAEの空間タイリングを有効にする、デフォルトはFalse。vae_spatial_tile_sample_min_sizeが設定されている場合、自動的に有効になります。", + ) + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + parser.add_argument("--text_encoder1", type=str, help="Text Encoder 1 directory / テキストエンコーダ1のディレクトリ") + parser.add_argument("--text_encoder2", type=str, help="Text Encoder 2 directory / テキストエンコーダ2のディレクトリ") + parser.add_argument("--text_encoder_dtype", type=str, default=None, help="data type for Text Encoder, default is float16") + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for LLM / LLMにfp8を使う") + parser.add_argument("--full_fp16", action="store_true", help="fp16 training including gradients / 勾配も含めてfp16で学習する") + parser.add_argument("--full_bf16", action="store_true", help="bf16 training including gradients / 勾配も含めてbf16で学習する") + + parser.add_argument( + "--blocks_to_swap", + type=int, + default=None, + help="number of blocks to swap in the model, max XXX / モデル内のブロックの数、最大XXX", + ) + parser.add_argument( + "--img_in_txt_in_offloading", + action="store_true", + help="offload img_in and txt_in to cpu / img_inとtxt_inをCPUにオフロードする", + ) + + # parser.add_argument("--flow_shift", type=float, default=7.0, help="Shift factor for flow matching schedulers") + parser.add_argument("--guidance_scale", type=float, default=1.0, help="Embeded classifier free guidance scale.") + parser.add_argument( + "--timestep_sampling", + choices=["sigma", "uniform", "sigmoid", "shift"], + default="sigma", + help="Method to sample timesteps: sigma-based, uniform random, sigmoid of random normal and shift of sigmoid." + " / タイムステップをサンプリングする方法:sigma、random uniform、random normalのsigmoid、sigmoidのシフト。", + ) + parser.add_argument( + "--discrete_flow_shift", + type=float, + default=1.0, + help="Discrete flow shift for the Euler Discrete Scheduler, default is 1.0. / Euler Discrete Schedulerの離散フローシフト、デフォルトは1.0。", + ) + parser.add_argument( + "--sigmoid_scale", + type=float, + default=1.0, + help='Scale factor for sigmoid timestep sampling (only used when timestep-sampling is "sigmoid" or "shift"). / sigmoidタイムステップサンプリングの倍率(timestep-samplingが"sigmoid"または"shift"の場合のみ有効)。', + ) + parser.add_argument( + "--weighting_scheme", + type=str, + default="none", + choices=["logit_normal", "mode", "cosmap", "sigma_sqrt", "none"], + help="weighting scheme for timestep distribution. Default is none" + " / タイムステップ分布の重み付けスキーム、デフォルトはnone", + ) + parser.add_argument( + "--logit_mean", + type=float, + default=0.0, + help="mean to use when using the `'logit_normal'` weighting scheme / `'logit_normal'`重み付けスキームを使用する場合の平均", + ) + parser.add_argument( + "--logit_std", + type=float, + default=1.0, + help="std to use when using the `'logit_normal'` weighting scheme / `'logit_normal'`重み付けスキームを使用する場合のstd", + ) + parser.add_argument( + "--mode_scale", + type=float, + default=1.29, + help="Scale of mode weighting scheme. 
Only effective when using the `'mode'` as the `weighting_scheme` / モード重み付けスキームのスケール", + ) + parser.add_argument( + "--min_timestep", + type=int, + default=None, + help="set minimum time step for training (0~999, default is 0) / 学習時のtime stepの最小値を設定する(0~999で指定、省略時はデフォルト値(0)) ", + ) + parser.add_argument( + "--max_timestep", + type=int, + default=None, + help="set maximum time step for training (1~1000, default is 1000) / 学習時のtime stepの最大値を設定する(1~1000で指定、省略時はデフォルト値(1000))", + ) + + # save and load settings + parser.add_argument( + "--output_dir", type=str, default=None, help="directory to output trained model / 学習後のモデル出力先ディレクトリ" + ) + parser.add_argument( + "--output_name", + type=str, + default=None, + required=True, + help="base name of trained model file / 学習後のモデルの拡張子を除くファイル名", + ) + parser.add_argument("--resume", type=str, default=None, help="saved state to resume training / 学習再開するモデルのstate") + + parser.add_argument( + "--save_every_n_epochs", + type=int, + default=None, + help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する", + ) + parser.add_argument( + "--save_every_n_steps", + type=int, + default=None, + help="save checkpoint every N steps / 学習中のモデルを指定ステップごとに保存する", + ) + parser.add_argument( + "--save_last_n_epochs", + type=int, + default=None, + help="save last N checkpoints when saving every N epochs (remove older checkpoints) / 指定エポックごとにモデルを保存するとき最大Nエポック保存する(古いチェックポイントは削除する)", + ) + parser.add_argument( + "--save_last_n_epochs_state", + type=int, + default=None, + help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きする)", + ) + parser.add_argument( + "--save_last_n_steps", + type=int, + default=None, + help="save checkpoints until N steps elapsed (remove older checkpoints if N steps elapsed) / 指定ステップごとにモデルを保存するとき、このステップ数経過するまで保存する(このステップ数経過したら削除する)", + ) + parser.add_argument( + "--save_last_n_steps_state", + type=int, + default=None, + help="save states until N steps elapsed (remove older states if N steps elapsed, overrides --save_last_n_steps) / 指定ステップごとにstateを保存するとき、このステップ数経過するまで保存する(このステップ数経過したら削除する。--save_last_n_stepsを上書きする)", + ) + parser.add_argument( + "--save_state", + action="store_true", + help="save training state additionally (including optimizer states etc.) when saving model / optimizerなど学習状態も含めたstateをモデル保存時に追加で保存する", + ) + parser.add_argument( + "--save_state_on_train_end", + action="store_true", + help="save training state (including optimizer states etc.) 
on train end even if --save_state is not specified" + " / --save_stateが未指定時にもoptimizerなど学習状態も含めたstateを学習終了時に保存する", + ) + + # SAI Model spec + parser.add_argument( + "--metadata_title", + type=str, + default=None, + help="title for model metadata (default is output_name) / メタデータに書き込まれるモデルタイトル、省略時はoutput_name", + ) + parser.add_argument( + "--metadata_author", + type=str, + default=None, + help="author name for model metadata / メタデータに書き込まれるモデル作者名", + ) + parser.add_argument( + "--metadata_description", + type=str, + default=None, + help="description for model metadata / メタデータに書き込まれるモデル説明", + ) + parser.add_argument( + "--metadata_license", + type=str, + default=None, + help="license for model metadata / メタデータに書き込まれるモデルライセンス", + ) + parser.add_argument( + "--metadata_tags", + type=str, + default=None, + help="tags for model metadata, separated by comma / メタデータに書き込まれるモデルタグ、カンマ区切り", + ) + + # huggingface settings + parser.add_argument( + "--huggingface_repo_id", + type=str, + default=None, + help="huggingface repo name to upload / huggingfaceにアップロードするリポジトリ名", + ) + parser.add_argument( + "--huggingface_repo_type", + type=str, + default=None, + help="huggingface repo type to upload / huggingfaceにアップロードするリポジトリの種類", + ) + parser.add_argument( + "--huggingface_path_in_repo", + type=str, + default=None, + help="huggingface model path to upload files / huggingfaceにアップロードするファイルのパス", + ) + parser.add_argument("--huggingface_token", type=str, default=None, help="huggingface token / huggingfaceのトークン") + parser.add_argument( + "--huggingface_repo_visibility", + type=str, + default=None, + help="huggingface repository visibility ('public' for public, 'private' or None for private) / huggingfaceにアップロードするリポジトリの公開設定('public'で公開、'private'またはNoneで非公開)", + ) + parser.add_argument( + "--save_state_to_huggingface", action="store_true", help="save state to huggingface / huggingfaceにstateを保存する" + ) + parser.add_argument( + "--resume_from_huggingface", + action="store_true", + help="resume from huggingface (ex: --resume {repo_id}/{path_in_repo}:{revision}:{repo_type}) / huggingfaceから学習を再開する(例: --resume {repo_id}/{path_in_repo}:{revision}:{repo_type})", + ) + parser.add_argument( + "--async_upload", + action="store_true", + help="upload to huggingface asynchronously / huggingfaceに非同期でアップロードする", + ) + + return parser + + +def read_config_from_file(args: argparse.Namespace, parser: argparse.ArgumentParser): + if not args.config_file: + return args + + config_path = args.config_file + ".toml" if not args.config_file.endswith(".toml") else args.config_file + + if not os.path.exists(config_path): + logger.info(f"{config_path} not found.") + exit(1) + + logger.info(f"Loading settings from {config_path}...") + with open(config_path, "r", encoding="utf-8") as f: + config_dict = toml.load(f) + + # combine all sections into one + ignore_nesting_dict = {} + for section_name, section_dict in config_dict.items(): + # if value is not dict, save key and value as is + if not isinstance(section_dict, dict): + ignore_nesting_dict[section_name] = section_dict + continue + + # if value is dict, save all key and value into one dict + for key, value in section_dict.items(): + ignore_nesting_dict[key] = value + + config_args = argparse.Namespace(**ignore_nesting_dict) + args = parser.parse_args(namespace=config_args) + args.config_file = os.path.splitext(args.config_file)[0] + logger.info(args.config_file) + + return args + + +def main(): + parser = setup_parser() + + args = parser.parse_args() + args = read_config_from_file(args, parser) + 
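+    # Example invocation (illustrative sketch: the script path and all values are placeholders,
+    # only the flag names come from setup_parser above):
+    #   python <this_script>.py --dit /path/to/dit.safetensors --dataset_config dataset.toml \
+    #       --output_dir output --output_name hv-ft --mixed_precision bf16 --sdpa \
+    #       --optimizer_type AdamW8bit --learning_rate 2e-6 --max_train_steps 1600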
+ trainer = FineTuningTrainer() + trainer.train(args) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_train_network.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_train_network.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a37c5719d36c631e673eb309ef3f14cf59b8ea --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/hv_train_network.py @@ -0,0 +1,2706 @@ +import ast +import asyncio +from datetime import timedelta +import gc +import importlib +import argparse +import math +import os +import pathlib +import re +import sys +import random +import time +import json +from multiprocessing import Value +from typing import Any, Dict, List, Optional +import accelerate +import numpy as np +from packaging.version import Version +from PIL import Image + +import huggingface_hub +import toml + +import torch +from tqdm import tqdm +from accelerate.utils import TorchDynamoPlugin, set_seed, DynamoBackend +from accelerate import Accelerator, InitProcessGroupKwargs, DistributedDataParallelKwargs, PartialState +from safetensors.torch import load_file +import transformers +from diffusers.optimization import ( + SchedulerType as DiffusersSchedulerType, + TYPE_TO_SCHEDULER_FUNCTION as DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION, +) +from transformers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION + +from musubi_tuner.dataset import config_utils +from musubi_tuner.hunyuan_model.models import load_transformer, get_rotary_pos_embed_by_shape, HYVideoDiffusionTransformer +import musubi_tuner.hunyuan_model.text_encoder as text_encoder_module +from musubi_tuner.hunyuan_model.vae import load_vae, VAE_VER +import musubi_tuner.hunyuan_model.vae as vae_module +from musubi_tuner.modules.scheduling_flow_match_discrete import FlowMatchDiscreteScheduler +import musubi_tuner.networks.lora as lora_module +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_HUNYUAN_VIDEO, ARCHITECTURE_HUNYUAN_VIDEO_FULL +from musubi_tuner.hv_generate_video import save_images_grid, save_videos_grid, resize_image_to_bucket, encode_to_latents + +import logging + +from musubi_tuner.utils import huggingface_utils, model_utils, train_utils, sai_model_spec + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +SS_METADATA_KEY_BASE_MODEL_VERSION = "ss_base_model_version" +SS_METADATA_KEY_NETWORK_MODULE = "ss_network_module" +SS_METADATA_KEY_NETWORK_DIM = "ss_network_dim" +SS_METADATA_KEY_NETWORK_ALPHA = "ss_network_alpha" +SS_METADATA_KEY_NETWORK_ARGS = "ss_network_args" + +SS_METADATA_MINIMUM_KEYS = [ + SS_METADATA_KEY_BASE_MODEL_VERSION, + SS_METADATA_KEY_NETWORK_MODULE, + SS_METADATA_KEY_NETWORK_DIM, + SS_METADATA_KEY_NETWORK_ALPHA, + SS_METADATA_KEY_NETWORK_ARGS, +] + + +def clean_memory_on_device(device: torch.device): + r""" + Clean memory on the specified device, will be called from training scripts. 
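+
+    Example (illustrative): clean_memory_on_device(accelerator.device) after large tensors are
+    released, or clean_memory_on_device(torch.device("cuda")) to target CUDA explicitly.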
+ """ + gc.collect() + + # device may "cuda" or "cuda:0", so we need to check the type of device + if device.type == "cuda": + torch.cuda.empty_cache() + if device.type == "xpu": + torch.xpu.empty_cache() + if device.type == "mps": + torch.mps.empty_cache() + + +# for collate_fn: epoch and step is multiprocessing.Value +class collator_class: + def __init__(self, epoch, step, dataset): + self.current_epoch = epoch + self.current_step = step + self.dataset = dataset # not used if worker_info is not None, in case of multiprocessing + + def __call__(self, examples): + worker_info = torch.utils.data.get_worker_info() + # worker_info is None in the main process + if worker_info is not None: + dataset = worker_info.dataset + else: + dataset = self.dataset + + # set epoch and step + dataset.set_current_epoch(self.current_epoch.value) + dataset.set_current_step(self.current_step.value) + return examples[0] + + +def prepare_accelerator(args: argparse.Namespace) -> Accelerator: + """ + DeepSpeed is not supported in this script currently. + """ + if args.logging_dir is None: + logging_dir = None + else: + log_prefix = "" if args.log_prefix is None else args.log_prefix + logging_dir = args.logging_dir + "/" + log_prefix + time.strftime("%Y%m%d%H%M%S", time.localtime()) + + if args.log_with is None: + if logging_dir is not None: + log_with = "tensorboard" + else: + log_with = None + else: + log_with = args.log_with + if log_with in ["tensorboard", "all"]: + if logging_dir is None: + raise ValueError( + "logging_dir is required when log_with is tensorboard / Tensorboardを使う場合、logging_dirを指定してください" + ) + if log_with in ["wandb", "all"]: + try: + import wandb + except ImportError: + raise ImportError("No wandb / wandb がインストールされていないようです") + if logging_dir is not None: + os.makedirs(logging_dir, exist_ok=True) + os.environ["WANDB_DIR"] = logging_dir + if args.wandb_api_key is not None: + wandb.login(key=args.wandb_api_key) + + kwargs_handlers = [ + ( + InitProcessGroupKwargs( + backend="gloo" if os.name == "nt" or not torch.cuda.is_available() else "nccl", + init_method=( + "env://?use_libuv=False" if os.name == "nt" and Version(torch.__version__) >= Version("2.4.0") else None + ), + timeout=timedelta(minutes=args.ddp_timeout) if args.ddp_timeout else None, + ) + if torch.cuda.device_count() > 1 + else None + ), + ( + DistributedDataParallelKwargs( + gradient_as_bucket_view=args.ddp_gradient_as_bucket_view, static_graph=args.ddp_static_graph + ) + if args.ddp_gradient_as_bucket_view or args.ddp_static_graph + else None + ), + ] + kwargs_handlers = [i for i in kwargs_handlers if i is not None] + + dynamo_plugin = None + if args.dynamo_backend.upper() != "NO": + dynamo_plugin = TorchDynamoPlugin( + backend=DynamoBackend(args.dynamo_backend.upper()), + mode=args.dynamo_mode, + fullgraph=args.dynamo_fullgraph, + dynamic=args.dynamo_dynamic, + ) + + accelerator = Accelerator( + gradient_accumulation_steps=args.gradient_accumulation_steps, + mixed_precision=args.mixed_precision, + log_with=log_with, + project_dir=logging_dir, + dynamo_plugin=dynamo_plugin, + kwargs_handlers=kwargs_handlers, + ) + print("accelerator device:", accelerator.device) + return accelerator + + +def line_to_prompt_dict(line: str) -> dict: + # subset of gen_img_diffusers + prompt_args = line.split(" --") + prompt_dict = {} + prompt_dict["prompt"] = prompt_args[0] + + for parg in prompt_args: + try: + m = re.match(r"w (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["width"] = int(m.group(1)) + continue + + m = re.match(r"h (\d+)", parg, 
re.IGNORECASE) + if m: + prompt_dict["height"] = int(m.group(1)) + continue + + m = re.match(r"f (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["frame_count"] = int(m.group(1)) + continue + + m = re.match(r"d (\d+)", parg, re.IGNORECASE) + if m: + prompt_dict["seed"] = int(m.group(1)) + continue + + m = re.match(r"s (\d+)", parg, re.IGNORECASE) + if m: # steps + prompt_dict["sample_steps"] = max(1, min(1000, int(m.group(1)))) + continue + + m = re.match(r"g ([\d\.]+)", parg, re.IGNORECASE) + if m: # scale + prompt_dict["guidance_scale"] = float(m.group(1)) + continue + + m = re.match(r"fs ([\d\.]+)", parg, re.IGNORECASE) + if m: # scale + prompt_dict["discrete_flow_shift"] = float(m.group(1)) + continue + + m = re.match(r"l ([\d\.]+)", parg, re.IGNORECASE) + if m: # scale + prompt_dict["cfg_scale"] = float(m.group(1)) + continue + + m = re.match(r"n (.+)", parg, re.IGNORECASE) + if m: # negative prompt + prompt_dict["negative_prompt"] = m.group(1) + continue + + m = re.match(r"i (.+)", parg, re.IGNORECASE) + if m: # image path + prompt_dict["image_path"] = m.group(1) + continue + + m = re.match(r"ei (.+)", parg, re.IGNORECASE) + if m: # end image path + prompt_dict["end_image_path"] = m.group(1) + continue + + m = re.match(r"cn (.+)", parg, re.IGNORECASE) + if m: + prompt_dict["control_video_path"] = m.group(1) + continue + + m = re.match(r"ci (.+)", parg, re.IGNORECASE) + if m: + # can be multiple control images + control_image_path = m.group(1) + if "control_image_path" not in prompt_dict: + prompt_dict["control_image_path"] = [] + prompt_dict["control_image_path"].append(control_image_path) + continue + + m = re.match(r"of (.+)", parg, re.IGNORECASE) + if m: # output folder + prompt_dict["one_frame"] = m.group(1) + continue + + except ValueError as ex: + logger.error(f"Exception in parsing / 解析エラー: {parg}") + logger.error(ex) + + return prompt_dict + + +def load_prompts(prompt_file: str) -> list[Dict]: + # read prompts + if prompt_file.endswith(".txt"): + with open(prompt_file, "r", encoding="utf-8") as f: + lines = f.readlines() + prompts = [line.strip() for line in lines if len(line.strip()) > 0 and line[0] != "#"] + elif prompt_file.endswith(".toml"): + with open(prompt_file, "r", encoding="utf-8") as f: + data = toml.load(f) + prompts = [dict(**data["prompt"], **subset) for subset in data["prompt"]["subset"]] + elif prompt_file.endswith(".json"): + with open(prompt_file, "r", encoding="utf-8") as f: + prompts = json.load(f) + + # preprocess prompts + for i in range(len(prompts)): + prompt_dict = prompts[i] + if isinstance(prompt_dict, str): + prompt_dict = line_to_prompt_dict(prompt_dict) + prompts[i] = prompt_dict + assert isinstance(prompt_dict, dict) + + # Adds an enumerator to the dict based on prompt position. Used later to name image files. Also cleanup of extra data in original prompt dict. + prompt_dict["enum"] = i + prompt_dict.pop("subset", None) + + return prompts + + +def compute_density_for_timestep_sampling( + weighting_scheme: str, batch_size: int, logit_mean: float = None, logit_std: float = None, mode_scale: float = None +): + """Compute the density for sampling the timesteps when doing SD3 training. + + Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. + + SD3 paper reference: https://arxiv.org/abs/2403.03206v1. + """ + if weighting_scheme == "logit_normal": + # See 3.1 in the SD3 paper ($rf/lognorm(0.00,1.00)$). 
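+ # logit-normal sampling: draw u ~ N(logit_mean, logit_std) and map it into (0, 1) with a sigmoid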
+ u = torch.normal(mean=logit_mean, std=logit_std, size=(batch_size,), device="cpu") + u = torch.nn.functional.sigmoid(u) + elif weighting_scheme == "mode": + u = torch.rand(size=(batch_size,), device="cpu") + u = 1 - u - mode_scale * (torch.cos(math.pi * u / 2) ** 2 - 1 + u) + else: + u = torch.rand(size=(batch_size,), device="cpu") + return u + + +def get_sigmas(noise_scheduler, timesteps, device, n_dim=4, dtype=torch.float32): + sigmas = noise_scheduler.sigmas.to(device=device, dtype=dtype) + schedule_timesteps = noise_scheduler.timesteps.to(device) + timesteps = timesteps.to(device) + + # if sum([(schedule_timesteps == t) for t in timesteps]) < len(timesteps): + if any([(schedule_timesteps == t).sum() == 0 for t in timesteps]): + # raise ValueError("Some timesteps are not in the schedule / 一部のtimestepsがスケジュールに含まれていません") + # round to nearest timestep + logger.warning("Some timesteps are not in the schedule / 一部のtimestepsがスケジュールに含まれていません") + step_indices = [torch.argmin(torch.abs(schedule_timesteps - t)).item() for t in timesteps] + else: + step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] + + sigma = sigmas[step_indices].flatten() + while len(sigma.shape) < n_dim: + sigma = sigma.unsqueeze(-1) + return sigma + + +def compute_loss_weighting_for_sd3(weighting_scheme: str, noise_scheduler, timesteps, device, dtype): + """Computes loss weighting scheme for SD3 training. + + Courtesy: This was contributed by Rafie Walker in https://github.com/huggingface/diffusers/pull/8528. + + SD3 paper reference: https://arxiv.org/abs/2403.03206v1. + """ + if weighting_scheme == "sigma_sqrt" or weighting_scheme == "cosmap": + sigmas = get_sigmas(noise_scheduler, timesteps, device, n_dim=5, dtype=dtype) + if weighting_scheme == "sigma_sqrt": + weighting = (sigmas**-2.0).float() + else: + bot = 1 - 2 * sigmas + 2 * sigmas**2 + weighting = 2 / (math.pi * bot) + else: + weighting = None # torch.ones_like(sigmas) + return weighting + + +def should_sample_images(args, steps, epoch=None): + if steps == 0: + if not args.sample_at_first: + return False + else: + should_sample_by_steps = args.sample_every_n_steps is not None and steps % args.sample_every_n_steps == 0 + should_sample_by_epochs = ( + args.sample_every_n_epochs is not None and epoch is not None and epoch % args.sample_every_n_epochs == 0 + ) + if not should_sample_by_steps and not should_sample_by_epochs: + return False + return True + + +class NetworkTrainer: + def __init__(self): + self.blocks_to_swap = None + + # TODO 他のスクリプトと共通化する + def generate_step_logs( + self, + args: argparse.Namespace, + current_loss, + avr_loss, + lr_scheduler, + lr_descriptions, + optimizer=None, + keys_scaled=None, + mean_norm=None, + maximum_norm=None, + ): + network_train_unet_only = True + logs = {"loss/current": current_loss, "loss/average": avr_loss} + + if keys_scaled is not None: + logs["max_norm/keys_scaled"] = keys_scaled + logs["max_norm/average_key_norm"] = mean_norm + logs["max_norm/max_key_norm"] = maximum_norm + + lrs = lr_scheduler.get_last_lr() + for i, lr in enumerate(lrs): + if lr_descriptions is not None: + lr_desc = lr_descriptions[i] + else: + idx = i - (0 if network_train_unet_only else -1) + if idx == -1: + lr_desc = "textencoder" + else: + if len(lrs) > 2: + lr_desc = f"group{idx}" + else: + lr_desc = "unet" + + logs[f"lr/{lr_desc}"] = lr + + if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): + # tracking d*lr value + logs[f"lr/d*lr/{lr_desc}"] = ( + 
lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"] + ) + if ( + args.optimizer_type.lower().endswith("ProdigyPlusScheduleFree".lower()) and optimizer is not None + ): # tracking d*lr value of unet. + logs["lr/d*lr"] = optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"] + else: + idx = 0 + if not network_train_unet_only: + logs["lr/textencoder"] = float(lrs[0]) + idx = 1 + + for i in range(idx, len(lrs)): + logs[f"lr/group{i}"] = float(lrs[i]) + if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower(): + logs[f"lr/d*lr/group{i}"] = ( + lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"] + ) + if args.optimizer_type.lower().endswith("ProdigyPlusScheduleFree".lower()) and optimizer is not None: + logs[f"lr/d*lr/group{i}"] = optimizer.param_groups[i]["d"] * optimizer.param_groups[i]["lr"] + + return logs + + def get_optimizer(self, args, trainable_params: list[torch.nn.Parameter]) -> tuple[str, str, torch.optim.Optimizer]: + # adamw, adamw8bit, adafactor + + optimizer_type = args.optimizer_type.lower() + + # split optimizer_type and optimizer_args + optimizer_kwargs = {} + if args.optimizer_args is not None and len(args.optimizer_args) > 0: + for arg in args.optimizer_args: + key, value = arg.split("=") + value = ast.literal_eval(value) + optimizer_kwargs[key] = value + + lr = args.learning_rate + optimizer = None + optimizer_class = None + + if optimizer_type.endswith("8bit".lower()): + try: + import bitsandbytes as bnb + except ImportError: + raise ImportError("No bitsandbytes / bitsandbytesがインストールされていないようです") + + if optimizer_type == "AdamW8bit".lower(): + logger.info(f"use 8-bit AdamW optimizer | {optimizer_kwargs}") + optimizer_class = bnb.optim.AdamW8bit + optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs) + + elif optimizer_type == "Adafactor".lower(): + # Adafactor: check relative_step and warmup_init + if "relative_step" not in optimizer_kwargs: + optimizer_kwargs["relative_step"] = True # default + if not optimizer_kwargs["relative_step"] and optimizer_kwargs.get("warmup_init", False): + logger.info( + f"set relative_step to True because warmup_init is True / warmup_initがTrueのためrelative_stepをTrueにします" + ) + optimizer_kwargs["relative_step"] = True + logger.info(f"use Adafactor optimizer | {optimizer_kwargs}") + + if optimizer_kwargs["relative_step"]: + logger.info(f"relative_step is true / relative_stepがtrueです") + if lr != 0.0: + logger.warning(f"learning rate is used as initial_lr / 指定したlearning rateはinitial_lrとして使用されます") + args.learning_rate = None + + if args.lr_scheduler != "adafactor": + logger.info(f"use adafactor_scheduler / スケジューラにadafactor_schedulerを使用します") + args.lr_scheduler = f"adafactor:{lr}" # ちょっと微妙だけど + + lr = None + else: + if args.max_grad_norm != 0.0: + logger.warning( + f"because max_grad_norm is set, clip_grad_norm is enabled. 
consider setting it to 0 / max_grad_normが設定されているためclip_grad_normが有効になります。0に設定して無効にしたほうがいいかもしれません"
+ )
+ if args.lr_scheduler != "constant_with_warmup":
+ logger.warning(f"constant_with_warmup scheduler is recommended / スケジューラはconstant_with_warmupが良いかもしれません")
+ if optimizer_kwargs.get("clip_threshold", 1.0) != 1.0:
+ logger.warning(f"clip_threshold=1.0 is recommended / clip_thresholdは1.0が良いかもしれません")
+ 
+ optimizer_class = transformers.optimization.Adafactor
+ optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+ 
+ elif optimizer_type == "AdamW".lower():
+ logger.info(f"use AdamW optimizer | {optimizer_kwargs}")
+ optimizer_class = torch.optim.AdamW
+ optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+ 
+ if optimizer is None:
+ # use an arbitrary optimizer class specified by the user
+ case_sensitive_optimizer_type = args.optimizer_type # not lower
+ logger.info(f"use {case_sensitive_optimizer_type} | {optimizer_kwargs}")
+ 
+ if "." not in case_sensitive_optimizer_type: # from torch.optim
+ optimizer_module = torch.optim
+ else: # from other library
+ values = case_sensitive_optimizer_type.split(".")
+ optimizer_module = importlib.import_module(".".join(values[:-1]))
+ case_sensitive_optimizer_type = values[-1]
+ 
+ optimizer_class = getattr(optimizer_module, case_sensitive_optimizer_type)
+ optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+ 
+ # for logging
+ optimizer_name = optimizer_class.__module__ + "." + optimizer_class.__name__
+ optimizer_args = ",".join([f"{k}={v}" for k, v in optimizer_kwargs.items()])
+ 
+ # get train and eval functions
+ if hasattr(optimizer, "train") and callable(optimizer.train):
+ train_fn = optimizer.train
+ eval_fn = optimizer.eval
+ else:
+ train_fn = lambda: None
+ eval_fn = lambda: None
+ 
+ return optimizer_name, optimizer_args, optimizer, train_fn, eval_fn
+ 
+ def is_schedulefree_optimizer(self, optimizer: torch.optim.Optimizer, args: argparse.Namespace) -> bool:
+ return args.optimizer_type.lower().endswith("schedulefree".lower()) # or args.optimizer_schedulefree_wrapper
+ 
+ def get_dummy_scheduler(self, optimizer: torch.optim.Optimizer) -> Any:
+ # dummy scheduler for schedulefree optimizer. supports only empty step(), get_last_lr() and optimizers.
+ # this scheduler is used for logging only.
+ # this isn't wrapped by accelerator because this class is not a subclass of torch.optim.lr_scheduler._LRScheduler
+ class DummyScheduler:
+ def __init__(self, optimizer: torch.optim.Optimizer):
+ self.optimizer = optimizer
+ 
+ def step(self):
+ pass
+ 
+ def get_last_lr(self):
+ return [group["lr"] for group in self.optimizer.param_groups]
+ 
+ return DummyScheduler(optimizer)
+ 
+ def get_lr_scheduler(self, args, optimizer: torch.optim.Optimizer, num_processes: int):
+ """
+ Unified API to get any scheduler from its name.
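+ Returns a dummy scheduler for schedulefree optimizers, the Adafactor schedule for "adafactor:<lr>", an arbitrary scheduler class when --lr_scheduler_type is given, or a transformers/diffusers scheduler otherwise.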
+ """ + # if schedulefree optimizer, return dummy scheduler + if self.is_schedulefree_optimizer(optimizer, args): + return self.get_dummy_scheduler(optimizer) + + name = args.lr_scheduler + num_training_steps = args.max_train_steps * num_processes # * args.gradient_accumulation_steps + num_warmup_steps: Optional[int] = ( + int(args.lr_warmup_steps * num_training_steps) if isinstance(args.lr_warmup_steps, float) else args.lr_warmup_steps + ) + num_decay_steps: Optional[int] = ( + int(args.lr_decay_steps * num_training_steps) if isinstance(args.lr_decay_steps, float) else args.lr_decay_steps + ) + num_stable_steps = num_training_steps - num_warmup_steps - num_decay_steps + num_cycles = args.lr_scheduler_num_cycles + power = args.lr_scheduler_power + timescale = args.lr_scheduler_timescale + min_lr_ratio = args.lr_scheduler_min_lr_ratio + + lr_scheduler_kwargs = {} # get custom lr_scheduler kwargs + if args.lr_scheduler_args is not None and len(args.lr_scheduler_args) > 0: + for arg in args.lr_scheduler_args: + key, value = arg.split("=") + value = ast.literal_eval(value) + lr_scheduler_kwargs[key] = value + + def wrap_check_needless_num_warmup_steps(return_vals): + if num_warmup_steps is not None and num_warmup_steps != 0: + raise ValueError(f"{name} does not require `num_warmup_steps`. Set None or 0.") + return return_vals + + # using any lr_scheduler from other library + if args.lr_scheduler_type: + lr_scheduler_type = args.lr_scheduler_type + logger.info(f"use {lr_scheduler_type} | {lr_scheduler_kwargs} as lr_scheduler") + if "." not in lr_scheduler_type: # default to use torch.optim + lr_scheduler_module = torch.optim.lr_scheduler + else: + values = lr_scheduler_type.split(".") + lr_scheduler_module = importlib.import_module(".".join(values[:-1])) + lr_scheduler_type = values[-1] + lr_scheduler_class = getattr(lr_scheduler_module, lr_scheduler_type) + lr_scheduler = lr_scheduler_class(optimizer, **lr_scheduler_kwargs) + return lr_scheduler + + if name.startswith("adafactor"): + assert ( + type(optimizer) == transformers.optimization.Adafactor + ), f"adafactor scheduler must be used with Adafactor optimizer / adafactor schedulerはAdafactorオプティマイザと同時に使ってください" + initial_lr = float(name.split(":")[1]) + # logger.info(f"adafactor scheduler init lr {initial_lr}") + return wrap_check_needless_num_warmup_steps(transformers.optimization.AdafactorSchedule(optimizer, initial_lr)) + + if name == DiffusersSchedulerType.PIECEWISE_CONSTANT.value: + name = DiffusersSchedulerType(name) + schedule_func = DIFFUSERS_TYPE_TO_SCHEDULER_FUNCTION[name] + return schedule_func(optimizer, **lr_scheduler_kwargs) # step_rules and last_epoch are given as kwargs + + name = SchedulerType(name) + schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name] + + if name == SchedulerType.CONSTANT: + return wrap_check_needless_num_warmup_steps(schedule_func(optimizer, **lr_scheduler_kwargs)) + + # All other schedulers require `num_warmup_steps` + if num_warmup_steps is None: + raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.") + + if name == SchedulerType.CONSTANT_WITH_WARMUP: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, **lr_scheduler_kwargs) + + if name == SchedulerType.INVERSE_SQRT: + return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, timescale=timescale, **lr_scheduler_kwargs) + + # All other schedulers require `num_training_steps` + if num_training_steps is None: + raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.") + + 
if name == SchedulerType.COSINE_WITH_RESTARTS: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles, + **lr_scheduler_kwargs, + ) + + if name == SchedulerType.POLYNOMIAL: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + power=power, + **lr_scheduler_kwargs, + ) + + if name == SchedulerType.COSINE_WITH_MIN_LR: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_cycles=num_cycles / 2, + min_lr_rate=min_lr_ratio, + **lr_scheduler_kwargs, + ) + + # these schedulers do not require `num_decay_steps` + if name == SchedulerType.LINEAR or name == SchedulerType.COSINE: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + **lr_scheduler_kwargs, + ) + + # All other schedulers require `num_decay_steps` + if num_decay_steps is None: + raise ValueError(f"{name} requires `num_decay_steps`, please provide that argument.") + if name == SchedulerType.WARMUP_STABLE_DECAY: + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_stable_steps=num_stable_steps, + num_decay_steps=num_decay_steps, + num_cycles=num_cycles / 2, + min_lr_ratio=min_lr_ratio if min_lr_ratio is not None else 0.0, + **lr_scheduler_kwargs, + ) + + return schedule_func( + optimizer, + num_warmup_steps=num_warmup_steps, + num_training_steps=num_training_steps, + num_decay_steps=num_decay_steps, + **lr_scheduler_kwargs, + ) + + def resume_from_local_or_hf_if_specified(self, accelerator: Accelerator, args: argparse.Namespace) -> bool: + if not args.resume: + return False + + if not args.resume_from_huggingface: + logger.info(f"resume training from local state: {args.resume}") + accelerator.load_state(args.resume) + return True + + logger.info(f"resume training from huggingface state: {args.resume}") + repo_id = args.resume.split("/")[0] + "/" + args.resume.split("/")[1] + path_in_repo = "/".join(args.resume.split("/")[2:]) + revision = None + repo_type = None + if ":" in path_in_repo: + divided = path_in_repo.split(":") + if len(divided) == 2: + path_in_repo, revision = divided + repo_type = "model" + else: + path_in_repo, revision, repo_type = divided + logger.info(f"Downloading state from huggingface: {repo_id}/{path_in_repo}@{revision}") + + list_files = huggingface_utils.list_dir( + repo_id=repo_id, + subfolder=path_in_repo, + revision=revision, + token=args.huggingface_token, + repo_type=repo_type, + ) + + async def download(filename) -> str: + def task(): + return huggingface_hub.hf_hub_download( + repo_id=repo_id, + filename=filename, + revision=revision, + repo_type=repo_type, + token=args.huggingface_token, + ) + + return await asyncio.get_event_loop().run_in_executor(None, task) + + loop = asyncio.get_event_loop() + results = loop.run_until_complete(asyncio.gather(*[download(filename=filename.rfilename) for filename in list_files])) + if len(results) == 0: + raise ValueError( + "No files found in the specified repo id/path/revision / 指定されたリポジトリID/パス/リビジョンにファイルが見つかりませんでした" + ) + dirname = os.path.dirname(results[0]) + accelerator.load_state(dirname) + + return True + + def get_noisy_model_input_and_timesteps( + self, + args: argparse.Namespace, + noise: torch.Tensor, + latents: torch.Tensor, + noise_scheduler: FlowMatchDiscreteScheduler, + device: torch.device, + dtype: torch.dtype, + ): + batch_size = noise.shape[0] + + if 
args.timestep_sampling == "uniform" or args.timestep_sampling == "sigmoid" or args.timestep_sampling == "shift": + if args.timestep_sampling == "uniform" or args.timestep_sampling == "sigmoid": + # Simple random t-based noise sampling + if args.timestep_sampling == "sigmoid": + t = torch.sigmoid(args.sigmoid_scale * torch.randn((batch_size,), device=device)) + else: + t = torch.rand((batch_size,), device=device) + + elif args.timestep_sampling == "shift": + shift = args.discrete_flow_shift + logits_norm = torch.randn(batch_size, device=device) + logits_norm = logits_norm * args.sigmoid_scale # larger scale for more uniform sampling + t = logits_norm.sigmoid() + t = (t * shift) / (1 + (shift - 1) * t) + + t_min = args.min_timestep if args.min_timestep is not None else 0 + t_max = args.max_timestep if args.max_timestep is not None else 1000.0 + t_min /= 1000.0 + t_max /= 1000.0 + t = t * (t_max - t_min) + t_min # scale to [t_min, t_max], default [0, 1] + + timesteps = t * 1000.0 + t = t.view(-1, 1, 1, 1, 1) + noisy_model_input = (1 - t) * latents + t * noise + + timesteps += 1 # 1 to 1000 + else: + # Sample a random timestep for each image + # for weighting schemes where we sample timesteps non-uniformly + u = compute_density_for_timestep_sampling( + weighting_scheme=args.weighting_scheme, + batch_size=batch_size, + logit_mean=args.logit_mean, + logit_std=args.logit_std, + mode_scale=args.mode_scale, + ) + # indices = (u * noise_scheduler.config.num_train_timesteps).long() + t_min = args.min_timestep if args.min_timestep is not None else 0 + t_max = args.max_timestep if args.max_timestep is not None else 1000 + indices = (u * (t_max - t_min) + t_min).long() + + timesteps = noise_scheduler.timesteps[indices].to(device=device) # 1 to 1000 + + # Add noise according to flow matching. 
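+ # flow matching forward process: noisy_model_input = sigma * noise + (1 - sigma) * latents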
+ sigmas = get_sigmas(noise_scheduler, timesteps, device, n_dim=latents.ndim, dtype=dtype) + noisy_model_input = sigmas * noise + (1.0 - sigmas) * latents + + return noisy_model_input, timesteps + + def show_timesteps(self, args: argparse.Namespace): + N_TRY = 100000 + BATCH_SIZE = 1000 + CONSOLE_WIDTH = 64 + N_TIMESTEPS_PER_LINE = 25 + + noise_scheduler = FlowMatchDiscreteScheduler(shift=args.discrete_flow_shift, reverse=True, solver="euler") + # print(f"Noise scheduler timesteps: {noise_scheduler.timesteps}") + + latents = torch.zeros(BATCH_SIZE, 1, 1, 1, 1, dtype=torch.float16) + noise = torch.ones_like(latents) + + # sample timesteps + sampled_timesteps = [0] * noise_scheduler.config.num_train_timesteps + for i in tqdm(range(N_TRY // BATCH_SIZE)): + # we use noise=1, so retured noisy_model_input is same as timestep, because `noisy_model_input = (1 - t) * latents + t * noise` + actual_timesteps, _ = self.get_noisy_model_input_and_timesteps( + args, noise, latents, noise_scheduler, "cpu", torch.float16 + ) + actual_timesteps = actual_timesteps[:, 0, 0, 0, 0] * 1000 + for t in actual_timesteps: + t = int(t.item()) + sampled_timesteps[t] += 1 + + # sample weighting + sampled_weighting = [0] * noise_scheduler.config.num_train_timesteps + for i in tqdm(range(len(sampled_weighting))): + timesteps = torch.tensor([i + 1], device="cpu") + weighting = compute_loss_weighting_for_sd3(args.weighting_scheme, noise_scheduler, timesteps, "cpu", torch.float16) + if weighting is None: + weighting = torch.tensor(1.0, device="cpu") + elif torch.isinf(weighting).any(): + weighting = torch.tensor(1.0, device="cpu") + sampled_weighting[i] = weighting.item() + + # show results + if args.show_timesteps == "image": + # show timesteps with matplotlib + import matplotlib.pyplot as plt + + plt.figure(figsize=(10, 5)) + plt.subplot(1, 2, 1) + plt.bar(range(len(sampled_timesteps)), sampled_timesteps, width=1.0) + plt.title("Sampled timesteps") + plt.xlabel("Timestep") + plt.ylabel("Count") + + plt.subplot(1, 2, 2) + plt.bar(range(len(sampled_weighting)), sampled_weighting, width=1.0) + plt.title("Sampled loss weighting") + plt.xlabel("Timestep") + plt.ylabel("Weighting") + + plt.tight_layout() + plt.show() + + else: + sampled_timesteps = np.array(sampled_timesteps) + sampled_weighting = np.array(sampled_weighting) + + # average per line + sampled_timesteps = sampled_timesteps.reshape(-1, N_TIMESTEPS_PER_LINE).mean(axis=1) + sampled_weighting = sampled_weighting.reshape(-1, N_TIMESTEPS_PER_LINE).mean(axis=1) + + max_count = max(sampled_timesteps) + print(f"Sampled timesteps: max count={max_count}") + for i, t in enumerate(sampled_timesteps): + line = f"{(i)*N_TIMESTEPS_PER_LINE:4d}-{(i+1)*N_TIMESTEPS_PER_LINE-1:4d}: " + line += "#" * int(t / max_count * CONSOLE_WIDTH) + print(line) + + max_weighting = max(sampled_weighting) + print(f"Sampled loss weighting: max weighting={max_weighting}") + for i, w in enumerate(sampled_weighting): + line = f"{i*N_TIMESTEPS_PER_LINE:4d}-{(i+1)*N_TIMESTEPS_PER_LINE-1:4d}: {w:8.2f} " + line += "#" * int(w / max_weighting * CONSOLE_WIDTH) + print(line) + + def sample_images(self, accelerator, args, epoch, steps, vae, transformer, sample_parameters, dit_dtype): + """architecture independent sample images""" + if not should_sample_images(args, steps, epoch): + return + + logger.info("") + logger.info(f"generating sample images at step / サンプル画像生成 ステップ: {steps}") + if sample_parameters is None: + logger.error(f"No prompt file / プロンプトファイルがありません: {args.sample_prompts}") + return + + 
distributed_state = PartialState() # for multi gpu distributed inference. this is a singleton, so it's safe to use it here + + # Use the unwrapped model + transformer = accelerator.unwrap_model(transformer) + transformer.switch_block_swap_for_inference() + + # Create a directory to save the samples + save_dir = os.path.join(args.output_dir, "sample") + os.makedirs(save_dir, exist_ok=True) + + # save random state to restore later + rng_state = torch.get_rng_state() + cuda_rng_state = None + try: + cuda_rng_state = torch.cuda.get_rng_state() if torch.cuda.is_available() else None + except Exception: + pass + + if distributed_state.num_processes <= 1: + # If only one device is available, just use the original prompt list. We don't need to care about the distribution of prompts. + with torch.no_grad(), accelerator.autocast(): + for sample_parameter in sample_parameters: + self.sample_image_inference( + accelerator, args, transformer, dit_dtype, vae, save_dir, sample_parameter, epoch, steps + ) + clean_memory_on_device(accelerator.device) + else: + # Creating list with N elements, where each element is a list of prompt_dicts, and N is the number of processes available (number of devices available) + # prompt_dicts are assigned to lists based on order of processes, to attempt to time the image creation time to match enum order. Probably only works when steps and sampler are identical. + per_process_params = [] # list of lists + for i in range(distributed_state.num_processes): + per_process_params.append(sample_parameters[i :: distributed_state.num_processes]) + + with torch.no_grad(): + with distributed_state.split_between_processes(per_process_params) as sample_parameter_lists: + for sample_parameter in sample_parameter_lists[0]: + self.sample_image_inference( + accelerator, args, transformer, dit_dtype, vae, save_dir, sample_parameter, epoch, steps + ) + clean_memory_on_device(accelerator.device) + + torch.set_rng_state(rng_state) + if cuda_rng_state is not None: + torch.cuda.set_rng_state(cuda_rng_state) + + transformer.switch_block_swap_for_training() + clean_memory_on_device(accelerator.device) + + def sample_image_inference(self, accelerator, args, transformer, dit_dtype, vae, save_dir, sample_parameter, epoch, steps): + """architecture independent sample images""" + sample_steps = sample_parameter.get("sample_steps", 20) + width = sample_parameter.get("width", 256) # make smaller for faster and memory saving inference + height = sample_parameter.get("height", 256) + frame_count = sample_parameter.get("frame_count", 1) + guidance_scale = sample_parameter.get("guidance_scale", self.default_guidance_scale) + discrete_flow_shift = sample_parameter.get("discrete_flow_shift", 14.5) + seed = sample_parameter.get("seed") + prompt: str = sample_parameter.get("prompt", "") + cfg_scale = sample_parameter.get("cfg_scale", None) # None for architecture default + negative_prompt = sample_parameter.get("negative_prompt", None) + + # round width and height to multiples of 8 + width = (width // 8) * 8 + height = (height // 8) * 8 + + frame_count = (frame_count - 1) // 4 * 4 + 1 # 1, 5, 9, 13, ... 
For HunyuanVideo and Wan2.1 + + if self.i2v_training: + image_path = sample_parameter.get("image_path", None) + if image_path is None: + logger.error("No image_path for i2v model / i2vモデルのサンプル画像生成にはimage_pathが必要です") + return + else: + image_path = None + + if self.control_training: + control_video_path = sample_parameter.get("control_video_path", None) + if control_video_path is None: + logger.error( + "No control_video_path for control model / controlモデルのサンプル画像生成にはcontrol_video_pathが必要です" + ) + return + else: + control_video_path = None + + device = accelerator.device + if seed is not None: + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + generator = torch.Generator(device=device).manual_seed(seed) + else: + # True random sample image generation + torch.seed() + torch.cuda.seed() + generator = torch.Generator(device=device).manual_seed(torch.initial_seed()) + + logger.info(f"prompt: {prompt}") + logger.info(f"height: {height}") + logger.info(f"width: {width}") + logger.info(f"frame count: {frame_count}") + logger.info(f"sample steps: {sample_steps}") + logger.info(f"guidance scale: {guidance_scale}") + logger.info(f"discrete flow shift: {discrete_flow_shift}") + if seed is not None: + logger.info(f"seed: {seed}") + + do_classifier_free_guidance = False + if negative_prompt is not None: + do_classifier_free_guidance = True + logger.info(f"negative prompt: {negative_prompt}") + logger.info(f"cfg scale: {cfg_scale}") + + if self.i2v_training: + logger.info(f"image path: {image_path}") + if self.control_training: + logger.info(f"control video path: {control_video_path}") + + # inference: architecture dependent + video = self.do_inference( + accelerator, + args, + sample_parameter, + vae, + dit_dtype, + transformer, + discrete_flow_shift, + sample_steps, + width, + height, + frame_count, + generator, + do_classifier_free_guidance, + guidance_scale, + cfg_scale, + image_path=image_path, + control_video_path=control_video_path, + ) + + # Save video + if video is None: + logger.error("No video generated / 生成された動画がありません") + return + + ts_str = time.strftime("%Y%m%d%H%M%S", time.localtime()) + num_suffix = f"e{epoch:06d}" if epoch is not None else f"{steps:06d}" + seed_suffix = "" if seed is None else f"_{seed}" + prompt_idx = sample_parameter.get("enum", 0) + save_path = ( + f"{'' if args.output_name is None else args.output_name + '_'}{num_suffix}_{prompt_idx:02d}_{ts_str}{seed_suffix}" + ) + if video.shape[2] == 1: + save_images_grid(video, save_dir, save_path, create_subdir=False) + else: + save_videos_grid(video, os.path.join(save_dir, save_path) + ".mp4") + + # Move models back to initial state + vae.to("cpu") + clean_memory_on_device(device) + + # region model specific + + @property + def architecture(self) -> str: + return ARCHITECTURE_HUNYUAN_VIDEO + + @property + def architecture_full_name(self) -> str: + return ARCHITECTURE_HUNYUAN_VIDEO_FULL + + def handle_model_specific_args(self, args: argparse.Namespace): + self.pos_embed_cache = {} + + self._i2v_training = args.dit_in_channels == 32 # may be changed in the future + if self._i2v_training: + logger.info("I2V training mode") + + self._control_training = False # HunyuanVideo does not support control training yet + + self.default_guidance_scale = 6.0 + + @property + def i2v_training(self) -> bool: + return self._i2v_training + + @property + def control_training(self) -> bool: + return self._control_training + + def process_sample_prompts( + self, + args: argparse.Namespace, + accelerator: Accelerator, + sample_prompts: str, + 
): + text_encoder1, text_encoder2, fp8_llm = args.text_encoder1, args.text_encoder2, args.fp8_llm + + logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}") + prompts = load_prompts(sample_prompts) + + def encode_for_text_encoder(text_encoder, is_llm=True): + sample_prompts_te_outputs = {} # (prompt) -> (embeds, mask) + with accelerator.autocast(), torch.no_grad(): + for prompt_dict in prompts: + for p in [prompt_dict.get("prompt", ""), prompt_dict.get("negative_prompt", None)]: + if p is None: + continue + if p not in sample_prompts_te_outputs: + logger.info(f"cache Text Encoder outputs for prompt: {p}") + + data_type = "video" + text_inputs = text_encoder.text2tokens(p, data_type=data_type) + + prompt_outputs = text_encoder.encode(text_inputs, data_type=data_type) + sample_prompts_te_outputs[p] = (prompt_outputs.hidden_state, prompt_outputs.attention_mask) + + return sample_prompts_te_outputs + + # Load Text Encoder 1 and encode + text_encoder_dtype = torch.float16 if args.text_encoder_dtype is None else model_utils.str_to_dtype(args.text_encoder_dtype) + logger.info(f"loading text encoder 1: {text_encoder1}") + text_encoder_1 = text_encoder_module.load_text_encoder_1(text_encoder1, accelerator.device, fp8_llm, text_encoder_dtype) + + logger.info("encoding with Text Encoder 1") + te_outputs_1 = encode_for_text_encoder(text_encoder_1) + del text_encoder_1 + + # Load Text Encoder 2 and encode + logger.info(f"loading text encoder 2: {text_encoder2}") + text_encoder_2 = text_encoder_module.load_text_encoder_2(text_encoder2, accelerator.device, text_encoder_dtype) + + logger.info("encoding with Text Encoder 2") + te_outputs_2 = encode_for_text_encoder(text_encoder_2, is_llm=False) + del text_encoder_2 + + # prepare sample parameters + sample_parameters = [] + for prompt_dict in prompts: + prompt_dict_copy = prompt_dict.copy() + + p = prompt_dict.get("prompt", "") + prompt_dict_copy["llm_embeds"] = te_outputs_1[p][0] + prompt_dict_copy["llm_mask"] = te_outputs_1[p][1] + prompt_dict_copy["clipL_embeds"] = te_outputs_2[p][0] + prompt_dict_copy["clipL_mask"] = te_outputs_2[p][1] + + p = prompt_dict.get("negative_prompt", None) + if p is not None: + prompt_dict_copy["negative_llm_embeds"] = te_outputs_1[p][0] + prompt_dict_copy["negative_llm_mask"] = te_outputs_1[p][1] + prompt_dict_copy["negative_clipL_embeds"] = te_outputs_2[p][0] + prompt_dict_copy["negative_clipL_mask"] = te_outputs_2[p][1] + + sample_parameters.append(prompt_dict_copy) + + clean_memory_on_device(accelerator.device) + + return sample_parameters + + def do_inference( + self, + accelerator, + args, + sample_parameter, + vae, + dit_dtype, + transformer, + discrete_flow_shift, + sample_steps, + width, + height, + frame_count, + generator, + do_classifier_free_guidance, + guidance_scale, + cfg_scale, + image_path=None, + control_video_path=None, + ): + """architecture dependent inference""" + device = accelerator.device + if cfg_scale is None: + cfg_scale = 1.0 + do_classifier_free_guidance = do_classifier_free_guidance and cfg_scale != 1.0 + + # Prepare scheduler for each prompt + scheduler = FlowMatchDiscreteScheduler(shift=discrete_flow_shift, reverse=True, solver="euler") + + # Number of inference steps for sampling + scheduler.set_timesteps(sample_steps, device=device) + timesteps = scheduler.timesteps + + # Calculate latent video length based on VAE version + if "884" in VAE_VER: + latent_video_length = (frame_count - 1) // 4 + 1 + elif "888" in VAE_VER: + latent_video_length = (frame_count - 1) // 8 + 
1 + else: + latent_video_length = frame_count + + # Get embeddings + prompt_embeds = sample_parameter["llm_embeds"].to(device=device, dtype=dit_dtype) + prompt_mask = sample_parameter["llm_mask"].to(device=device) + prompt_embeds_2 = sample_parameter["clipL_embeds"].to(device=device, dtype=dit_dtype) + + if do_classifier_free_guidance: + negative_prompt_embeds = sample_parameter["negative_llm_embeds"].to(device=device, dtype=dit_dtype) + negative_prompt_mask = sample_parameter["negative_llm_mask"].to(device=device) + negative_prompt_embeds_2 = sample_parameter["negative_clipL_embeds"].to(device=device, dtype=dit_dtype) + prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) + prompt_mask = torch.cat([negative_prompt_mask, prompt_mask], dim=0) + prompt_embeds_2 = torch.cat([negative_prompt_embeds_2, prompt_embeds_2], dim=0) + + num_channels_latents = 16 # transformer.config.in_channels + vae_scale_factor = 2 ** (4 - 1) # Assuming 4 VAE blocks + + # Initialize latents + shape_or_frame = ( + 1, + num_channels_latents, + 1, + height // vae_scale_factor, + width // vae_scale_factor, + ) + latents = [] + for _ in range(latent_video_length): + latents.append(torch.randn(shape_or_frame, generator=generator, device=device, dtype=dit_dtype)) + latents = torch.cat(latents, dim=2) + + if self.i2v_training: + # Move VAE to the appropriate device for sampling + vae.to(device) + vae.eval() + + image = Image.open(image_path) + image = resize_image_to_bucket(image, (width, height)) # returns a numpy array + image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(0).unsqueeze(2).float() # 1, C, 1, H, W + image = image / 255.0 + + logger.info(f"Encoding image to latents") + image_latents = encode_to_latents(args, image, device) # 1, C, 1, H, W + image_latents = image_latents.to(device=device, dtype=dit_dtype) + + vae.to("cpu") + clean_memory_on_device(device) + + zero_latents = torch.zeros_like(latents) + zero_latents[:, :, :1, :, :] = image_latents + image_latents = zero_latents + else: + image_latents = None + + # Guidance scale + guidance_expand = torch.tensor([guidance_scale * 1000.0], dtype=torch.float32, device=device).to(dit_dtype) + + # Get rotary positional embeddings + freqs_cos, freqs_sin = get_rotary_pos_embed_by_shape(transformer, latents.shape[2:]) + freqs_cos = freqs_cos.to(device=device, dtype=dit_dtype) + freqs_sin = freqs_sin.to(device=device, dtype=dit_dtype) + + # Wrap the inner loop with tqdm to track progress over timesteps + prompt_idx = sample_parameter.get("enum", 0) + with torch.no_grad(): + for i, t in enumerate(tqdm(timesteps, desc=f"Sampling timesteps for prompt {prompt_idx+1}")): + latents_input = scheduler.scale_model_input(latents, t) + + if do_classifier_free_guidance: + latents_input = torch.cat([latents_input, latents_input], dim=0) # 2, C, F, H, W + + if image_latents is not None: + latents_image_input = ( + image_latents if not do_classifier_free_guidance else torch.cat([image_latents, image_latents], dim=0) + ) + latents_input = torch.cat([latents_input, latents_image_input], dim=1) # 1 or 2, C*2, F, H, W + + noise_pred = transformer( + latents_input, + t.repeat(latents.shape[0]).to(device=device, dtype=dit_dtype), + text_states=prompt_embeds, + text_mask=prompt_mask, + text_states_2=prompt_embeds_2, + freqs_cos=freqs_cos, + freqs_sin=freqs_sin, + guidance=guidance_expand, + return_dict=True, + )["x"] + + # perform classifier free guidance + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_cond = noise_pred.chunk(2) + noise_pred = 
noise_pred_uncond + cfg_scale * (noise_pred_cond - noise_pred_uncond) + + # Compute the previous noisy sample x_t -> x_t-1 + latents = scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + # Move VAE to the appropriate device for sampling + vae.to(device) + vae.eval() + + # Decode latents to video + if hasattr(vae.config, "shift_factor") and vae.config.shift_factor: + latents = latents / vae.config.scaling_factor + vae.config.shift_factor + else: + latents = latents / vae.config.scaling_factor + + latents = latents.to(device=device, dtype=vae.dtype) + with torch.no_grad(): + video = vae.decode(latents, return_dict=False)[0] + video = (video / 2 + 0.5).clamp(0, 1) + video = video.cpu().float() + + return video + + def load_vae(self, args: argparse.Namespace, vae_dtype: torch.dtype, vae_path: str): + vae, _, s_ratio, t_ratio = load_vae(vae_dtype=vae_dtype, device="cpu", vae_path=vae_path) + + if args.vae_chunk_size is not None: + vae.set_chunk_size_for_causal_conv_3d(args.vae_chunk_size) + logger.info(f"Set chunk_size to {args.vae_chunk_size} for CausalConv3d in VAE") + if args.vae_spatial_tile_sample_min_size is not None: + vae.enable_spatial_tiling(True) + vae.tile_sample_min_size = args.vae_spatial_tile_sample_min_size + vae.tile_latent_min_size = args.vae_spatial_tile_sample_min_size // 8 + elif args.vae_tiling: + vae.enable_spatial_tiling(True) + + return vae + + def load_transformer( + self, + accelerator: Accelerator, + args: argparse.Namespace, + dit_path: str, + attn_mode: str, + split_attn: bool, + loading_device: str, + dit_weight_dtype: Optional[torch.dtype], + ): + transformer = load_transformer(dit_path, attn_mode, split_attn, loading_device, dit_weight_dtype, args.dit_in_channels) + + if args.img_in_txt_in_offloading: + logger.info("Enable offloading img_in and txt_in to CPU") + transformer.enable_img_in_txt_in_offloading() + + return transformer + + def scale_shift_latents(self, latents): + latents = latents * vae_module.SCALING_FACTOR + return latents + + def call_dit( + self, + args: argparse.Namespace, + accelerator: Accelerator, + transformer_arg, + latents: torch.Tensor, + batch: dict[str, torch.Tensor], + noise: torch.Tensor, + noisy_model_input: torch.Tensor, + timesteps: torch.Tensor, + network_dtype: torch.dtype, + ): + transformer: HYVideoDiffusionTransformer = transformer_arg + bsz = latents.shape[0] + + # I2V training + if self.i2v_training: + image_latents = torch.zeros_like(latents) + image_latents[:, :, :1, :, :] = latents[:, :, :1, :, :] + noisy_model_input = torch.cat([noisy_model_input, image_latents], dim=1) # concat along channel dim + + # ensure guidance_scale in args is float + guidance_vec = torch.full((bsz,), float(args.guidance_scale), device=accelerator.device) # , dtype=dit_dtype) + + # ensure the hidden state will require grad + if args.gradient_checkpointing: + noisy_model_input.requires_grad_(True) + guidance_vec.requires_grad_(True) + + pos_emb_shape = latents.shape[1:] + if pos_emb_shape not in self.pos_embed_cache: + freqs_cos, freqs_sin = get_rotary_pos_embed_by_shape(transformer, latents.shape[2:]) + # freqs_cos = freqs_cos.to(device=accelerator.device, dtype=dit_dtype) + # freqs_sin = freqs_sin.to(device=accelerator.device, dtype=dit_dtype) + self.pos_embed_cache[pos_emb_shape] = (freqs_cos, freqs_sin) + else: + freqs_cos, freqs_sin = self.pos_embed_cache[pos_emb_shape] + + # call DiT + latents = latents.to(device=accelerator.device, dtype=network_dtype) + noisy_model_input = noisy_model_input.to(device=accelerator.device, 
dtype=network_dtype) + with accelerator.autocast(): + model_pred = transformer( + noisy_model_input, + timesteps, + text_states=batch["llm"], + text_mask=batch["llm_mask"], + text_states_2=batch["clipL"], + freqs_cos=freqs_cos, + freqs_sin=freqs_sin, + guidance=guidance_vec, + return_dict=False, + ) + + # flow matching loss + target = noise - latents + + return model_pred, target + + # endregion model specific + + def train(self, args): + # check required arguments + if args.dataset_config is None: + raise ValueError("dataset_config is required / dataset_configが必要です") + if args.dit is None: + raise ValueError("path to DiT model is required / DiTモデルのパスが必要です") + assert not args.fp8_scaled or args.fp8_base, "fp8_scaled requires fp8_base / fp8_scaledはfp8_baseが必要です" + + if args.sage_attn: + raise ValueError( + "SageAttention doesn't support training currently. Please use `--sdpa` or `--xformers` etc. instead." + " / SageAttentionは現在学習をサポートしていないようです。`--sdpa`や`--xformers`などの他のオプションを使ってください" + ) + + # check model specific arguments + self.handle_model_specific_args(args) + + # show timesteps for debugging + if args.show_timesteps: + self.show_timesteps(args) + return + + session_id = random.randint(0, 2**32) + training_started_at = time.time() + # setup_logging(args, reset=True) + + if args.seed is None: + args.seed = random.randint(0, 2**32) + set_seed(args.seed) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=self.architecture) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group, training=True) + + current_epoch = Value("i", 0) + current_step = Value("i", 0) + ds_for_collator = train_dataset_group if args.max_data_loader_n_workers == 0 else None + collator = collator_class(current_epoch, current_step, ds_for_collator) + + # prepare accelerator + logger.info("preparing accelerator") + accelerator = prepare_accelerator(args) + is_main_process = accelerator.is_main_process + + # prepare dtype + weight_dtype = torch.float32 + if args.mixed_precision == "fp16": + weight_dtype = torch.float16 + elif args.mixed_precision == "bf16": + weight_dtype = torch.bfloat16 + + # HunyuanVideo: bfloat16 or float16, Wan2.1: bfloat16 + dit_dtype = torch.bfloat16 if args.dit_dtype is None else model_utils.str_to_dtype(args.dit_dtype) + dit_weight_dtype = (None if args.fp8_scaled else torch.float8_e4m3fn) if args.fp8_base else dit_dtype + logger.info(f"DiT precision: {dit_dtype}, weight precision: {dit_weight_dtype}") + + # get embedding for sampling images + vae_dtype = torch.float16 if args.vae_dtype is None else model_utils.str_to_dtype(args.vae_dtype) + sample_parameters = None + vae = None + if args.sample_prompts: + sample_parameters = self.process_sample_prompts(args, accelerator, args.sample_prompts) + + # Load VAE model for sampling images: VAE is loaded to cpu to save gpu memory + vae = self.load_vae(args, vae_dtype=vae_dtype, vae_path=args.vae) + vae.requires_grad_(False) + vae.eval() + + # load DiT model + blocks_to_swap = args.blocks_to_swap if args.blocks_to_swap else 0 + self.blocks_to_swap = blocks_to_swap + loading_device = "cpu" if blocks_to_swap > 0 else accelerator.device + + logger.info(f"Loading DiT model from {args.dit}") + if args.sdpa: + attn_mode = "torch" + elif args.flash_attn: + attn_mode = "flash" + 
elif args.sage_attn: + attn_mode = "sageattn" + elif args.xformers: + attn_mode = "xformers" + elif args.flash3: + attn_mode = "flash3" + else: + raise ValueError( + f"either --sdpa, --flash-attn, --flash3, --sage-attn or --xformers must be specified / --sdpa, --flash-attn, --flash3, --sage-attn, --xformersのいずれかを指定してください" + ) + transformer = self.load_transformer( + accelerator, args, args.dit, attn_mode, args.split_attn, loading_device, dit_weight_dtype + ) + transformer.eval() + transformer.requires_grad_(False) + + if blocks_to_swap > 0: + logger.info(f"enable swap {blocks_to_swap} blocks to CPU from device: {accelerator.device}") + transformer.enable_block_swap(blocks_to_swap, accelerator.device, supports_backward=True) + transformer.move_to_device_except_swap_blocks(accelerator.device) + + # load network model for differential training + sys.path.append(os.path.dirname(__file__)) + accelerator.print("import network module:", args.network_module) + network_module: lora_module = importlib.import_module(args.network_module) # actual module may be different + + if args.base_weights is not None: + # if base_weights is specified, merge the weights to DiT model + for i, weight_path in enumerate(args.base_weights): + if args.base_weights_multiplier is None or len(args.base_weights_multiplier) <= i: + multiplier = 1.0 + else: + multiplier = args.base_weights_multiplier[i] + + accelerator.print(f"merging module: {weight_path} with multiplier {multiplier}") + + weights_sd = load_file(weight_path) + module = network_module.create_arch_network_from_weights( + multiplier, weights_sd, unet=transformer, for_inference=True + ) + module.merge_to(None, transformer, weights_sd, weight_dtype, "cpu") + + accelerator.print(f"all weights merged: {', '.join(args.base_weights)}") + + # prepare network + net_kwargs = {} + if args.network_args is not None: + for net_arg in args.network_args: + key, value = net_arg.split("=") + net_kwargs[key] = value + + if args.dim_from_weights: + logger.info(f"Loading network from weights: {args.dim_from_weights}") + weights_sd = load_file(args.dim_from_weights) + network, _ = network_module.create_arch_network_from_weights(1, weights_sd, unet=transformer) + else: + # We use the name create_arch_network for compatibility with LyCORIS + if hasattr(network_module, "create_arch_network"): + network = network_module.create_arch_network( + 1.0, + args.network_dim, + args.network_alpha, + vae, + None, + transformer, + neuron_dropout=args.network_dropout, + **net_kwargs, + ) + else: + # LyCORIS compatibility + network = network_module.create_network( + 1.0, + args.network_dim, + args.network_alpha, + vae, + None, + transformer, + **net_kwargs, + ) + if network is None: + return + + if hasattr(network_module, "prepare_network"): + network.prepare_network(args) + + # apply network to DiT + network.apply_to(None, transformer, apply_text_encoder=False, apply_unet=True) + + if args.network_weights is not None: + # FIXME consider alpha of weights: this assumes that the alpha is not changed + info = network.load_weights(args.network_weights) + accelerator.print(f"load network weights from {args.network_weights}: {info}") + + if args.gradient_checkpointing: + transformer.enable_gradient_checkpointing() + network.enable_gradient_checkpointing() # may have no effect + + # prepare optimizer, data loader etc. 
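+ # only the network (LoRA) parameters are passed to the optimizer; the DiT weights themselves stay frozen (requires_grad_(False) above)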
+ accelerator.print("prepare optimizer, data loader etc.") + + trainable_params, lr_descriptions = network.prepare_optimizer_params(unet_lr=args.learning_rate) + optimizer_name, optimizer_args, optimizer, optimizer_train_fn, optimizer_eval_fn = self.get_optimizer( + args, trainable_params + ) + + # prepare dataloader + + # num workers for data loader: if 0, persistent_workers is not available + n_workers = min(args.max_data_loader_n_workers, os.cpu_count()) # cpu_count or max_data_loader_n_workers + + train_dataloader = torch.utils.data.DataLoader( + train_dataset_group, + batch_size=1, + shuffle=True, + collate_fn=collator, + num_workers=n_workers, + persistent_workers=args.persistent_data_loader_workers, + ) + + # calculate max_train_steps + if args.max_train_epochs is not None: + args.max_train_steps = args.max_train_epochs * math.ceil( + len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps + ) + accelerator.print( + f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}" + ) + + # send max_train_steps to train_dataset_group + train_dataset_group.set_max_train_steps(args.max_train_steps) + + # prepare lr_scheduler + lr_scheduler = self.get_lr_scheduler(args, optimizer, accelerator.num_processes) + + # prepare training model. accelerator does some magic here + + # experimental feature: train the model with gradients in fp16/bf16 + network_dtype = torch.float32 + args.full_fp16 = args.full_bf16 = False # temporary disabled because stochastic rounding is not supported yet + if args.full_fp16: + assert ( + args.mixed_precision == "fp16" + ), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。" + accelerator.print("enable full fp16 training.") + network_dtype = weight_dtype + network.to(network_dtype) + elif args.full_bf16: + assert ( + args.mixed_precision == "bf16" + ), "full_bf16 requires mixed precision='bf16' / full_bf16を使う場合はmixed_precision='bf16'を指定してください。" + accelerator.print("enable full bf16 training.") + network_dtype = weight_dtype + network.to(network_dtype) + + if dit_weight_dtype != dit_dtype and dit_weight_dtype is not None: + logger.info(f"casting model to {dit_weight_dtype}") + transformer.to(dit_weight_dtype) + + if blocks_to_swap > 0: + transformer = accelerator.prepare(transformer, device_placement=[not blocks_to_swap > 0]) + accelerator.unwrap_model(transformer).move_to_device_except_swap_blocks(accelerator.device) # reduce peak memory usage + accelerator.unwrap_model(transformer).prepare_block_swap_before_forward() + else: + transformer = accelerator.prepare(transformer) + + network, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(network, optimizer, train_dataloader, lr_scheduler) + training_model = network + + if args.gradient_checkpointing: + transformer.train() + else: + transformer.eval() + + accelerator.unwrap_model(network).prepare_grad_etc(transformer) + + if args.full_fp16: + # patch accelerator for fp16 training + # def patch_accelerator_for_fp16_training(accelerator): + org_unscale_grads = accelerator.scaler._unscale_grads_ + + def _unscale_grads_replacer(optimizer, inv_scale, found_inf, allow_fp16): + return org_unscale_grads(optimizer, inv_scale, found_inf, True) + + accelerator.scaler._unscale_grads_ = _unscale_grads_replacer + + # before resuming make hook for saving/loading to save/load the network weights only + def save_model_hook(models, weights, output_dir): + # pop weights of other models than network to save only 
network weights
+ # only main process or deepspeed https://github.com/huggingface/diffusers/issues/2606
+ if accelerator.is_main_process: # or args.deepspeed:
+ remove_indices = []
+ for i, model in enumerate(models):
+ if not isinstance(model, type(accelerator.unwrap_model(network))):
+ remove_indices.append(i)
+ for i in reversed(remove_indices):
+ if len(weights) > i:
+ weights.pop(i)
+ # print(f"save model hook: {len(weights)} weights will be saved")
+ 
+ def load_model_hook(models, input_dir):
+ # remove models except network
+ remove_indices = []
+ for i, model in enumerate(models):
+ if not isinstance(model, type(accelerator.unwrap_model(network))):
+ remove_indices.append(i)
+ for i in reversed(remove_indices):
+ models.pop(i)
+ # print(f"load model hook: {len(models)} models will be loaded")
+ 
+ accelerator.register_save_state_pre_hook(save_model_hook)
+ accelerator.register_load_state_pre_hook(load_model_hook)
+ 
+ # resume from local or huggingface. accelerator.step is set
+ self.resume_from_local_or_hf_if_specified(accelerator, args) # accelerator.load_state(args.resume)
+ 
+ # calculate the number of epochs
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
+ num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
+ 
+ # start training
+ # total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
+ 
+ accelerator.print("running training / 学習開始")
+ accelerator.print(f" num train items / 学習画像、動画数: {train_dataset_group.num_train_items}")
+ accelerator.print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
+ accelerator.print(f" num epochs / epoch数: {num_train_epochs}")
+ accelerator.print(
+ f" batch size per device / バッチサイズ: {', '.join([str(d.batch_size) for d in train_dataset_group.datasets])}"
+ )
+ # accelerator.print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
+ accelerator.print(f" gradient accumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
+ accelerator.print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
+ 
+ # TODO refactor metadata creation and move to util
+ metadata = {
+ "ss_session_id": session_id, # random integer indicating which group of epochs the model came from
+ "ss_training_started_at": training_started_at, # unix timestamp
+ "ss_output_name": args.output_name,
+ "ss_learning_rate": args.learning_rate,
+ "ss_num_train_items": train_dataset_group.num_train_items,
+ "ss_num_batches_per_epoch": len(train_dataloader),
+ "ss_num_epochs": num_train_epochs,
+ "ss_gradient_checkpointing": args.gradient_checkpointing,
+ "ss_gradient_accumulation_steps": args.gradient_accumulation_steps,
+ "ss_max_train_steps": args.max_train_steps,
+ "ss_lr_warmup_steps": args.lr_warmup_steps,
+ "ss_lr_scheduler": args.lr_scheduler,
+ SS_METADATA_KEY_BASE_MODEL_VERSION: self.architecture_full_name,
+ # "ss_network_module": args.network_module,
+ # "ss_network_dim": args.network_dim, # None means default because another network than LoRA may have another default dim
+ # "ss_network_alpha": args.network_alpha, # some networks may not have alpha
+ SS_METADATA_KEY_NETWORK_MODULE: args.network_module,
+ SS_METADATA_KEY_NETWORK_DIM: args.network_dim,
+ SS_METADATA_KEY_NETWORK_ALPHA: args.network_alpha,
+ "ss_network_dropout": args.network_dropout, # some networks may not have dropout
+ "ss_mixed_precision": args.mixed_precision,
+ "ss_seed": args.seed,
+ "ss_training_comment":
args.training_comment, # will not be updated after training + # "ss_sd_scripts_commit_hash": train_util.get_git_revision_hash(), + "ss_optimizer": optimizer_name + (f"({optimizer_args})" if len(optimizer_args) > 0 else ""), + "ss_max_grad_norm": args.max_grad_norm, + "ss_fp8_base": bool(args.fp8_base), + # "ss_fp8_llm": bool(args.fp8_llm), # remove this because this is only for HuanyuanVideo TODO set architecure dependent metadata + "ss_full_fp16": bool(args.full_fp16), + "ss_full_bf16": bool(args.full_bf16), + "ss_weighting_scheme": args.weighting_scheme, + "ss_logit_mean": args.logit_mean, + "ss_logit_std": args.logit_std, + "ss_mode_scale": args.mode_scale, + "ss_guidance_scale": args.guidance_scale, + "ss_timestep_sampling": args.timestep_sampling, + "ss_sigmoid_scale": args.sigmoid_scale, + "ss_discrete_flow_shift": args.discrete_flow_shift, + } + + datasets_metadata = [] + # tag_frequency = {} # merge tag frequency for metadata editor # TODO support tag frequency + for dataset in train_dataset_group.datasets: + dataset_metadata = dataset.get_metadata() + datasets_metadata.append(dataset_metadata) + + metadata["ss_datasets"] = json.dumps(datasets_metadata) + + # add extra args + if args.network_args: + # metadata["ss_network_args"] = json.dumps(net_kwargs) + metadata[SS_METADATA_KEY_NETWORK_ARGS] = json.dumps(net_kwargs) + + # model name and hash + # calculate hash takes time, so we omit it for now + if args.dit is not None: + # logger.info(f"calculate hash for DiT model: {args.dit}") + logger.info(f"set DiT model name for metadata: {args.dit}") + sd_model_name = args.dit + if os.path.exists(sd_model_name): + # metadata["ss_sd_model_hash"] = model_utils.model_hash(sd_model_name) + # metadata["ss_new_sd_model_hash"] = model_utils.calculate_sha256(sd_model_name) + sd_model_name = os.path.basename(sd_model_name) + metadata["ss_sd_model_name"] = sd_model_name + + if args.vae is not None: + # logger.info(f"calculate hash for VAE model: {args.vae}") + logger.info(f"set VAE model name for metadata: {args.vae}") + vae_name = args.vae + if os.path.exists(vae_name): + # metadata["ss_vae_hash"] = model_utils.model_hash(vae_name) + # metadata["ss_new_vae_hash"] = model_utils.calculate_sha256(vae_name) + vae_name = os.path.basename(vae_name) + metadata["ss_vae_name"] = vae_name + + metadata = {k: str(v) for k, v in metadata.items()} + + # make minimum metadata for filtering + minimum_metadata = {} + for key in SS_METADATA_MINIMUM_KEYS: + if key in metadata: + minimum_metadata[key] = metadata[key] + + if accelerator.is_main_process: + init_kwargs = {} + if args.wandb_run_name: + init_kwargs["wandb"] = {"name": args.wandb_run_name} + if args.log_tracker_config is not None: + init_kwargs = toml.load(args.log_tracker_config) + accelerator.init_trackers( + "network_train" if args.log_tracker_name is None else args.log_tracker_name, + config=train_utils.get_sanitized_config_or_none(args), + init_kwargs=init_kwargs, + ) + + # TODO skip until initial step + progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps") + + epoch_to_start = 0 + global_step = 0 + noise_scheduler = FlowMatchDiscreteScheduler(shift=args.discrete_flow_shift, reverse=True, solver="euler") + + loss_recorder = train_utils.LossRecorder() + del train_dataset_group + + # function for saving/removing + save_dtype = dit_dtype + + def save_model(ckpt_name: str, unwrapped_nw, steps, epoch_no, force_sync_upload=False): + os.makedirs(args.output_dir, exist_ok=True) + ckpt_file = 
os.path.join(args.output_dir, ckpt_name) + + accelerator.print(f"\nsaving checkpoint: {ckpt_file}") + metadata["ss_training_finished_at"] = str(time.time()) + metadata["ss_steps"] = str(steps) + metadata["ss_epoch"] = str(epoch_no) + + metadata_to_save = minimum_metadata if args.no_metadata else metadata + + title = args.metadata_title if args.metadata_title is not None else args.output_name + if args.min_timestep is not None or args.max_timestep is not None: + min_time_step = args.min_timestep if args.min_timestep is not None else 0 + max_time_step = args.max_timestep if args.max_timestep is not None else 1000 + md_timesteps = (min_time_step, max_time_step) + else: + md_timesteps = None + + sai_metadata = sai_model_spec.build_metadata( + None, + self.architecture, + time.time(), + title, + None, + args.metadata_author, + args.metadata_description, + args.metadata_license, + args.metadata_tags, + timesteps=md_timesteps, + ) + + metadata_to_save.update(sai_metadata) + + unwrapped_nw.save_weights(ckpt_file, save_dtype, metadata_to_save) + if args.huggingface_repo_id is not None: + huggingface_utils.upload(args, ckpt_file, "/" + ckpt_name, force_sync_upload=force_sync_upload) + + def remove_model(old_ckpt_name): + old_ckpt_file = os.path.join(args.output_dir, old_ckpt_name) + if os.path.exists(old_ckpt_file): + accelerator.print(f"removing old checkpoint: {old_ckpt_file}") + os.remove(old_ckpt_file) + + # For --sample_at_first + if should_sample_images(args, global_step, epoch=0): + optimizer_eval_fn() + self.sample_images(accelerator, args, 0, global_step, vae, transformer, sample_parameters, dit_dtype) + optimizer_train_fn() + if len(accelerator.trackers) > 0: + # log empty object to commit the sample images to wandb + accelerator.log({}, step=0) + + # training loop + + # log device and dtype for each model + logger.info(f"DiT dtype: {transformer.dtype}, device: {transformer.device}") + + clean_memory_on_device(accelerator.device) + + for epoch in range(epoch_to_start, num_train_epochs): + accelerator.print(f"\nepoch {epoch+1}/{num_train_epochs}") + current_epoch.value = epoch + 1 + + metadata["ss_epoch"] = str(epoch + 1) + + accelerator.unwrap_model(network).on_epoch_start(transformer) + + for step, batch in enumerate(train_dataloader): + latents = batch["latents"] + bsz = latents.shape[0] + current_step.value = global_step + + with accelerator.accumulate(training_model): + accelerator.unwrap_model(network).on_step_start() + + latents = self.scale_shift_latents(latents) + + # Sample noise that we'll add to the latents + noise = torch.randn_like(latents) + + # calculate model input and timesteps + noisy_model_input, timesteps = self.get_noisy_model_input_and_timesteps( + args, noise, latents, noise_scheduler, accelerator.device, dit_dtype + ) + + weighting = compute_loss_weighting_for_sd3( + args.weighting_scheme, noise_scheduler, timesteps, accelerator.device, dit_dtype + ) + + model_pred, target = self.call_dit( + args, accelerator, transformer, latents, batch, noise, noisy_model_input, timesteps, network_dtype + ) + loss = torch.nn.functional.mse_loss(model_pred.to(network_dtype), target, reduction="none") + + if weighting is not None: + loss = loss * weighting + # loss = loss.mean([1, 2, 3]) + # # min snr gamma, scale v pred loss like noise pred, v pred like loss, debiased estimation etc. 
+ # loss = self.post_process_loss(loss, args, timesteps, noise_scheduler) + + loss = loss.mean() # mean loss over all elements in batch + + accelerator.backward(loss) + if accelerator.sync_gradients: + # self.all_reduce_network(accelerator, network) # sync DDP grad manually + state = accelerate.PartialState() + if state.distributed_type != accelerate.DistributedType.NO: + for param in network.parameters(): + if param.grad is not None: + param.grad = accelerator.reduce(param.grad, reduction="mean") + + if args.max_grad_norm != 0.0: + params_to_clip = accelerator.unwrap_model(network).get_trainable_params() + accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) + + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad(set_to_none=True) + + if args.scale_weight_norms: + keys_scaled, mean_norm, maximum_norm = accelerator.unwrap_model(network).apply_max_norm_regularization( + args.scale_weight_norms, accelerator.device + ) + max_mean_logs = {"Keys Scaled": keys_scaled, "Average key norm": mean_norm} + else: + keys_scaled, mean_norm, maximum_norm = None, None, None + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: + progress_bar.update(1) + global_step += 1 + + # to avoid calling optimizer_eval_fn() too frequently, we call it only when we need to sample images or save the model + should_sampling = should_sample_images(args, global_step, epoch=None) + should_saving = args.save_every_n_steps is not None and global_step % args.save_every_n_steps == 0 + + if should_sampling or should_saving: + optimizer_eval_fn() + if should_sampling: + self.sample_images(accelerator, args, None, global_step, vae, transformer, sample_parameters, dit_dtype) + + if should_saving: + accelerator.wait_for_everyone() + if accelerator.is_main_process: + ckpt_name = train_utils.get_step_ckpt_name(args.output_name, global_step) + save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch) + + if args.save_state: + train_utils.save_and_remove_state_stepwise(args, accelerator, global_step) + + remove_step_no = train_utils.get_remove_step_no(args, global_step) + if remove_step_no is not None: + remove_ckpt_name = train_utils.get_step_ckpt_name(args.output_name, remove_step_no) + remove_model(remove_ckpt_name) + optimizer_train_fn() + + current_loss = loss.detach().item() + loss_recorder.add(epoch=epoch, step=step, loss=current_loss) + avr_loss: float = loss_recorder.moving_average + logs = {"avr_loss": avr_loss} # , "lr": lr_scheduler.get_last_lr()[0]} + progress_bar.set_postfix(**logs) + + if args.scale_weight_norms: + progress_bar.set_postfix(**{**max_mean_logs, **logs}) + + if len(accelerator.trackers) > 0: + logs = self.generate_step_logs( + args, current_loss, avr_loss, lr_scheduler, lr_descriptions, optimizer, keys_scaled, mean_norm, maximum_norm + ) + accelerator.log(logs, step=global_step) + + if global_step >= args.max_train_steps: + break + + if len(accelerator.trackers) > 0: + logs = {"loss/epoch": loss_recorder.moving_average} + accelerator.log(logs, step=epoch + 1) + + accelerator.wait_for_everyone() + + # save model at the end of epoch if needed + optimizer_eval_fn() + if args.save_every_n_epochs is not None: + saving = (epoch + 1) % args.save_every_n_epochs == 0 and (epoch + 1) < num_train_epochs + if is_main_process and saving: + ckpt_name = train_utils.get_epoch_ckpt_name(args.output_name, epoch + 1) + save_model(ckpt_name, accelerator.unwrap_model(network), global_step, epoch + 1) + + remove_epoch_no = 
train_utils.get_remove_epoch_no(args, epoch + 1) + if remove_epoch_no is not None: + remove_ckpt_name = train_utils.get_epoch_ckpt_name(args.output_name, remove_epoch_no) + remove_model(remove_ckpt_name) + + if args.save_state: + train_utils.save_and_remove_state_on_epoch_end(args, accelerator, epoch + 1) + + self.sample_images(accelerator, args, epoch + 1, global_step, vae, transformer, sample_parameters, dit_dtype) + optimizer_train_fn() + + # end of epoch + + # metadata["ss_epoch"] = str(num_train_epochs) + metadata["ss_training_finished_at"] = str(time.time()) + + if is_main_process: + network = accelerator.unwrap_model(network) + + accelerator.end_training() + optimizer_eval_fn() + + if is_main_process and (args.save_state or args.save_state_on_train_end): + train_utils.save_state_on_train_end(args, accelerator) + + if is_main_process: + ckpt_name = train_utils.get_last_ckpt_name(args.output_name) + save_model(ckpt_name, network, global_step, num_train_epochs, force_sync_upload=True) + + logger.info("model saved.") + + +def setup_parser_common() -> argparse.ArgumentParser: + def int_or_float(value): + if value.endswith("%"): + try: + return float(value[:-1]) / 100.0 + except ValueError: + raise argparse.ArgumentTypeError(f"Value '{value}' is not a valid percentage") + try: + float_value = float(value) + if float_value >= 1 and float_value.is_integer(): + return int(value) + return float(value) + except ValueError: + raise argparse.ArgumentTypeError(f"'{value}' is not an int or float") + + parser = argparse.ArgumentParser() + + # general settings + parser.add_argument( + "--config_file", + type=str, + default=None, + help="using .toml instead of args to pass hyperparameter / ハイパーパラメータを引数ではなく.tomlファイルで渡す", + ) + parser.add_argument( + "--dataset_config", + type=pathlib.Path, + default=None, + help="config file for dataset / データセットの設定ファイル", + ) + + # training settings + parser.add_argument( + "--sdpa", + action="store_true", + help="use sdpa for CrossAttention (requires PyTorch 2.0) / CrossAttentionにsdpaを使う(PyTorch 2.0が必要)", + ) + parser.add_argument( + "--flash_attn", + action="store_true", + help="use FlashAttention for CrossAttention, requires FlashAttention / CrossAttentionにFlashAttentionを使う、FlashAttentionが必要", + ) + parser.add_argument( + "--sage_attn", + action="store_true", + help="use SageAttention. 
requires SageAttention / SageAttentionを使う。SageAttentionが必要", + ) + parser.add_argument( + "--xformers", + action="store_true", + help="use xformers for CrossAttention, requires xformers / CrossAttentionにxformersを使う、xformersが必要", + ) + parser.add_argument( + "--flash3", + action="store_true", + help="use FlashAttention 3 for CrossAttention, requires FlashAttention 3, HunyuanVideo does not support this yet" + " / CrossAttentionにFlashAttention 3を使う、FlashAttention 3が必要。HunyuanVideoは未対応。", + ) + parser.add_argument( + "--split_attn", + action="store_true", + help="use split attention for attention calculation (split batch size=1, affects memory usage and speed)" + " / attentionを分割して計算する(バッチサイズ=1に分割、メモリ使用量と速度に影響)", + ) + + parser.add_argument("--max_train_steps", type=int, default=1600, help="training steps / 学習ステップ数") + parser.add_argument( + "--max_train_epochs", + type=int, + default=None, + help="training epochs (overrides max_train_steps) / 学習エポック数(max_train_stepsを上書きします)", + ) + parser.add_argument( + "--max_data_loader_n_workers", + type=int, + default=8, + help="max num workers for DataLoader (lower is less main RAM usage, faster epoch start and slower data loading) / DataLoaderの最大プロセス数(小さい値ではメインメモリの使用量が減りエポック間の待ち時間が減りますが、データ読み込みは遅くなります)", + ) + parser.add_argument( + "--persistent_data_loader_workers", + action="store_true", + help="persistent DataLoader workers (useful for reduce time gap between epoch, but may use more memory) / DataLoader のワーカーを持続させる (エポック間の時間差を少なくするのに有効だが、より多くのメモリを消費する可能性がある)", + ) + parser.add_argument("--seed", type=int, default=None, help="random seed for training / 学習時の乱数のseed") + parser.add_argument( + "--gradient_checkpointing", action="store_true", help="enable gradient checkpointing / gradient checkpointingを有効にする" + ) + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help="Number of updates steps to accumulate before performing a backward/update pass / 学習時に逆伝播をする前に勾配を合計するステップ数", + ) + parser.add_argument( + "--mixed_precision", + type=str, + default="no", + choices=["no", "fp16", "bf16"], + help="use mixed precision / 混合精度を使う場合、その精度", + ) + + parser.add_argument( + "--logging_dir", + type=str, + default=None, + help="enable logging and output TensorBoard log to this directory / ログ出力を有効にしてこのディレクトリにTensorBoard用のログを出力する", + ) + parser.add_argument( + "--log_with", + type=str, + default=None, + choices=["tensorboard", "wandb", "all"], + help="what logging tool(s) to use (if 'all', TensorBoard and WandB are both used) / ログ出力に使用するツール (allを指定するとTensorBoardとWandBの両方が使用される)", + ) + parser.add_argument( + "--log_prefix", type=str, default=None, help="add prefix for each log directory / ログディレクトリ名の先頭に追加する文字列" + ) + parser.add_argument( + "--log_tracker_name", + type=str, + default=None, + help="name of tracker to use for logging, default is script-specific default name / ログ出力に使用するtrackerの名前、省略時はスクリプトごとのデフォルト名", + ) + parser.add_argument( + "--wandb_run_name", + type=str, + default=None, + help="The name of the specific wandb session / wandb ログに表示される特定の実行の名前", + ) + parser.add_argument( + "--log_tracker_config", + type=str, + default=None, + help="path to tracker config file to use for logging / ログ出力に使用するtrackerの設定ファイルのパス", + ) + parser.add_argument( + "--wandb_api_key", + type=str, + default=None, + help="specify WandB API key to log in before starting training (optional). 
/ WandB APIキーを指定して学習開始前にログインする(オプション)", + ) + parser.add_argument("--log_config", action="store_true", help="log training configuration / 学習設定をログに出力する") + + parser.add_argument( + "--ddp_timeout", + type=int, + default=None, + help="DDP timeout (min, None for default of accelerate) / DDPのタイムアウト(分、Noneでaccelerateのデフォルト)", + ) + parser.add_argument( + "--ddp_gradient_as_bucket_view", + action="store_true", + help="enable gradient_as_bucket_view for DDP / DDPでgradient_as_bucket_viewを有効にする", + ) + parser.add_argument( + "--ddp_static_graph", + action="store_true", + help="enable static_graph for DDP / DDPでstatic_graphを有効にする", + ) + + parser.add_argument( + "--sample_every_n_steps", + type=int, + default=None, + help="generate sample images every N steps / 学習中のモデルで指定ステップごとにサンプル出力する", + ) + parser.add_argument( + "--sample_at_first", action="store_true", help="generate sample images before training / 学習前にサンプル出力する" + ) + parser.add_argument( + "--sample_every_n_epochs", + type=int, + default=None, + help="generate sample images every N epochs (overwrites n_steps) / 学習中のモデルで指定エポックごとにサンプル出力する(ステップ数指定を上書きします)", + ) + parser.add_argument( + "--sample_prompts", + type=str, + default=None, + help="file for prompts to generate sample images / 学習中モデルのサンプル出力用プロンプトのファイル", + ) + + # optimizer and lr scheduler settings + parser.add_argument( + "--optimizer_type", + type=str, + default="", + help="Optimizer to use / オプティマイザの種類: AdamW (default), AdamW8bit, AdaFactor. " + "Also, you can use any optimizer by specifying the full path to the class, like 'torch.optim.AdamW', 'bitsandbytes.optim.AdEMAMix8bit' or 'bitsandbytes.optim.PagedAdEMAMix8bit' etc. / ", + ) + parser.add_argument( + "--optimizer_args", + type=str, + default=None, + nargs="*", + help='additional arguments for optimizer (like "weight_decay=0.01 betas=0.9,0.999 ...") / オプティマイザの追加引数(例: "weight_decay=0.01 betas=0.9,0.999 ...")', + ) + parser.add_argument("--learning_rate", type=float, default=2.0e-6, help="learning rate / 学習率") + parser.add_argument( + "--max_grad_norm", + default=1.0, + type=float, + help="Max gradient norm, 0 for no clipping / 勾配正規化の最大norm、0でclippingを行わない", + ) + + parser.add_argument( + "--lr_scheduler", + type=str, + default="constant", + help="scheduler to use for learning rate / 学習率のスケジューラ: linear, cosine, cosine_with_restarts, polynomial, constant (default), constant_with_warmup, adafactor", + ) + parser.add_argument( + "--lr_warmup_steps", + type=int_or_float, + default=0, + help="Int number of steps for the warmup in the lr scheduler (default is 0) or float with ratio of train steps" + " / 学習率のスケジューラをウォームアップするステップ数(デフォルト0)、または学習ステップの比率(1未満のfloat値の場合)", + ) + parser.add_argument( + "--lr_decay_steps", + type=int_or_float, + default=0, + help="Int number of steps for the decay in the lr scheduler (default is 0) or float (<1) with ratio of train steps" + " / 学習率のスケジューラを減衰させるステップ数(デフォルト0)、または学習ステップの比率(1未満のfloat値の場合)", + ) + parser.add_argument( + "--lr_scheduler_num_cycles", + type=int, + default=1, + help="Number of restarts for cosine scheduler with restarts / cosine with restartsスケジューラでのリスタート回数", + ) + parser.add_argument( + "--lr_scheduler_power", + type=float, + default=1, + help="Polynomial power for polynomial scheduler / polynomialスケジューラでのpolynomial power", + ) + parser.add_argument( + "--lr_scheduler_timescale", + type=int, + default=None, + help="Inverse sqrt timescale for inverse sqrt scheduler,defaults to `num_warmup_steps`" + + " / 逆平方根スケジューラのタイムスケール、デフォルトは`num_warmup_steps`", + ) + parser.add_argument( + 
"--lr_scheduler_min_lr_ratio", + type=float, + default=None, + help="The minimum learning rate as a ratio of the initial learning rate for cosine with min lr scheduler and warmup decay scheduler" + + " / 初期学習率の比率としての最小学習率を指定する、cosine with min lr と warmup decay スケジューラ で有効", + ) + parser.add_argument("--lr_scheduler_type", type=str, default="", help="custom scheduler module / 使用するスケジューラ") + parser.add_argument( + "--lr_scheduler_args", + type=str, + default=None, + nargs="*", + help='additional arguments for scheduler (like "T_max=100") / スケジューラの追加引数(例: "T_max100")', + ) + + parser.add_argument("--fp8_base", action="store_true", help="use fp8 for base model / base modelにfp8を使う") + # parser.add_argument("--full_fp16", action="store_true", help="fp16 training including gradients / 勾配も含めてfp16で学習する") + # parser.add_argument("--full_bf16", action="store_true", help="bf16 training including gradients / 勾配も含めてbf16で学習する") + + parser.add_argument( + "--dynamo_backend", + type=str, + default="NO", + choices=[e.value for e in DynamoBackend], + help="dynamo backend type (default is None) / dynamoのbackendの種類(デフォルトは None)", + ) + + parser.add_argument( + "--dynamo_mode", + type=str, + default=None, + choices=["default", "reduce-overhead", "max-autotune"], + help="dynamo mode (default is default) / dynamoのモード(デフォルトは default)", + ) + + parser.add_argument( + "--dynamo_fullgraph", + action="store_true", + help="use fullgraph mode for dynamo / dynamoのfullgraphモードを使う", + ) + + parser.add_argument( + "--dynamo_dynamic", + action="store_true", + help="use dynamic mode for dynamo / dynamoのdynamicモードを使う", + ) + + parser.add_argument( + "--blocks_to_swap", + type=int, + default=None, + help="number of blocks to swap in the model, max XXX / モデル内のブロックの数、最大XXX", + ) + parser.add_argument( + "--img_in_txt_in_offloading", + action="store_true", + help="offload img_in and txt_in to cpu / img_inとtxt_inをCPUにオフロードする", + ) + + # parser.add_argument("--flow_shift", type=float, default=7.0, help="Shift factor for flow matching schedulers") + parser.add_argument( + "--guidance_scale", type=float, default=1.0, help="Embeded classifier free guidance scale (HunyuanVideo only)." + ) + parser.add_argument( + "--timestep_sampling", + choices=["sigma", "uniform", "sigmoid", "shift"], + default="sigma", + help="Method to sample timesteps: sigma-based, uniform random, sigmoid of random normal and shift of sigmoid." + " / タイムステップをサンプリングする方法:sigma、random uniform、random normalのsigmoid、sigmoidのシフト。", + ) + parser.add_argument( + "--discrete_flow_shift", + type=float, + default=1.0, + help="Discrete flow shift for the Euler Discrete Scheduler, default is 1.0. / Euler Discrete Schedulerの離散フローシフト、デフォルトは1.0。", + ) + parser.add_argument( + "--sigmoid_scale", + type=float, + default=1.0, + help='Scale factor for sigmoid timestep sampling (only used when timestep-sampling is "sigmoid" or "shift"). / sigmoidタイムステップサンプリングの倍率(timestep-samplingが"sigmoid"または"shift"の場合のみ有効)。', + ) + parser.add_argument( + "--weighting_scheme", + type=str, + default="none", + choices=["logit_normal", "mode", "cosmap", "sigma_sqrt", "none"], + help="weighting scheme for timestep distribution. 
Default is none" + " / タイムステップ分布の重み付けスキーム、デフォルトはnone", + ) + parser.add_argument( + "--logit_mean", + type=float, + default=0.0, + help="mean to use when using the `'logit_normal'` weighting scheme / `'logit_normal'`重み付けスキームを使用する場合の平均", + ) + parser.add_argument( + "--logit_std", + type=float, + default=1.0, + help="std to use when using the `'logit_normal'` weighting scheme / `'logit_normal'`重み付けスキームを使用する場合のstd", + ) + parser.add_argument( + "--mode_scale", + type=float, + default=1.29, + help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme` / モード重み付けスキームのスケール", + ) + parser.add_argument( + "--min_timestep", + type=int, + default=None, + help="set minimum time step for training (0~999, default is 0) / 学習時のtime stepの最小値を設定する(0~999で指定、省略時はデフォルト値(0)) ", + ) + parser.add_argument( + "--max_timestep", + type=int, + default=None, + help="set maximum time step for training (1~1000, default is 1000) / 学習時のtime stepの最大値を設定する(1~1000で指定、省略時はデフォルト値(1000))", + ) + + parser.add_argument( + "--show_timesteps", + type=str, + default=None, + choices=["image", "console"], + help="show timesteps in image or console, and return to console / タイムステップを画像またはコンソールに表示し、コンソールに戻る", + ) + + # network settings + parser.add_argument( + "--no_metadata", action="store_true", help="do not save metadata in output model / メタデータを出力先モデルに保存しない" + ) + parser.add_argument( + "--network_weights", type=str, default=None, help="pretrained weights for network / 学習するネットワークの初期重み" + ) + parser.add_argument( + "--network_module", type=str, default=None, help="network module to train / 学習対象のネットワークのモジュール" + ) + parser.add_argument( + "--network_dim", + type=int, + default=None, + help="network dimensions (depends on each network) / モジュールの次元数(ネットワークにより定義は異なります)", + ) + parser.add_argument( + "--network_alpha", + type=float, + default=1, + help="alpha for LoRA weight scaling, default 1 (same as network_dim for same behavior as old version) / LoRaの重み調整のalpha値、デフォルト1(旧バージョンと同じ動作をするにはnetwork_dimと同じ値を指定)", + ) + parser.add_argument( + "--network_dropout", + type=float, + default=None, + help="Drops neurons out of training every step (0 or None is default behavior (no dropout), 1 would drop all neurons) / 訓練時に毎ステップでニューロンをdropする(0またはNoneはdropoutなし、1は全ニューロンをdropout)", + ) + parser.add_argument( + "--network_args", + type=str, + default=None, + nargs="*", + help="additional arguments for network (key=value) / ネットワークへの追加の引数", + ) + parser.add_argument( + "--training_comment", + type=str, + default=None, + help="arbitrary comment string stored in metadata / メタデータに記録する任意のコメント文字列", + ) + parser.add_argument( + "--dim_from_weights", + action="store_true", + help="automatically determine dim (rank) from network_weights / dim (rank)をnetwork_weightsで指定した重みから自動で決定する", + ) + parser.add_argument( + "--scale_weight_norms", + type=float, + default=None, + help="Scale the weight of each key pair to help prevent overtraing via exploding gradients. 
(1 is a good starting point) / 重みの値をスケーリングして勾配爆発を防ぐ(1が初期値としては適当)", + ) + parser.add_argument( + "--base_weights", + type=str, + default=None, + nargs="*", + help="network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みファイル", + ) + parser.add_argument( + "--base_weights_multiplier", + type=float, + default=None, + nargs="*", + help="multiplier for network weights to merge into the model before training / 学習前にあらかじめモデルにマージするnetworkの重みの倍率", + ) + + # save and load settings + parser.add_argument( + "--output_dir", type=str, default=None, help="directory to output trained model / 学習後のモデル出力先ディレクトリ" + ) + parser.add_argument( + "--output_name", + type=str, + default=None, + help="base name of trained model file / 学習後のモデルの拡張子を除くファイル名", + ) + parser.add_argument("--resume", type=str, default=None, help="saved state to resume training / 学習再開するモデルのstate") + + parser.add_argument( + "--save_every_n_epochs", + type=int, + default=None, + help="save checkpoint every N epochs / 学習中のモデルを指定エポックごとに保存する", + ) + parser.add_argument( + "--save_every_n_steps", + type=int, + default=None, + help="save checkpoint every N steps / 学習中のモデルを指定ステップごとに保存する", + ) + parser.add_argument( + "--save_last_n_epochs", + type=int, + default=None, + help="save last N checkpoints when saving every N epochs (remove older checkpoints) / 指定エポックごとにモデルを保存するとき最大Nエポック保存する(古いチェックポイントは削除する)", + ) + parser.add_argument( + "--save_last_n_epochs_state", + type=int, + default=None, + help="save last N checkpoints of state (overrides the value of --save_last_n_epochs)/ 最大Nエポックstateを保存する(--save_last_n_epochsの指定を上書きする)", + ) + parser.add_argument( + "--save_last_n_steps", + type=int, + default=None, + help="save checkpoints until N steps elapsed (remove older checkpoints if N steps elapsed) / 指定ステップごとにモデルを保存するとき、このステップ数経過するまで保存する(このステップ数経過したら削除する)", + ) + parser.add_argument( + "--save_last_n_steps_state", + type=int, + default=None, + help="save states until N steps elapsed (remove older states if N steps elapsed, overrides --save_last_n_steps) / 指定ステップごとにstateを保存するとき、このステップ数経過するまで保存する(このステップ数経過したら削除する。--save_last_n_stepsを上書きする)", + ) + parser.add_argument( + "--save_state", + action="store_true", + help="save training state additionally (including optimizer states etc.) when saving model / optimizerなど学習状態も含めたstateをモデル保存時に追加で保存する", + ) + parser.add_argument( + "--save_state_on_train_end", + action="store_true", + help="save training state (including optimizer states etc.) 
on train end even if --save_state is not specified" + " / --save_stateが未指定時にもoptimizerなど学習状態も含めたstateを学習終了時に保存する", + ) + + # SAI Model spec + parser.add_argument( + "--metadata_title", + type=str, + default=None, + help="title for model metadata (default is output_name) / メタデータに書き込まれるモデルタイトル、省略時はoutput_name", + ) + parser.add_argument( + "--metadata_author", + type=str, + default=None, + help="author name for model metadata / メタデータに書き込まれるモデル作者名", + ) + parser.add_argument( + "--metadata_description", + type=str, + default=None, + help="description for model metadata / メタデータに書き込まれるモデル説明", + ) + parser.add_argument( + "--metadata_license", + type=str, + default=None, + help="license for model metadata / メタデータに書き込まれるモデルライセンス", + ) + parser.add_argument( + "--metadata_tags", + type=str, + default=None, + help="tags for model metadata, separated by comma / メタデータに書き込まれるモデルタグ、カンマ区切り", + ) + + # huggingface settings + parser.add_argument( + "--huggingface_repo_id", + type=str, + default=None, + help="huggingface repo name to upload / huggingfaceにアップロードするリポジトリ名", + ) + parser.add_argument( + "--huggingface_repo_type", + type=str, + default=None, + help="huggingface repo type to upload / huggingfaceにアップロードするリポジトリの種類", + ) + parser.add_argument( + "--huggingface_path_in_repo", + type=str, + default=None, + help="huggingface model path to upload files / huggingfaceにアップロードするファイルのパス", + ) + parser.add_argument("--huggingface_token", type=str, default=None, help="huggingface token / huggingfaceのトークン") + parser.add_argument( + "--huggingface_repo_visibility", + type=str, + default=None, + help="huggingface repository visibility ('public' for public, 'private' or None for private) / huggingfaceにアップロードするリポジトリの公開設定('public'で公開、'private'またはNoneで非公開)", + ) + parser.add_argument( + "--save_state_to_huggingface", action="store_true", help="save state to huggingface / huggingfaceにstateを保存する" + ) + parser.add_argument( + "--resume_from_huggingface", + action="store_true", + help="resume from huggingface (ex: --resume {repo_id}/{path_in_repo}:{revision}:{repo_type}) / huggingfaceから学習を再開する(例: --resume {repo_id}/{path_in_repo}:{revision}:{repo_type})", + ) + parser.add_argument( + "--async_upload", + action="store_true", + help="upload to huggingface asynchronously / huggingfaceに非同期でアップロードする", + ) + + parser.add_argument("--dit", type=str, help="DiT checkpoint path / DiTのチェックポイントのパス") + parser.add_argument("--vae", type=str, help="VAE checkpoint path / VAEのチェックポイントのパス") + parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is float16") + + return parser + + +def read_config_from_file(args: argparse.Namespace, parser: argparse.ArgumentParser): + if not args.config_file: + return args + + config_path = args.config_file + ".toml" if not args.config_file.endswith(".toml") else args.config_file + + if not os.path.exists(config_path): + logger.info(f"{config_path} not found.") + exit(1) + + logger.info(f"Loading settings from {config_path}...") + with open(config_path, "r", encoding="utf-8") as f: + config_dict = toml.load(f) + + # combine all sections into one + ignore_nesting_dict = {} + for section_name, section_dict in config_dict.items(): + # if value is not dict, save key and value as is + if not isinstance(section_dict, dict): + ignore_nesting_dict[section_name] = section_dict + continue + + # if value is dict, save all key and value into one dict + for key, value in section_dict.items(): + ignore_nesting_dict[key] = value + + config_args = 
argparse.Namespace(**ignore_nesting_dict) + args = parser.parse_args(namespace=config_args) + args.config_file = os.path.splitext(args.config_file)[0] + logger.info(args.config_file) + + return args + + +def hv_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + """HunyuanVideo specific parser setup""" + # model settings + parser.add_argument("--dit_dtype", type=str, default=None, help="data type for DiT, default is bfloat16") + parser.add_argument("--dit_in_channels", type=int, default=16, help="input channels for DiT, default is 16, skyreels I2V is 32") + parser.add_argument("--fp8_llm", action="store_true", help="use fp8 for LLM / LLMにfp8を使う") + parser.add_argument("--text_encoder1", type=str, help="Text Encoder 1 directory / テキストエンコーダ1のディレクトリ") + parser.add_argument("--text_encoder2", type=str, help="Text Encoder 2 directory / テキストエンコーダ2のディレクトリ") + parser.add_argument("--text_encoder_dtype", type=str, default=None, help="data type for Text Encoder, default is float16") + parser.add_argument( + "--vae_tiling", + action="store_true", + help="enable spatial tiling for VAE, default is False. If vae_spatial_tile_sample_min_size is set, this is automatically enabled." + " / VAEの空間タイリングを有効にする、デフォルトはFalse。vae_spatial_tile_sample_min_sizeが設定されている場合、自動的に有効になります。", + ) + parser.add_argument("--vae_chunk_size", type=int, default=None, help="chunk size for CausalConv3d in VAE") + parser.add_argument( + "--vae_spatial_tile_sample_min_size", type=int, default=None, help="spatial tile sample min size for VAE, default 256" + ) + return parser + + +def main(): + parser = setup_parser_common() + parser = hv_setup_parser(parser) + + args = parser.parse_args() + args = read_config_from_file(args, parser) + + args.fp8_scaled = False # HunyuanVideo does not support this yet + + trainer = NetworkTrainer() + trainer.train(args) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/lora_post_hoc_ema.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/lora_post_hoc_ema.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0610ffbff48845e19590ce3b8110b98b1d3ef9 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/lora_post_hoc_ema.py @@ -0,0 +1,141 @@ +# merge LoRA weights with Post-Hoc EMA method +# 1. Sort the files for the specified path by modification time +# 2. Load the oldest file and initialize weights +# 3. Iterate through the remaining files, loading and merging their weights with decay rate beta +# 4. Save the final merged weights to a new file. 
The metadata is updated to reflect the new file + +import os +from typing import Optional +import numpy as np +import torch +from safetensors.torch import save_file +from musubi_tuner.utils import model_utils +from musubi_tuner.utils.safetensors_utils import MemoryEfficientSafeOpen + + +def sigma_rel_to_gamma(sigma_rel): + """Implementation of Algorithm 2 from the paper: https://arxiv.org/pdf/2312.02696""" + # solve the cubic equation γ^3 + 7γ^2 + (16 - 1/σ_rel^2)γ + (12 - 1/σ_rel^2) = 0 + t = sigma_rel**-2 + # coefficients [1, 7, 16-t, 12-t] + coeffs = [1, 7, 16 - t, 12 - t] + # positive real root is γ + roots = np.roots(coeffs) + gamma = roots[np.isreal(roots) & (roots.real >= 0)].real.max() + return gamma + + +def merge_lora_weights_with_post_hoc_ema( + path: list[str], no_sort: bool, beta1: float, beta2: float, sigma_rel: Optional[float], output_file: str +): + # Sort the files by modification time + if not no_sort: + print("Sorting files by modification time...") + path.sort(key=lambda x: os.path.getmtime(x)) + + # Load metadata from the last file + print(f"Loading metadata from {path[-1]}") + with MemoryEfficientSafeOpen(path[-1]) as f: + metadata = f.metadata() + if metadata is None: + print("No metadata found in the last file, proceeding without metadata.") + else: + print("Metadata found, using metadata from the last file.") + + # Load the oldest file and initialize weights + print(f"Loading weights from {path[0]}") + with MemoryEfficientSafeOpen(path[0]) as f: + original_dtypes = {} + state_dict = {} + for key in f.keys(): + value: torch.Tensor = f.get_tensor(key) + + if value.dtype.is_floating_point: + original_dtypes[key] = value.dtype + value = value.to(torch.float32) # Convert to float32 for merging + else: + print(f"Skipping non-floating point tensor: {key}") + + state_dict[key] = value + + # Iterate through the remaining files, loading and merging their weights with decay rate beta + ema_count = len(path) - 1 + if sigma_rel is not None: + gamma = sigma_rel_to_gamma(sigma_rel) + else: + gamma = None + + for i, file in enumerate(path[1:]): + if sigma_rel is not None: + # Calculate beta using Power Function EMA + t = i + 1 + beta = (1 - 1 / t) ** (gamma + 1) + else: + beta = beta1 + (beta2 - beta1) * (i / (ema_count - 1)) if ema_count > 1 else beta1 + + print(f"Loading weights from {file} for merging with beta={beta:.4f}") + with MemoryEfficientSafeOpen(file) as f: + for key in f.keys(): + value = f.get_tensor(key) + if key.endswith(".alpha"): + # compare alpha tensors and raise an error if they differ + if key not in state_dict or torch.allclose(state_dict[key], value.to(torch.float32)): + # If alpha tensors match, skip merging + continue + else: + raise ValueError(f"Alpha tensors for key {key} do not match across files.") + + if not value.dtype.is_floating_point: + # Skip non-floating point tensors + print(f"Skipping non-floating point tensor: {key}") + continue + + if key in state_dict: + # Merge the weights with decay rate beta + value = value.to(torch.float32) + state_dict[key] = state_dict[key] * beta + value * (1 - beta) + else: + raise KeyError(f"Key {key} not found in the initial state_dict.") + + # Convert the merged weights back to their original dtypes + for key in state_dict: + if key in original_dtypes: + state_dict[key] = state_dict[key].to(original_dtypes[key]) + + # update metadata with new hash + if metadata is not None: + print("Updating metadata with new hashes.") + model_hash, legacy_hash = model_utils.precalculate_safetensors_hashes(state_dict, metadata) 
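# For reference, a rough sketch of the Power Function EMA decay used in the merge loop above,
# relying only on this file's sigma_rel_to_gamma (values are approximate):
#   gamma = sigma_rel_to_gamma(0.10)                       # ~6.94
#   betas = [(1 - 1 / t) ** (gamma + 1) for t in range(1, 6)]
#   # -> [0.00, 0.004, 0.040, 0.102, 0.170]; the weight (1 - beta) given to each newly
#   # loaded checkpoint shrinks as more files are folded in.
# Because the merged tensors no longer match any input file, the model hashes are recomputed
# above and written back into the metadata below.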
+ metadata["sshs_model_hash"] = model_hash + metadata["sshs_legacy_hash"] = legacy_hash + + # Save the final merged weights to a new file + print(f"Saving merged weights to {output_file}") + save_file(state_dict, output_file, metadata=metadata) + print("Merging completed successfully.") + + +def main(): + import argparse + + parser = argparse.ArgumentParser(description="Merge LoRA weights with Post-Hoc EMA method.") + parser.add_argument("path", nargs="+", help="List of paths to the LoRA weight files.") + parser.add_argument("--no_sort", action="store_true", help="Do not sort the files by modification time.") + parser.add_argument("--beta", type=float, default=0.95, help="Decay rate for merging weights.") + parser.add_argument("--beta2", type=float, default=None, help="Decay rate for merging weights for linear interpolation.") + parser.add_argument( + "--sigma_rel", + type=float, + default=None, + help="Relative sigma for Power Function EMA, default is None (linear interpolation).", + ) + parser.add_argument("--output_file", type=str, required=True, help="Output file path for merged weights.") + + args = parser.parse_args() + + beta2 = args.beta if args.beta2 is None else args.beta2 + merge_lora_weights_with_post_hoc_ema(args.path, args.no_sort, args.beta, beta2, args.sigma_rel, args.output_file) + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/merge_lora.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/merge_lora.py new file mode 100644 index 0000000000000000000000000000000000000000..10d38db8f69966e0137c73936d6bc46f3962feed --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/merge_lora.py @@ -0,0 +1,63 @@ +import argparse +import logging +import torch +from safetensors.torch import load_file +from musubi_tuner.networks import lora +from musubi_tuner.utils.safetensors_utils import mem_eff_save_file +from musubi_tuner.hunyuan_model.models import load_transformer + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def parse_args(): + parser = argparse.ArgumentParser(description="HunyuanVideo model merger script") + + parser.add_argument("--dit", type=str, required=True, help="DiT checkpoint path or directory") + parser.add_argument("--dit_in_channels", type=int, default=16, help="input channels for DiT, default is 16, skyreels I2V is 32") + parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path") + parser.add_argument("--lora_multiplier", type=float, nargs="*", default=[1.0], help="LoRA multiplier (can specify multiple values)") + parser.add_argument("--save_merged_model", type=str, required=True, help="Path to save the merged model") + parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device to use for merging") + + return parser.parse_args() + + +def main(): + args = parse_args() + + device = torch.device(args.device) + logger.info(f"Using device: {device}") + + # Load DiT model + logger.info(f"Loading DiT model from {args.dit}") + transformer = load_transformer(args.dit, "torch", False, "cpu", torch.bfloat16, in_channels=args.dit_in_channels) + transformer.eval() + + # Load LoRA weights and merge + if args.lora_weight is not None and len(args.lora_weight) > 0: + for i, lora_weight in enumerate(args.lora_weight): + # Use the corresponding lora_multiplier or default to 1.0 + if args.lora_multiplier is not None and len(args.lora_multiplier) > i: + lora_multiplier = 
args.lora_multiplier[i] + else: + lora_multiplier = 1.0 + + logger.info(f"Loading LoRA weights from {lora_weight} with multiplier {lora_multiplier}") + weights_sd = load_file(lora_weight) + network = lora.create_arch_network_from_weights( + lora_multiplier, weights_sd, unet=transformer, for_inference=True + ) + logger.info("Merging LoRA weights to DiT model") + network.merge_to(None, transformer, weights_sd, device=device, non_blocking=True) + + logger.info("LoRA weights loaded") + + # Save the merged model + logger.info(f"Saving merged model to {args.save_merged_model}") + mem_eff_save_file(transformer.state_dict(), args.save_merged_model) + logger.info("Merged model saved") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__init__.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/__init__.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..559b42115d2915d43f6fcbace2bdbbd164e6c1c0 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/__init__.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/custom_offloading_utils.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/custom_offloading_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a38262b411ca442c7161447995c866177dbadd63 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/custom_offloading_utils.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/fp8_optimization_utils.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/fp8_optimization_utils.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67c44b5a8dd87cc7d9caba050883a79676fa8db8 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/fp8_optimization_utils.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/scheduling_flow_match_discrete.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/scheduling_flow_match_discrete.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f263acf3489c7aa0b077a545e1870d01a8fc59d7 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/scheduling_flow_match_discrete.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/unet_causal_3d_blocks.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/unet_causal_3d_blocks.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb6994a2115d079f0ba3b5e6fee189bbf9d0d0c8 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/__pycache__/unet_causal_3d_blocks.cpython-312.pyc differ diff --git 
a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/custom_offloading_utils.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/custom_offloading_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d813575af2ce4fcccf4a305c1002bf618844e591 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/custom_offloading_utils.py @@ -0,0 +1,266 @@ +from concurrent.futures import ThreadPoolExecutor +import gc +import time +from typing import Optional +import torch +import torch.nn as nn + + +def clean_memory_on_device(device: torch.device): + r""" + Clean memory on the specified device, will be called from training scripts. + """ + gc.collect() + + # device may "cuda" or "cuda:0", so we need to check the type of device + if device.type == "cuda": + torch.cuda.empty_cache() + if device.type == "xpu": + torch.xpu.empty_cache() + if device.type == "mps": + torch.mps.empty_cache() + + +def synchronize_device(device: torch.device): + if device.type == "cuda": + torch.cuda.synchronize() + elif device.type == "xpu": + torch.xpu.synchronize() + elif device.type == "mps": + torch.mps.synchronize() + + +def swap_weight_devices_cuda(device: torch.device, layer_to_cpu: nn.Module, layer_to_cuda: nn.Module): + assert layer_to_cpu.__class__ == layer_to_cuda.__class__ + + weight_swap_jobs = [] + + # This is not working for all cases (e.g. SD3), so we need to find the corresponding modules + # for module_to_cpu, module_to_cuda in zip(layer_to_cpu.modules(), layer_to_cuda.modules()): + # print(module_to_cpu.__class__, module_to_cuda.__class__) + # if hasattr(module_to_cpu, "weight") and module_to_cpu.weight is not None: + # weight_swap_jobs.append((module_to_cpu, module_to_cuda, module_to_cpu.weight.data, module_to_cuda.weight.data)) + + modules_to_cpu = {k: v for k, v in layer_to_cpu.named_modules()} + for module_to_cuda_name, module_to_cuda in layer_to_cuda.named_modules(): + if hasattr(module_to_cuda, "weight") and module_to_cuda.weight is not None: + module_to_cpu = modules_to_cpu.get(module_to_cuda_name, None) + if module_to_cpu is not None and module_to_cpu.weight.shape == module_to_cuda.weight.shape: + weight_swap_jobs.append((module_to_cpu, module_to_cuda, module_to_cpu.weight.data, module_to_cuda.weight.data)) + else: + if module_to_cuda.weight.data.device.type != device.type: + # print( + # f"Module {module_to_cuda_name} not found in CPU model or shape mismatch, so not swapping and moving to device" + # ) + module_to_cuda.weight.data = module_to_cuda.weight.data.to(device) + + torch.cuda.current_stream().synchronize() # this prevents the illegal loss value + + stream = torch.cuda.Stream() + with torch.cuda.stream(stream): + # cuda to cpu + for module_to_cpu, module_to_cuda, cuda_data_view, cpu_data_view in weight_swap_jobs: + cuda_data_view.record_stream(stream) + module_to_cpu.weight.data = cuda_data_view.data.to("cpu", non_blocking=True) + + stream.synchronize() + + # cpu to cuda + for module_to_cpu, module_to_cuda, cuda_data_view, cpu_data_view in weight_swap_jobs: + cuda_data_view.copy_(module_to_cuda.weight.data, non_blocking=True) + module_to_cuda.weight.data = cuda_data_view + + stream.synchronize() + torch.cuda.current_stream().synchronize() # this prevents the illegal loss value + + +def swap_weight_devices_no_cuda(device: torch.device, layer_to_cpu: nn.Module, layer_to_cuda: nn.Module): + """ + not tested + """ + assert layer_to_cpu.__class__ == layer_to_cuda.__class__ + + weight_swap_jobs = [] + for module_to_cpu, module_to_cuda 
in zip(layer_to_cpu.modules(), layer_to_cuda.modules()): + if hasattr(module_to_cpu, "weight") and module_to_cpu.weight is not None: + weight_swap_jobs.append((module_to_cpu, module_to_cuda, module_to_cpu.weight.data, module_to_cuda.weight.data)) + + # device to cpu + for module_to_cpu, module_to_cuda, cuda_data_view, cpu_data_view in weight_swap_jobs: + module_to_cpu.weight.data = cuda_data_view.data.to("cpu", non_blocking=True) + + synchronize_device() + + # cpu to device + for module_to_cpu, module_to_cuda, cuda_data_view, cpu_data_view in weight_swap_jobs: + cuda_data_view.copy_(module_to_cuda.weight.data, non_blocking=True) + module_to_cuda.weight.data = cuda_data_view + + synchronize_device() + + +def weighs_to_device(layer: nn.Module, device: torch.device): + for module in layer.modules(): + if hasattr(module, "weight") and module.weight is not None: + module.weight.data = module.weight.data.to(device, non_blocking=True) + + +class Offloader: + """ + common offloading class + """ + + def __init__(self, block_type: str, num_blocks: int, blocks_to_swap: int, device: torch.device, debug: bool = False): + self.block_type = block_type + self.num_blocks = num_blocks + self.blocks_to_swap = blocks_to_swap + self.device = device + self.debug = debug + + self.thread_pool = ThreadPoolExecutor(max_workers=1) + self.futures = {} + self.cuda_available = device.type == "cuda" + + def swap_weight_devices(self, block_to_cpu: nn.Module, block_to_cuda: nn.Module): + if self.cuda_available: + swap_weight_devices_cuda(self.device, block_to_cpu, block_to_cuda) + else: + swap_weight_devices_no_cuda(self.device, block_to_cpu, block_to_cuda) + + def _submit_move_blocks(self, blocks, block_idx_to_cpu, block_idx_to_cuda): + def move_blocks(bidx_to_cpu, block_to_cpu, bidx_to_cuda, block_to_cuda): + if self.debug: + start_time = time.perf_counter() + print( + f"[{self.block_type}] Move block {bidx_to_cpu} to CPU and block {bidx_to_cuda} to {'CUDA' if self.cuda_available else 'device'}" + ) + + self.swap_weight_devices(block_to_cpu, block_to_cuda) + + if self.debug: + print(f"[{self.block_type}] Moved blocks {bidx_to_cpu} and {bidx_to_cuda} in {time.perf_counter()-start_time:.2f}s") + return bidx_to_cpu, bidx_to_cuda # , event + + block_to_cpu = blocks[block_idx_to_cpu] + block_to_cuda = blocks[block_idx_to_cuda] + + self.futures[block_idx_to_cuda] = self.thread_pool.submit( + move_blocks, block_idx_to_cpu, block_to_cpu, block_idx_to_cuda, block_to_cuda + ) + + def _wait_blocks_move(self, block_idx): + if block_idx not in self.futures: + return + + if self.debug: + print(f"[{self.block_type}] Wait for block {block_idx}") + start_time = time.perf_counter() + + future = self.futures.pop(block_idx) + _, bidx_to_cuda = future.result() + + assert block_idx == bidx_to_cuda, f"Block index mismatch: {block_idx} != {bidx_to_cuda}" + + if self.debug: + print(f"[{self.block_type}] Waited for block {block_idx}: {time.perf_counter()-start_time:.2f}s") + + +class ModelOffloader(Offloader): + """ + supports forward offloading + """ + + def __init__( + self, + block_type: str, + blocks: list[nn.Module], + num_blocks: int, + blocks_to_swap: int, + supports_backward: bool, + device: torch.device, + debug: bool = False, + ): + super().__init__(block_type, num_blocks, blocks_to_swap, device, debug) + + self.supports_backward = supports_backward + self.forward_only = not supports_backward # forward only offloading: can be changed to True for inference + + if self.supports_backward: + # register backward hooks + self.remove_handles 
= [] + for i, block in enumerate(blocks): + hook = self.create_backward_hook(blocks, i) + if hook is not None: + handle = block.register_full_backward_hook(hook) + self.remove_handles.append(handle) + + def set_forward_only(self, forward_only: bool): + self.forward_only = forward_only + + def __del__(self): + if self.supports_backward: + for handle in self.remove_handles: + handle.remove() + + def create_backward_hook(self, blocks: list[nn.Module], block_index: int) -> Optional[callable]: + # -1 for 0-based index + num_blocks_propagated = self.num_blocks - block_index - 1 + swapping = num_blocks_propagated > 0 and num_blocks_propagated <= self.blocks_to_swap + waiting = block_index > 0 and block_index <= self.blocks_to_swap + + if not swapping and not waiting: + return None + + # create hook + block_idx_to_cpu = self.num_blocks - num_blocks_propagated + block_idx_to_cuda = self.blocks_to_swap - num_blocks_propagated + block_idx_to_wait = block_index - 1 + + def backward_hook(module, grad_input, grad_output): + if self.debug: + print(f"Backward hook for block {block_index}") + + if swapping: + self._submit_move_blocks(blocks, block_idx_to_cpu, block_idx_to_cuda) + if waiting: + self._wait_blocks_move(block_idx_to_wait) + return None + + return backward_hook + + def prepare_block_devices_before_forward(self, blocks: list[nn.Module]): + if self.blocks_to_swap is None or self.blocks_to_swap == 0: + return + + if self.debug: + print(f"[{self.block_type}] Prepare block devices before forward") + + for b in blocks[0 : self.num_blocks - self.blocks_to_swap]: + b.to(self.device) + weighs_to_device(b, self.device) # make sure weights are on device + + for b in blocks[self.num_blocks - self.blocks_to_swap :]: + b.to(self.device) # move block to device first + weighs_to_device(b, "cpu") # make sure weights are on cpu + + synchronize_device(self.device) + clean_memory_on_device(self.device) + + def wait_for_block(self, block_idx: int): + if self.blocks_to_swap is None or self.blocks_to_swap == 0: + return + self._wait_blocks_move(block_idx) + + def submit_move_blocks_forward(self, blocks: list[nn.Module], block_idx: int): + # check if blocks_to_swap is enabled + if self.blocks_to_swap is None or self.blocks_to_swap == 0: + return + + # if supports_backward and backward is enabled, we swap blocks more than blocks_to_swap in backward pass + if not self.forward_only and block_idx >= self.blocks_to_swap: + return + + block_idx_to_cpu = block_idx + block_idx_to_cuda = self.num_blocks - self.blocks_to_swap + block_idx + block_idx_to_cuda = block_idx_to_cuda % self.num_blocks # this works for forward-only offloading + self._submit_move_blocks(blocks, block_idx_to_cpu, block_idx_to_cuda) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/fp8_optimization_utils.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/fp8_optimization_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..33ba9e6beb1e2cd671f7106541435ac85373c2e9 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/fp8_optimization_utils.py @@ -0,0 +1,356 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import logging + +from tqdm import tqdm + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +from musubi_tuner.utils.device_utils import clean_memory_on_device + + +def calculate_fp8_maxval(exp_bits=4, mantissa_bits=3, sign_bits=1): + """ + Calculate the maximum representable value in FP8 format. 
+ Default is E4M3 format (4-bit exponent, 3-bit mantissa, 1-bit sign). + + Args: + exp_bits (int): Number of exponent bits + mantissa_bits (int): Number of mantissa bits + sign_bits (int): Number of sign bits (0 or 1) + + Returns: + float: Maximum value representable in FP8 format + """ + assert exp_bits + mantissa_bits + sign_bits == 8, "Total bits must be 8" + + # Calculate exponent bias + bias = 2 ** (exp_bits - 1) - 1 + + # Calculate maximum mantissa value + mantissa_max = 1.0 + for i in range(mantissa_bits - 1): + mantissa_max += 2 ** -(i + 1) + + # Calculate maximum value + max_value = mantissa_max * (2 ** (2**exp_bits - 1 - bias)) + + return max_value + + +def quantize_tensor_to_fp8(tensor, scale, exp_bits=4, mantissa_bits=3, sign_bits=1, max_value=None, min_value=None): + """ + Quantize a tensor to FP8 format. + + Args: + tensor (torch.Tensor): Tensor to quantize + scale (float or torch.Tensor): Scale factor + exp_bits (int): Number of exponent bits + mantissa_bits (int): Number of mantissa bits + sign_bits (int): Number of sign bits + + Returns: + tuple: (quantized_tensor, scale_factor) + """ + # Create scaled tensor + scaled_tensor = tensor / scale + + # Calculate FP8 parameters + bias = 2 ** (exp_bits - 1) - 1 + + if max_value is None: + # Calculate max and min values + max_value = calculate_fp8_maxval(exp_bits, mantissa_bits, sign_bits) + min_value = -max_value if sign_bits > 0 else 0.0 + + # Clamp tensor to range + clamped_tensor = torch.clamp(scaled_tensor, min_value, max_value) + + # Quantization process + abs_values = torch.abs(clamped_tensor) + nonzero_mask = abs_values > 0 + + # Calculate log scales (only for non-zero elements) + log_scales = torch.zeros_like(clamped_tensor) + if nonzero_mask.any(): + log_scales[nonzero_mask] = torch.floor(torch.log2(abs_values[nonzero_mask]) + bias).detach() + + # Limit log scales and calculate quantization factor + log_scales = torch.clamp(log_scales, min=1.0) + quant_factor = 2.0 ** (log_scales - mantissa_bits - bias) + + # Quantize and dequantize + quantized = torch.round(clamped_tensor / quant_factor) * quant_factor + + return quantized, scale + + +def optimize_state_dict_with_fp8( + state_dict, calc_device, target_layer_keys=None, exclude_layer_keys=None, exp_bits=4, mantissa_bits=3, move_to_device=False +): + """ + Optimize Linear layer weights in a model's state dict to FP8 format. 
+ + Args: + state_dict (dict): State dict to optimize, replaced in-place + calc_device (str): Device to quantize tensors on + target_layer_keys (list, optional): Layer key patterns to target (None for all Linear layers) + exclude_layer_keys (list, optional): Layer key patterns to exclude + exp_bits (int): Number of exponent bits + mantissa_bits (int): Number of mantissa bits + move_to_device (bool): Move optimized tensors to the calculating device + + Returns: + dict: FP8 optimized state dict + """ + if exp_bits == 4 and mantissa_bits == 3: + fp8_dtype = torch.float8_e4m3fn + elif exp_bits == 5 and mantissa_bits == 2: + fp8_dtype = torch.float8_e5m2 + else: + raise ValueError(f"Unsupported FP8 format: E{exp_bits}M{mantissa_bits}") + + # Calculate FP8 max value + max_value = calculate_fp8_maxval(exp_bits, mantissa_bits) + min_value = -max_value # this function supports only signed FP8 + + # Create optimized state dict + optimized_count = 0 + + # Enumerate tarket keys + target_state_dict_keys = [] + for key in state_dict.keys(): + # Check if it's a weight key and matches target patterns + is_target = (target_layer_keys is None or any(pattern in key for pattern in target_layer_keys)) and key.endswith(".weight") + is_excluded = exclude_layer_keys is not None and any(pattern in key for pattern in exclude_layer_keys) + is_target = is_target and not is_excluded + + if is_target and isinstance(state_dict[key], torch.Tensor): + target_state_dict_keys.append(key) + + # Process each key + for key in tqdm(target_state_dict_keys): + value = state_dict[key] + + # Save original device and dtype + original_device = value.device + original_dtype = value.dtype + + # Move to calculation device + if calc_device is not None: + value = value.to(calc_device) + + # Calculate scale factor + scale = torch.max(torch.abs(value.flatten())) / max_value + # print(f"Optimizing {key} with scale: {scale}") + + # Quantize weight to FP8 + quantized_weight, _ = quantize_tensor_to_fp8(value, scale, exp_bits, mantissa_bits, 1, max_value, min_value) + + # Add to state dict using original key for weight and new key for scale + fp8_key = key # Maintain original key + scale_key = key.replace(".weight", ".scale_weight") + + quantized_weight = quantized_weight.to(fp8_dtype) + + if not move_to_device: + quantized_weight = quantized_weight.to(original_device) + + scale_tensor = torch.tensor([scale], dtype=original_dtype, device=quantized_weight.device) + + state_dict[fp8_key] = quantized_weight + state_dict[scale_key] = scale_tensor + + optimized_count += 1 + + if calc_device is not None: # optimized_count % 10 == 0 and + # free memory on calculation device + clean_memory_on_device(calc_device) + + logger.info(f"Number of optimized Linear layers: {optimized_count}") + return state_dict + + +def fp8_linear_forward_patch(self: nn.Linear, x, use_scaled_mm=False, max_value=None): + """ + Patched forward method for Linear layers with FP8 weights. + + Args: + self: Linear layer instance + x (torch.Tensor): Input tensor + use_scaled_mm (bool): Use scaled_mm for FP8 Linear layers, requires SM 8.9+ (RTX 40 series) + max_value (float): Maximum value for FP8 quantization. If None, no quantization is applied for input tensor. 
+ + Returns: + torch.Tensor: Result of linear transformation + """ + if use_scaled_mm: + input_dtype = x.dtype + original_weight_dtype = self.scale_weight.dtype + weight_dtype = self.weight.dtype + target_dtype = torch.float8_e5m2 + assert weight_dtype == torch.float8_e4m3fn, "Only FP8 E4M3FN format is supported" + assert x.ndim == 3, "Input tensor must be 3D (batch_size, seq_len, hidden_dim)" + + if max_value is None: + # no input quantization + scale_x = torch.tensor(1.0, dtype=torch.float32, device=x.device) + else: + # calculate scale factor for input tensor + scale_x = (torch.max(torch.abs(x.flatten())) / max_value).to(torch.float32) + + # quantize input tensor to FP8: this seems to consume a lot of memory + x, _ = quantize_tensor_to_fp8(x, scale_x, 5, 2, 1, max_value, -max_value) + + original_shape = x.shape + x = x.reshape(-1, x.shape[2]).to(target_dtype) + + weight = self.weight.t() + scale_weight = self.scale_weight.to(torch.float32) + + if self.bias is not None: + # float32 is not supported with bias in scaled_mm + o = torch._scaled_mm(x, weight, out_dtype=original_weight_dtype, bias=self.bias, scale_a=scale_x, scale_b=scale_weight) + else: + o = torch._scaled_mm(x, weight, out_dtype=input_dtype, scale_a=scale_x, scale_b=scale_weight) + + return o.reshape(original_shape[0], original_shape[1], -1).to(input_dtype) + + else: + # Dequantize the weight + original_dtype = self.scale_weight.dtype + dequantized_weight = self.weight.to(original_dtype) * self.scale_weight + + # Perform linear transformation + if self.bias is not None: + output = F.linear(x, dequantized_weight, self.bias) + else: + output = F.linear(x, dequantized_weight) + + return output + + +def apply_fp8_monkey_patch(model, optimized_state_dict, use_scaled_mm=False): + """ + Apply monkey patching to a model using FP8 optimized state dict. + + Args: + model (nn.Module): Model instance to patch + optimized_state_dict (dict): FP8 optimized state dict + use_scaled_mm (bool): Use scaled_mm for FP8 Linear layers, requires SM 8.9+ (RTX 40 series) + + Returns: + nn.Module: The patched model (same instance, modified in-place) + """ + # # Calculate FP8 float8_e5m2 max value + # max_value = calculate_fp8_maxval(5, 2) + max_value = None # do not quantize input tensor + + # Find all scale keys to identify FP8-optimized layers + scale_keys = [k for k in optimized_state_dict.keys() if k.endswith(".scale_weight")] + + # Enumerate patched layers + patched_module_paths = set() + for scale_key in scale_keys: + # Extract module path from scale key (remove .scale_weight) + module_path = scale_key.rsplit(".scale_weight", 1)[0] + patched_module_paths.add(module_path) + + patched_count = 0 + + # Apply monkey patch to each layer with FP8 weights + for name, module in model.named_modules(): + # Check if this module has a corresponding scale_weight + has_scale = name in patched_module_paths + + # Apply patch if it's a Linear layer with FP8 scale + if isinstance(module, nn.Linear) and has_scale: + # register the scale_weight as a buffer to load the state_dict + module.register_buffer("scale_weight", torch.tensor(1.0, dtype=module.weight.dtype)) + + # Create a new forward method with the patched version. 
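+ # The function is bound as an instance method via __get__ below, so `self` resolves to the
+ # patched Linear at call time; the closure only captures use_scaled_mm and max_value, which
+ # are identical for every layer, so defining it inside the loop is safe.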
+ def new_forward(self, x): + return fp8_linear_forward_patch(self, x, use_scaled_mm, max_value) + + # Bind method to module + module.forward = new_forward.__get__(module, type(module)) + + patched_count += 1 + + logger.info(f"Number of monkey-patched Linear layers: {patched_count}") + return model + + +# Example usage +def example_usage(): + # Small test model + class TestModel(nn.Module): + def __init__(self): + super().__init__() + fc1 = nn.Linear(768, 3072) + act1 = nn.GELU() + fc2 = nn.Linear(3072, 768) + act2 = nn.GELU() + fc3 = nn.Linear(768, 768) + + # Set layer names for testing + self.single_blocks = nn.ModuleList([fc1, act1, fc2, act2, fc3]) + + self.fc4 = nn.Linear(768, 128) + + def forward(self, x): + for layer in self.single_blocks: + x = layer(x) + x = self.fc4(x) + return x + + # Instantiate model + test_model = TestModel() + test_model.to(torch.float16) # convert to FP16 for testing + + # Test input tensor + test_input = torch.randn(1, 768, dtype=torch.float16) + + # Calculate output before optimization + with torch.no_grad(): + original_output = test_model(test_input) + print("original output", original_output[0, :5]) + + # Get state dict + state_dict = test_model.state_dict() + + # Apply FP8 optimization to state dict + cuda_device = torch.device("cuda") + optimized_state_dict = optimize_state_dict_with_fp8(state_dict, cuda_device, ["single_blocks"], ["2"]) + + # Apply monkey patching to the model + optimized_model = TestModel() # re-instantiate model + optimized_model.to(torch.float16) # convert to FP16 for testing + apply_fp8_monkey_patch(optimized_model, optimized_state_dict) + + # Load optimized state dict + optimized_model.load_state_dict(optimized_state_dict, strict=True, assign=True) # assign=True to load buffer + + # Calculate output after optimization + with torch.no_grad(): + optimized_output = optimized_model(test_input) + print("optimized output", optimized_output[0, :5]) + + # Compare accuracy + error = torch.mean(torch.abs(original_output - optimized_output)) + print(f"Mean absolute error: {error.item()}") + + # Check memory usage + original_params = sum(p.nelement() * p.element_size() for p in test_model.parameters()) / (1024 * 1024) + print(f"Model parameter memory: {original_params:.2f} MB") + optimized_params = sum(p.nelement() * p.element_size() for p in optimized_model.parameters()) / (1024 * 1024) + print(f"Optimized model parameter memory: {optimized_params:.2f} MB") + + return test_model + + +if __name__ == "__main__": + example_usage() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/scheduling_flow_match_discrete.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/scheduling_flow_match_discrete.py new file mode 100644 index 0000000000000000000000000000000000000000..c507ec4eb050463188e250c20aec8d1fde2c4a5d --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/scheduling_flow_match_discrete.py @@ -0,0 +1,257 @@ +# Copyright 2024 Stability AI, Katherine Crowson and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Modified from diffusers==0.29.2 +# +# ============================================================================== + +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import numpy as np +import torch + +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.utils import BaseOutput, logging +from diffusers.schedulers.scheduling_utils import SchedulerMixin + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +@dataclass +class FlowMatchDiscreteSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's `step` function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + """ + + prev_sample: torch.FloatTensor + + +class FlowMatchDiscreteScheduler(SchedulerMixin, ConfigMixin): + """ + Euler scheduler. + + This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic + methods the library implements for all schedulers such as loading and saving. + + Args: + num_train_timesteps (`int`, defaults to 1000): + The number of diffusion steps to train the model. + timestep_spacing (`str`, defaults to `"linspace"`): + The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and + Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information. + shift (`float`, defaults to 1.0): + The shift value for the timestep schedule. + reverse (`bool`, defaults to `True`): + Whether to reverse the timestep schedule. + """ + + _compatibles = [] + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps: int = 1000, + shift: float = 1.0, + reverse: bool = True, + solver: str = "euler", + n_tokens: Optional[int] = None, + ): + sigmas = torch.linspace(1, 0, num_train_timesteps + 1) + + if not reverse: + sigmas = sigmas.flip(0) + + self.sigmas = sigmas + # the value fed to model + self.timesteps = (sigmas[:-1] * num_train_timesteps).to(dtype=torch.float32) + + self._step_index = None + self._begin_index = None + + self.supported_solver = ["euler"] + if solver not in self.supported_solver: + raise ValueError( + f"Solver {solver} not supported. Supported solvers: {self.supported_solver}" + ) + + @property + def step_index(self): + """ + The index counter for current timestep. It will increase 1 after each scheduler step. + """ + return self._step_index + + @property + def begin_index(self): + """ + The index for the first timestep. It should be set from pipeline with `set_begin_index` method. + """ + return self._begin_index + + # Copied from diffusers.schedulers.scheduling_dpmsolver_multistep.DPMSolverMultistepScheduler.set_begin_index + def set_begin_index(self, begin_index: int = 0): + """ + Sets the begin index for the scheduler. This function should be run from pipeline before the inference. + + Args: + begin_index (`int`): + The begin index for the scheduler. 
+ """ + self._begin_index = begin_index + + def _sigma_to_t(self, sigma): + return sigma * self.config.num_train_timesteps + + def set_timesteps( + self, + num_inference_steps: int, + device: Union[str, torch.device] = None, + n_tokens: int = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain (to be run before inference). + + Args: + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + n_tokens (`int`, *optional*): + Number of tokens in the input sequence. + """ + self.num_inference_steps = num_inference_steps + + sigmas = torch.linspace(1, 0, num_inference_steps + 1) + sigmas = self.sd3_time_shift(sigmas) + + if not self.config.reverse: + sigmas = 1 - sigmas + + self.sigmas = sigmas + self.timesteps = (sigmas[:-1] * self.config.num_train_timesteps).to( + dtype=torch.float32, device=device + ) + + # Reset step index + self._step_index = None + + def index_for_timestep(self, timestep, schedule_timesteps=None): + if schedule_timesteps is None: + schedule_timesteps = self.timesteps + + indices = (schedule_timesteps == timestep).nonzero() + + # The sigma index that is taken for the **very** first `step` + # is always the second index (or the last index if there is only 1) + # This way we can ensure we don't accidentally skip a sigma in + # case we start in the middle of the denoising schedule (e.g. for image-to-image) + pos = 1 if len(indices) > 1 else 0 + + return indices[pos].item() + + def _init_step_index(self, timestep): + if self.begin_index is None: + if isinstance(timestep, torch.Tensor): + timestep = timestep.to(self.timesteps.device) + self._step_index = self.index_for_timestep(timestep) + else: + self._step_index = self._begin_index + + def scale_model_input( + self, sample: torch.Tensor, timestep: Optional[int] = None + ) -> torch.Tensor: + return sample + + def sd3_time_shift(self, t: torch.Tensor): + return (self.config.shift * t) / (1 + (self.config.shift - 1) * t) + + def step( + self, + model_output: torch.FloatTensor, + timestep: Union[float, torch.FloatTensor], + sample: torch.FloatTensor, + return_dict: bool = True, + ) -> Union[FlowMatchDiscreteSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model. + timestep (`float`): + The current discrete timestep in the diffusion chain. + sample (`torch.FloatTensor`): + A current instance of a sample created by the diffusion process. + generator (`torch.Generator`, *optional*): + A random number generator. + n_tokens (`int`, *optional*): + Number of tokens in the input sequence. + return_dict (`bool`): + Whether or not to return a [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or + tuple. + + Returns: + [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.scheduling_euler_discrete.EulerDiscreteSchedulerOutput`] is + returned, otherwise a tuple is returned where the first element is the sample tensor. 
+ """ + + if ( + isinstance(timestep, int) + or isinstance(timestep, torch.IntTensor) + or isinstance(timestep, torch.LongTensor) + ): + raise ValueError( + ( + "Passing integer indices (e.g. from `enumerate(timesteps)`) as timesteps to" + " `EulerDiscreteScheduler.step()` is not supported. Make sure to pass" + " one of the `scheduler.timesteps` as a timestep." + ), + ) + + if self.step_index is None: + self._init_step_index(timestep) + + # Upcast to avoid precision issues when computing prev_sample + sample = sample.to(torch.float32) + + dt = self.sigmas[self.step_index + 1] - self.sigmas[self.step_index] + + if self.config.solver == "euler": + prev_sample = sample + model_output.to(torch.float32) * dt + else: + raise ValueError( + f"Solver {self.config.solver} not supported. Supported solvers: {self.supported_solver}" + ) + + # upon completion increase step index by one + self._step_index += 1 + + if not return_dict: + return (prev_sample,) + + return FlowMatchDiscreteSchedulerOutput(prev_sample=prev_sample) + + def __len__(self): + return self.config.num_train_timesteps diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/unet_causal_3d_blocks.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/unet_causal_3d_blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..27d544170ece6a370cdacfe9e31367b884c2e516 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/modules/unet_causal_3d_blocks.py @@ -0,0 +1,818 @@ +# Copyright 2024 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +# +# Modified from diffusers==0.29.2 +# +# ============================================================================== + +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from torch import nn +from einops import rearrange + +from diffusers.utils import logging +from diffusers.models.activations import get_activation +from diffusers.models.attention_processor import SpatialNorm +from diffusers.models.attention_processor import Attention +from diffusers.models.normalization import AdaGroupNorm +from diffusers.models.normalization import RMSNorm + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def prepare_causal_attention_mask(n_frame: int, n_hw: int, dtype, device, batch_size: int = None): + seq_len = n_frame * n_hw + mask = torch.full((seq_len, seq_len), float("-inf"), dtype=dtype, device=device) + for i in range(seq_len): + i_frame = i // n_hw + mask[i, : (i_frame + 1) * n_hw] = 0 + if batch_size is not None: + mask = mask.unsqueeze(0).expand(batch_size, -1, -1) + return mask + + +class CausalConv3d(nn.Module): + """ + Implements a causal 3D convolution layer where each position only depends on previous timesteps and current spatial locations. + This maintains temporal causality in video generation tasks. 
+ """ + + def __init__( + self, + chan_in, + chan_out, + kernel_size: Union[int, Tuple[int, int, int]], + stride: Union[int, Tuple[int, int, int]] = 1, + dilation: Union[int, Tuple[int, int, int]] = 1, + pad_mode="replicate", + chunk_size=0, + **kwargs, + ): + super().__init__() + + self.pad_mode = pad_mode + padding = (kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size // 2, kernel_size - 1, 0) # W, H, T + self.time_causal_padding = padding + self.chunk_size = chunk_size + + self.conv = nn.Conv3d(chan_in, chan_out, kernel_size, stride=stride, dilation=dilation, **kwargs) + + def original_forward(self, x): + x = F.pad(x, self.time_causal_padding, mode=self.pad_mode) + return self.conv(x) + + def forward(self, x): + if self.chunk_size == 0: + return self.original_forward(x) + + # if not large, call original forward + if x.shape[4] < self.chunk_size * 1.5: + return self.original_forward(x) + + # # debug: verify the original forward is the same as chunked forward + # orig_forwarded_value = None + # if x.shape[4] < self.chunk_size * 4: + # orig_forwarded_value = self.original_forward(x) + + # get the kernel size + kernel_size = self.conv.kernel_size[0] # assume cubic kernel + assert kernel_size == self.conv.kernel_size[1] == self.conv.kernel_size[2], "Only cubic kernels are supported" + padding_size = kernel_size // 2 # 1 for kernel_size=3, 0 for kernel_size=1 + + x = F.pad(x, self.time_causal_padding, mode=self.pad_mode) + + B, C, D, H, W = orig_shape = x.shape + chunk_size = self.chunk_size + chunk_size -= chunk_size % self.conv.stride[2] # make sure the chunk size is divisible by stride + # print(f"chunked forward: {x.shape}, chunk_size: {chunk_size}") + + # calculate the indices for chunking with overlap and padding by kernel size and stride + indices = [] + i = 0 + while i < W - padding_size: + start_idx = i - padding_size + end_idx = min(i + chunk_size + padding_size, W) + if i == 0: + start_idx = 0 + end_idx += padding_size # to make sure the first chunk is divisible by stride + if W - end_idx < chunk_size // 2: # small chunk at the end + end_idx = W + indices.append((start_idx, end_idx)) + i = end_idx - padding_size + # print(f"chunked forward: {x.shape}, chunked indices: {indices}") + + chunks = [] + for start_idx, end_idx in indices: + chunk = x[:, :, :, :, start_idx:end_idx] + chunk_output = self.conv(chunk) + # print(chunk.shape, chunk_output.shape) + chunks.append(chunk_output) + + # concatenate the chunks + x = torch.cat(chunks, dim=4) + + assert ( + x.shape[2] == ((D - padding_size * 2) + self.conv.stride[0] - 1) // self.conv.stride[0] + ), f"Invalid shape: {x.shape}, {orig_shape}, {padding_size}, {self.conv.stride}" + assert ( + x.shape[3] == ((H - padding_size * 2) + self.conv.stride[1] - 1) // self.conv.stride[1] + ), f"Invalid shape: {x.shape}, {orig_shape}, {padding_size}, {self.conv.stride}" + assert ( + x.shape[4] == ((W - padding_size * 2) + self.conv.stride[2] - 1) // self.conv.stride[2] + ), f"Invalid shape: {x.shape}, {orig_shape}, {padding_size}, {self.conv.stride}" + + # # debug: verify the original forward is the same as chunked forward + # if orig_forwarded_value is not None: + # assert torch.allclose( + # orig_forwarded_value, x, rtol=1e-4, atol=1e-2 + # ), f"Chunked forward is different from original forward. {x.shape}, {orig_shape}, {padding_size}, {self.conv.stride}, {self.conv.kernel_size}" + + return x + + +class UpsampleCausal3D(nn.Module): + """ + A 3D upsampling layer with an optional convolution. 
+ """ + + def __init__( + self, + channels: int, + use_conv: bool = False, + use_conv_transpose: bool = False, + out_channels: Optional[int] = None, + name: str = "conv", + kernel_size: Optional[int] = None, + padding=1, + norm_type=None, + eps=None, + elementwise_affine=None, + bias=True, + interpolate=True, + upsample_factor=(2, 2, 2), + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_conv_transpose = use_conv_transpose + self.name = name + self.interpolate = interpolate + self.upsample_factor = upsample_factor + + if norm_type == "ln_norm": + self.norm = nn.LayerNorm(channels, eps, elementwise_affine) + elif norm_type == "rms_norm": + self.norm = RMSNorm(channels, eps, elementwise_affine) + elif norm_type is None: + self.norm = None + else: + raise ValueError(f"unknown norm_type: {norm_type}") + + conv = None + if use_conv_transpose: + raise NotImplementedError + elif use_conv: + if kernel_size is None: + kernel_size = 3 + conv = CausalConv3d(self.channels, self.out_channels, kernel_size=kernel_size, bias=bias) + + if name == "conv": + self.conv = conv + else: + self.Conv2d_0 = conv + + def forward( + self, + hidden_states: torch.FloatTensor, + output_size: Optional[int] = None, + scale: float = 1.0, + ) -> torch.FloatTensor: + assert hidden_states.shape[1] == self.channels + + if self.norm is not None: + raise NotImplementedError + + if self.use_conv_transpose: + return self.conv(hidden_states) + + # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16 + dtype = hidden_states.dtype + if dtype == torch.bfloat16: + hidden_states = hidden_states.to(torch.float32) + + # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 + if hidden_states.shape[0] >= 64: + hidden_states = hidden_states.contiguous() + + # if `output_size` is passed we force the interpolation output + # size and do not make use of `scale_factor=2` + if self.interpolate: + B, C, T, H, W = hidden_states.shape + first_h, other_h = hidden_states.split((1, T - 1), dim=2) + if output_size is None: + if T > 1: + other_h = F.interpolate(other_h, scale_factor=self.upsample_factor, mode="nearest") + + first_h = first_h.squeeze(2) + first_h = F.interpolate(first_h, scale_factor=self.upsample_factor[1:], mode="nearest") + first_h = first_h.unsqueeze(2) + else: + raise NotImplementedError + + if T > 1: + hidden_states = torch.cat((first_h, other_h), dim=2) + else: + hidden_states = first_h + + # If the input is bfloat16, we cast back to bfloat16 + if dtype == torch.bfloat16: + hidden_states = hidden_states.to(dtype) + + if self.use_conv: + if self.name == "conv": + hidden_states = self.conv(hidden_states) + else: + hidden_states = self.Conv2d_0(hidden_states) + + return hidden_states + + +class DownsampleCausal3D(nn.Module): + """ + A 3D downsampling layer with an optional convolution. 
+ """ + + def __init__( + self, + channels: int, + use_conv: bool = False, + out_channels: Optional[int] = None, + padding: int = 1, + name: str = "conv", + kernel_size=3, + norm_type=None, + eps=None, + elementwise_affine=None, + bias=True, + stride=2, + ): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.padding = padding + stride = stride + self.name = name + + if norm_type == "ln_norm": + self.norm = nn.LayerNorm(channels, eps, elementwise_affine) + elif norm_type == "rms_norm": + self.norm = RMSNorm(channels, eps, elementwise_affine) + elif norm_type is None: + self.norm = None + else: + raise ValueError(f"unknown norm_type: {norm_type}") + + if use_conv: + conv = CausalConv3d(self.channels, self.out_channels, kernel_size=kernel_size, stride=stride, bias=bias) + else: + raise NotImplementedError + + if name == "conv": + self.Conv2d_0 = conv + self.conv = conv + elif name == "Conv2d_0": + self.conv = conv + else: + self.conv = conv + + def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor: + assert hidden_states.shape[1] == self.channels + + if self.norm is not None: + hidden_states = self.norm(hidden_states.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + assert hidden_states.shape[1] == self.channels + + hidden_states = self.conv(hidden_states) + + return hidden_states + + +class ResnetBlockCausal3D(nn.Module): + r""" + A Resnet block. + """ + + def __init__( + self, + *, + in_channels: int, + out_channels: Optional[int] = None, + conv_shortcut: bool = False, + dropout: float = 0.0, + temb_channels: int = 512, + groups: int = 32, + groups_out: Optional[int] = None, + pre_norm: bool = True, + eps: float = 1e-6, + non_linearity: str = "swish", + skip_time_act: bool = False, + # default, scale_shift, ada_group, spatial + time_embedding_norm: str = "default", + kernel: Optional[torch.FloatTensor] = None, + output_scale_factor: float = 1.0, + use_in_shortcut: Optional[bool] = None, + up: bool = False, + down: bool = False, + conv_shortcut_bias: bool = True, + conv_3d_out_channels: Optional[int] = None, + ): + super().__init__() + self.pre_norm = pre_norm + self.pre_norm = True + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + self.up = up + self.down = down + self.output_scale_factor = output_scale_factor + self.time_embedding_norm = time_embedding_norm + self.skip_time_act = skip_time_act + + linear_cls = nn.Linear + + if groups_out is None: + groups_out = groups + + if self.time_embedding_norm == "ada_group": + self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps) + elif self.time_embedding_norm == "spatial": + self.norm1 = SpatialNorm(in_channels, temb_channels) + else: + self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True) + + self.conv1 = CausalConv3d(in_channels, out_channels, kernel_size=3, stride=1) + + if temb_channels is not None: + if self.time_embedding_norm == "default": + self.time_emb_proj = linear_cls(temb_channels, out_channels) + elif self.time_embedding_norm == "scale_shift": + self.time_emb_proj = linear_cls(temb_channels, 2 * out_channels) + elif self.time_embedding_norm == "ada_group" or self.time_embedding_norm == "spatial": + self.time_emb_proj = None + else: + raise ValueError(f"Unknown time_embedding_norm : {self.time_embedding_norm} ") + else: + 
self.time_emb_proj = None + + if self.time_embedding_norm == "ada_group": + self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps) + elif self.time_embedding_norm == "spatial": + self.norm2 = SpatialNorm(out_channels, temb_channels) + else: + self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True) + + self.dropout = torch.nn.Dropout(dropout) + conv_3d_out_channels = conv_3d_out_channels or out_channels + self.conv2 = CausalConv3d(out_channels, conv_3d_out_channels, kernel_size=3, stride=1) + + self.nonlinearity = get_activation(non_linearity) + + self.upsample = self.downsample = None + if self.up: + self.upsample = UpsampleCausal3D(in_channels, use_conv=False) + elif self.down: + self.downsample = DownsampleCausal3D(in_channels, use_conv=False, name="op") + + self.use_in_shortcut = self.in_channels != conv_3d_out_channels if use_in_shortcut is None else use_in_shortcut + + self.conv_shortcut = None + if self.use_in_shortcut: + self.conv_shortcut = CausalConv3d( + in_channels, + conv_3d_out_channels, + kernel_size=1, + stride=1, + bias=conv_shortcut_bias, + ) + + def forward( + self, + input_tensor: torch.FloatTensor, + temb: torch.FloatTensor, + scale: float = 1.0, + ) -> torch.FloatTensor: + hidden_states = input_tensor + + if self.time_embedding_norm == "ada_group" or self.time_embedding_norm == "spatial": + hidden_states = self.norm1(hidden_states, temb) + else: + hidden_states = self.norm1(hidden_states) + + hidden_states = self.nonlinearity(hidden_states) + + if self.upsample is not None: + # upsample_nearest_nhwc fails with large batch sizes. see https://github.com/huggingface/diffusers/issues/984 + if hidden_states.shape[0] >= 64: + input_tensor = input_tensor.contiguous() + hidden_states = hidden_states.contiguous() + input_tensor = self.upsample(input_tensor, scale=scale) + hidden_states = self.upsample(hidden_states, scale=scale) + elif self.downsample is not None: + input_tensor = self.downsample(input_tensor, scale=scale) + hidden_states = self.downsample(hidden_states, scale=scale) + + hidden_states = self.conv1(hidden_states) + + if self.time_emb_proj is not None: + if not self.skip_time_act: + temb = self.nonlinearity(temb) + temb = self.time_emb_proj(temb, scale)[:, :, None, None] + + if temb is not None and self.time_embedding_norm == "default": + hidden_states = hidden_states + temb + + if self.time_embedding_norm == "ada_group" or self.time_embedding_norm == "spatial": + hidden_states = self.norm2(hidden_states, temb) + else: + hidden_states = self.norm2(hidden_states) + + if temb is not None and self.time_embedding_norm == "scale_shift": + scale, shift = torch.chunk(temb, 2, dim=1) + hidden_states = hidden_states * (1 + scale) + shift + + hidden_states = self.nonlinearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + hidden_states = self.conv2(hidden_states) + + if self.conv_shortcut is not None: + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = (input_tensor + hidden_states) / self.output_scale_factor + + return output_tensor + + +def get_down_block3d( + down_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + temb_channels: int, + add_downsample: bool, + downsample_stride: int, + resnet_eps: float, + resnet_act_fn: str, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + downsample_padding: Optional[int] = None, + 
dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = None, + downsample_type: Optional[str] = None, + dropout: float = 0.0, +): + # If attn head dim is not defined, we default it to the number of heads + if attention_head_dim is None: + logger.warn( + f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}." + ) + attention_head_dim = num_attention_heads + + down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type + if down_block_type == "DownEncoderBlockCausal3D": + return DownEncoderBlockCausal3D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + dropout=dropout, + add_downsample=add_downsample, + downsample_stride=downsample_stride, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + downsample_padding=downsample_padding, + resnet_time_scale_shift=resnet_time_scale_shift, + ) + raise ValueError(f"{down_block_type} does not exist.") + + +def get_up_block3d( + up_block_type: str, + num_layers: int, + in_channels: int, + out_channels: int, + prev_output_channel: int, + temb_channels: int, + add_upsample: bool, + upsample_scale_factor: Tuple, + resnet_eps: float, + resnet_act_fn: str, + resolution_idx: Optional[int] = None, + transformer_layers_per_block: int = 1, + num_attention_heads: Optional[int] = None, + resnet_groups: Optional[int] = None, + cross_attention_dim: Optional[int] = None, + dual_cross_attention: bool = False, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + upcast_attention: bool = False, + resnet_time_scale_shift: str = "default", + attention_type: str = "default", + resnet_skip_time_act: bool = False, + resnet_out_scale_factor: float = 1.0, + cross_attention_norm: Optional[str] = None, + attention_head_dim: Optional[int] = None, + upsample_type: Optional[str] = None, + dropout: float = 0.0, +) -> nn.Module: + # If attn head dim is not defined, we default it to the number of heads + if attention_head_dim is None: + logger.warn( + f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}." + ) + attention_head_dim = num_attention_heads + + up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type + if up_block_type == "UpDecoderBlockCausal3D": + return UpDecoderBlockCausal3D( + num_layers=num_layers, + in_channels=in_channels, + out_channels=out_channels, + resolution_idx=resolution_idx, + dropout=dropout, + add_upsample=add_upsample, + upsample_scale_factor=upsample_scale_factor, + resnet_eps=resnet_eps, + resnet_act_fn=resnet_act_fn, + resnet_groups=resnet_groups, + resnet_time_scale_shift=resnet_time_scale_shift, + temb_channels=temb_channels, + ) + raise ValueError(f"{up_block_type} does not exist.") + + +class UNetMidBlockCausal3D(nn.Module): + """ + A 3D UNet mid-block [`UNetMidBlockCausal3D`] with multiple residual blocks and optional attention blocks. 
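+ When attention is enabled, each frame's tokens are flattened together with the spatial tokens and a
+ causal mask from prepare_causal_attention_mask restricts every frame to attend only to itself and
+ earlier frames.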
+ """ + + def __init__( + self, + in_channels: int, + temb_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", # default, spatial + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + attn_groups: Optional[int] = None, + resnet_pre_norm: bool = True, + add_attention: bool = True, + attention_head_dim: int = 1, + output_scale_factor: float = 1.0, + ): + super().__init__() + resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + self.add_attention = add_attention + + if attn_groups is None: + attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None + + # there is always at least one resnet + resnets = [ + ResnetBlockCausal3D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ] + attentions = [] + + if attention_head_dim is None: + logger.warn( + f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}." + ) + attention_head_dim = in_channels + + for _ in range(num_layers): + if self.add_attention: + attentions.append( + Attention( + in_channels, + heads=in_channels // attention_head_dim, + dim_head=attention_head_dim, + rescale_output_factor=output_scale_factor, + eps=resnet_eps, + norm_num_groups=attn_groups, + spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None, + residual_connection=True, + bias=True, + upcast_softmax=True, + _from_deprecated_attn_block=True, + ) + ) + else: + attentions.append(None) + + resnets.append( + ResnetBlockCausal3D( + in_channels=in_channels, + out_channels=in_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.attentions = nn.ModuleList(attentions) + self.resnets = nn.ModuleList(resnets) + + def forward(self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None) -> torch.FloatTensor: + hidden_states = self.resnets[0](hidden_states, temb) + for attn, resnet in zip(self.attentions, self.resnets[1:]): + if attn is not None: + B, C, T, H, W = hidden_states.shape + hidden_states = rearrange(hidden_states, "b c f h w -> b (f h w) c") + attention_mask = prepare_causal_attention_mask(T, H * W, hidden_states.dtype, hidden_states.device, batch_size=B) + hidden_states = attn(hidden_states, temb=temb, attention_mask=attention_mask) + hidden_states = rearrange(hidden_states, "b (f h w) c -> b c f h w", f=T, h=H, w=W) + hidden_states = resnet(hidden_states, temb) + + return hidden_states + + +class DownEncoderBlockCausal3D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_downsample: bool = True, + downsample_stride: int = 2, + downsample_padding: int = 1, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + 
resnets.append( + ResnetBlockCausal3D( + in_channels=in_channels, + out_channels=out_channels, + temb_channels=None, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_downsample: + self.downsamplers = nn.ModuleList( + [ + DownsampleCausal3D( + out_channels, + use_conv=True, + out_channels=out_channels, + padding=downsample_padding, + name="op", + stride=downsample_stride, + ) + ] + ) + else: + self.downsamplers = None + + def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor: + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb=None, scale=scale) + + if self.downsamplers is not None: + for downsampler in self.downsamplers: + hidden_states = downsampler(hidden_states, scale) + + return hidden_states + + +class UpDecoderBlockCausal3D(nn.Module): + def __init__( + self, + in_channels: int, + out_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_time_scale_shift: str = "default", # default, spatial + resnet_act_fn: str = "swish", + resnet_groups: int = 32, + resnet_pre_norm: bool = True, + output_scale_factor: float = 1.0, + add_upsample: bool = True, + upsample_scale_factor=(2, 2, 2), + temb_channels: Optional[int] = None, + ): + super().__init__() + resnets = [] + + for i in range(num_layers): + input_channels = in_channels if i == 0 else out_channels + + resnets.append( + ResnetBlockCausal3D( + in_channels=input_channels, + out_channels=out_channels, + temb_channels=temb_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + time_embedding_norm=resnet_time_scale_shift, + non_linearity=resnet_act_fn, + output_scale_factor=output_scale_factor, + pre_norm=resnet_pre_norm, + ) + ) + + self.resnets = nn.ModuleList(resnets) + + if add_upsample: + self.upsamplers = nn.ModuleList( + [ + UpsampleCausal3D( + out_channels, + use_conv=True, + out_channels=out_channels, + upsample_factor=upsample_scale_factor, + ) + ] + ) + else: + self.upsamplers = None + + self.resolution_idx = resolution_idx + + def forward( + self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, scale: float = 1.0 + ) -> torch.FloatTensor: + for resnet in self.resnets: + hidden_states = resnet(hidden_states, temb=temb, scale=scale) + + if self.upsamplers is not None: + for upsampler in self.upsamplers: + hidden_states = upsampler(hidden_states) + + return hidden_states diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__init__.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/__init__.cpython-312.pyc b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e7c805db2bfcb8a09b6f76b4eeaf890fcc0bd09 Binary files /dev/null and b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/__pycache__/__init__.cpython-312.pyc differ diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora.py 
new file mode 100644 index 0000000000000000000000000000000000000000..e49fa9ca846e25a7d8f21e442a26a3d94a4db682 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora.py @@ -0,0 +1,913 @@ +# LoRA network module: currently conv2d is not fully supported +# reference: +# https://github.com/microsoft/LoRA/blob/main/loralib/layers.py +# https://github.com/cloneofsimo/lora/blob/master/lora_diffusion/lora.py + +import ast +import math +import os +import re +from typing import Dict, List, Optional, Type, Union +from transformers import CLIPTextModel +import numpy as np +import torch +import torch.nn as nn + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +HUNYUAN_TARGET_REPLACE_MODULES = ["MMDoubleStreamBlock", "MMSingleStreamBlock"] + + +class LoRAModule(torch.nn.Module): + """ + replaces forward method of the original Linear, instead of replacing the original Linear module. + """ + + def __init__( + self, + lora_name, + org_module: torch.nn.Module, + multiplier=1.0, + lora_dim=4, + alpha=1, + dropout=None, + rank_dropout=None, + module_dropout=None, + split_dims: Optional[List[int]] = None, + ): + """ + if alpha == 0 or None, alpha is rank (no scaling). + + split_dims is used to mimic the split qkv of multi-head attention. + """ + super().__init__() + self.lora_name = lora_name + + if org_module.__class__.__name__ == "Conv2d": + in_dim = org_module.in_channels + out_dim = org_module.out_channels + else: + in_dim = org_module.in_features + out_dim = org_module.out_features + + self.lora_dim = lora_dim + self.split_dims = split_dims + + if split_dims is None: + if org_module.__class__.__name__ == "Conv2d": + kernel_size = org_module.kernel_size + stride = org_module.stride + padding = org_module.padding + self.lora_down = torch.nn.Conv2d(in_dim, self.lora_dim, kernel_size, stride, padding, bias=False) + self.lora_up = torch.nn.Conv2d(self.lora_dim, out_dim, (1, 1), (1, 1), bias=False) + else: + self.lora_down = torch.nn.Linear(in_dim, self.lora_dim, bias=False) + self.lora_up = torch.nn.Linear(self.lora_dim, out_dim, bias=False) + + torch.nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5)) + torch.nn.init.zeros_(self.lora_up.weight) + else: + # conv2d not supported + assert sum(split_dims) == out_dim, "sum of split_dims must be equal to out_dim" + assert org_module.__class__.__name__ == "Linear", "split_dims is only supported for Linear" + # print(f"split_dims: {split_dims}") + self.lora_down = torch.nn.ModuleList( + [torch.nn.Linear(in_dim, self.lora_dim, bias=False) for _ in range(len(split_dims))] + ) + self.lora_up = torch.nn.ModuleList([torch.nn.Linear(self.lora_dim, split_dim, bias=False) for split_dim in split_dims]) + for lora_down in self.lora_down: + torch.nn.init.kaiming_uniform_(lora_down.weight, a=math.sqrt(5)) + for lora_up in self.lora_up: + torch.nn.init.zeros_(lora_up.weight) + + if type(alpha) == torch.Tensor: + alpha = alpha.detach().float().numpy() # without casting, bf16 causes error + alpha = self.lora_dim if alpha is None or alpha == 0 else alpha + self.scale = alpha / self.lora_dim + self.register_buffer("alpha", torch.tensor(alpha)) # for save/load + + # same as microsoft's + self.multiplier = multiplier + self.org_module = org_module # remove in applying + self.dropout = dropout + self.rank_dropout = rank_dropout + self.module_dropout = module_dropout + + def apply_to(self): + self.org_forward = self.org_module.forward + self.org_module.forward = self.forward + del self.org_module + 
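+ # Forward pass: y = org_forward(x) + lora_up(lora_down(x)) * multiplier * scale, where
+ # scale = alpha / lora_dim. Module/neuron/rank dropout are applied only in training mode;
+ # with split_dims, the per-split up-projections are concatenated along the last dimension.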
+ def forward(self, x): + org_forwarded = self.org_forward(x) + + # module dropout + if self.module_dropout is not None and self.training: + if torch.rand(1) < self.module_dropout: + return org_forwarded + + if self.split_dims is None: + lx = self.lora_down(x) + + # normal dropout + if self.dropout is not None and self.training: + lx = torch.nn.functional.dropout(lx, p=self.dropout) + + # rank dropout + if self.rank_dropout is not None and self.training: + mask = torch.rand((lx.size(0), self.lora_dim), device=lx.device) > self.rank_dropout + if len(lx.size()) == 3: + mask = mask.unsqueeze(1) # for Text Encoder + elif len(lx.size()) == 4: + mask = mask.unsqueeze(-1).unsqueeze(-1) # for Conv2d + lx = lx * mask + + # scaling for rank dropout: treat as if the rank is changed + scale = self.scale * (1.0 / (1.0 - self.rank_dropout)) # redundant for readability + else: + scale = self.scale + + lx = self.lora_up(lx) + + return org_forwarded + lx * self.multiplier * scale + else: + lxs = [lora_down(x) for lora_down in self.lora_down] + + # normal dropout + if self.dropout is not None and self.training: + lxs = [torch.nn.functional.dropout(lx, p=self.dropout) for lx in lxs] + + # rank dropout + if self.rank_dropout is not None and self.training: + masks = [torch.rand((lx.size(0), self.lora_dim), device=lx.device) > self.rank_dropout for lx in lxs] + for i in range(len(lxs)): + if len(lx.size()) == 3: + masks[i] = masks[i].unsqueeze(1) + elif len(lx.size()) == 4: + masks[i] = masks[i].unsqueeze(-1).unsqueeze(-1) + lxs[i] = lxs[i] * masks[i] + + # scaling for rank dropout: treat as if the rank is changed + scale = self.scale * (1.0 / (1.0 - self.rank_dropout)) # redundant for readability + else: + scale = self.scale + + lxs = [lora_up(lx) for lora_up, lx in zip(self.lora_up, lxs)] + + return org_forwarded + torch.cat(lxs, dim=-1) * self.multiplier * scale + + +class LoRAInfModule(LoRAModule): + def __init__( + self, + lora_name, + org_module: torch.nn.Module, + multiplier=1.0, + lora_dim=4, + alpha=1, + **kwargs, + ): + # no dropout for inference + super().__init__(lora_name, org_module, multiplier, lora_dim, alpha) + + self.org_module_ref = [org_module] # for reference + self.enabled = True + self.network: LoRANetwork = None + + def set_network(self, network): + self.network = network + + # merge weight to org_module + # def merge_to(self, sd, dtype, device, non_blocking=False): + # if torch.cuda.is_available(): + # stream = torch.cuda.Stream(device=device) + # with torch.cuda.stream(stream): + # print(f"merge_to {self.lora_name}") + # self._merge_to(sd, dtype, device, non_blocking) + # torch.cuda.synchronize(device=device) + # print(f"merge_to {self.lora_name} done") + # torch.cuda.empty_cache() + # else: + # self._merge_to(sd, dtype, device, non_blocking) + + def merge_to(self, sd, dtype, device, non_blocking=False): + # extract weight from org_module + org_sd = self.org_module.state_dict() + weight = org_sd["weight"] + org_dtype = weight.dtype + org_device = weight.device + weight = weight.to(device, dtype=torch.float, non_blocking=non_blocking) # for calculation + + if dtype is None: + dtype = org_dtype + if device is None: + device = org_device + + if self.split_dims is None: + # get up/down weight + down_weight = sd["lora_down.weight"].to(device, dtype=torch.float, non_blocking=non_blocking) + up_weight = sd["lora_up.weight"].to(device, dtype=torch.float, non_blocking=non_blocking) + + # merge weight + if len(weight.size()) == 2: + # linear + weight = weight + self.multiplier * (up_weight @ 
down_weight) * self.scale + elif down_weight.size()[2:4] == (1, 1): + # conv2d 1x1 + weight = ( + weight + + self.multiplier + * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3) + * self.scale + ) + else: + # conv2d 3x3 + conved = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3) + # logger.info(conved.size(), weight.size(), module.stride, module.padding) + weight = weight + self.multiplier * conved * self.scale + + # set weight to org_module + org_sd["weight"] = weight.to(org_device, dtype=dtype) # back to CPU without non_blocking + self.org_module.load_state_dict(org_sd) + else: + # split_dims + total_dims = sum(self.split_dims) + for i in range(len(self.split_dims)): + # get up/down weight + down_weight = sd[f"lora_down.{i}.weight"].to(device, torch.float, non_blocking=non_blocking) # (rank, in_dim) + up_weight = sd[f"lora_up.{i}.weight"].to(device, torch.float, non_blocking=non_blocking) # (split dim, rank) + + # pad up_weight -> (total_dims, rank) + padded_up_weight = torch.zeros((total_dims, up_weight.size(0)), device=device, dtype=torch.float) + padded_up_weight[sum(self.split_dims[:i]) : sum(self.split_dims[: i + 1])] = up_weight + + # merge weight + weight = weight + self.multiplier * (up_weight @ down_weight) * self.scale + + # set weight to org_module + org_sd["weight"] = weight.to(org_device, dtype) # back to CPU without non_blocking + self.org_module.load_state_dict(org_sd) + + # return weight for merge + def get_weight(self, multiplier=None): + if multiplier is None: + multiplier = self.multiplier + + # get up/down weight from module + up_weight = self.lora_up.weight.to(torch.float) + down_weight = self.lora_down.weight.to(torch.float) + + # pre-calculated weight + if len(down_weight.size()) == 2: + # linear + weight = self.multiplier * (up_weight @ down_weight) * self.scale + elif down_weight.size()[2:4] == (1, 1): + # conv2d 1x1 + weight = ( + self.multiplier + * (up_weight.squeeze(3).squeeze(2) @ down_weight.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze(3) + * self.scale + ) + else: + # conv2d 3x3 + conved = torch.nn.functional.conv2d(down_weight.permute(1, 0, 2, 3), up_weight).permute(1, 0, 2, 3) + weight = self.multiplier * conved * self.scale + + return weight + + def default_forward(self, x): + # logger.info(f"default_forward {self.lora_name} {x.size()}") + if self.split_dims is None: + lx = self.lora_down(x) + lx = self.lora_up(lx) + return self.org_forward(x) + lx * self.multiplier * self.scale + else: + lxs = [lora_down(x) for lora_down in self.lora_down] + lxs = [lora_up(lx) for lora_up, lx in zip(self.lora_up, lxs)] + return self.org_forward(x) + torch.cat(lxs, dim=-1) * self.multiplier * self.scale + + def forward(self, x): + if not self.enabled: + return self.org_forward(x) + return self.default_forward(x) + + +def create_arch_network( + multiplier: float, + network_dim: Optional[int], + network_alpha: Optional[float], + vae: nn.Module, + text_encoders: List[nn.Module], + unet: nn.Module, + neuron_dropout: Optional[float] = None, + **kwargs, +): + # add default exclude patterns + exclude_patterns = kwargs.get("exclude_patterns", None) + if exclude_patterns is None: + exclude_patterns = [] + else: + exclude_patterns = ast.literal_eval(exclude_patterns) + + # exclude if 'img_mod', 'txt_mod' or 'modulation' in the name + exclude_patterns.append(r".*(img_mod|txt_mod|modulation).*") + + kwargs["exclude_patterns"] = exclude_patterns + + return create_network( + 
HUNYUAN_TARGET_REPLACE_MODULES, + "lora_unet", + multiplier, + network_dim, + network_alpha, + vae, + text_encoders, + unet, + neuron_dropout=neuron_dropout, + **kwargs, + ) + + +def create_network( + target_replace_modules: List[str], + prefix: str, + multiplier: float, + network_dim: Optional[int], + network_alpha: Optional[float], + vae: nn.Module, + text_encoders: List[nn.Module], + unet: nn.Module, + neuron_dropout: Optional[float] = None, + **kwargs, +): + """ architecture independent network creation """ + if network_dim is None: + network_dim = 4 # default + if network_alpha is None: + network_alpha = 1.0 + + # extract dim/alpha for conv2d, and block dim + conv_dim = kwargs.get("conv_dim", None) + conv_alpha = kwargs.get("conv_alpha", None) + if conv_dim is not None: + conv_dim = int(conv_dim) + if conv_alpha is None: + conv_alpha = 1.0 + else: + conv_alpha = float(conv_alpha) + + # TODO generic rank/dim setting with regular expression + + # rank/module dropout + rank_dropout = kwargs.get("rank_dropout", None) + if rank_dropout is not None: + rank_dropout = float(rank_dropout) + module_dropout = kwargs.get("module_dropout", None) + if module_dropout is not None: + module_dropout = float(module_dropout) + + # verbose + verbose = kwargs.get("verbose", False) + if verbose is not None: + verbose = True if verbose == "True" else False + + # regular expression for module selection: exclude and include + exclude_patterns = kwargs.get("exclude_patterns", None) + if exclude_patterns is not None and isinstance(exclude_patterns, str): + exclude_patterns = ast.literal_eval(exclude_patterns) + include_patterns = kwargs.get("include_patterns", None) + if include_patterns is not None and isinstance(include_patterns, str): + include_patterns = ast.literal_eval(include_patterns) + + # too many arguments ( ^ω^)・・・ + network = LoRANetwork( + target_replace_modules, + prefix, + text_encoders, + unet, + multiplier=multiplier, + lora_dim=network_dim, + alpha=network_alpha, + dropout=neuron_dropout, + rank_dropout=rank_dropout, + module_dropout=module_dropout, + conv_lora_dim=conv_dim, + conv_alpha=conv_alpha, + exclude_patterns=exclude_patterns, + include_patterns=include_patterns, + verbose=verbose, + ) + + loraplus_lr_ratio = kwargs.get("loraplus_lr_ratio", None) + # loraplus_unet_lr_ratio = kwargs.get("loraplus_unet_lr_ratio", None) + # loraplus_text_encoder_lr_ratio = kwargs.get("loraplus_text_encoder_lr_ratio", None) + loraplus_lr_ratio = float(loraplus_lr_ratio) if loraplus_lr_ratio is not None else None + # loraplus_unet_lr_ratio = float(loraplus_unet_lr_ratio) if loraplus_unet_lr_ratio is not None else None + # loraplus_text_encoder_lr_ratio = float(loraplus_text_encoder_lr_ratio) if loraplus_text_encoder_lr_ratio is not None else None + if loraplus_lr_ratio is not None: # or loraplus_unet_lr_ratio is not None or loraplus_text_encoder_lr_ratio is not None: + network.set_loraplus_lr_ratio(loraplus_lr_ratio) # , loraplus_unet_lr_ratio, loraplus_text_encoder_lr_ratio) + + return network + + +class LoRANetwork(torch.nn.Module): + # only supports U-Net (DiT), Text Encoders are not supported + + def __init__( + self, + target_replace_modules: List[str], + prefix: str, + text_encoders: Union[List[CLIPTextModel], CLIPTextModel], + unet: nn.Module, + multiplier: float = 1.0, + lora_dim: int = 4, + alpha: float = 1, + dropout: Optional[float] = None, + rank_dropout: Optional[float] = None, + module_dropout: Optional[float] = None, + conv_lora_dim: Optional[int] = None, + conv_alpha: Optional[float] = 
None, + module_class: Type[object] = LoRAModule, + modules_dim: Optional[Dict[str, int]] = None, + modules_alpha: Optional[Dict[str, int]] = None, + exclude_patterns: Optional[List[str]] = None, + include_patterns: Optional[List[str]] = None, + verbose: Optional[bool] = False, + ) -> None: + super().__init__() + self.multiplier = multiplier + + self.lora_dim = lora_dim + self.alpha = alpha + self.conv_lora_dim = conv_lora_dim + self.conv_alpha = conv_alpha + self.dropout = dropout + self.rank_dropout = rank_dropout + self.module_dropout = module_dropout + self.target_replace_modules = target_replace_modules + self.prefix = prefix + + self.loraplus_lr_ratio = None + # self.loraplus_unet_lr_ratio = None + # self.loraplus_text_encoder_lr_ratio = None + + if modules_dim is not None: + logger.info(f"create LoRA network from weights") + else: + logger.info(f"create LoRA network. base dim (rank): {lora_dim}, alpha: {alpha}") + logger.info( + f"neuron dropout: p={self.dropout}, rank dropout: p={self.rank_dropout}, module dropout: p={self.module_dropout}" + ) + # if self.conv_lora_dim is not None: + # logger.info( + # f"apply LoRA to Conv2d with kernel size (3,3). dim (rank): {self.conv_lora_dim}, alpha: {self.conv_alpha}" + # ) + # if train_t5xxl: + # logger.info(f"train T5XXL as well") + + # compile regular expression if specified + exclude_re_patterns = [] + if exclude_patterns is not None: + for pattern in exclude_patterns: + try: + re_pattern = re.compile(pattern) + except re.error as e: + logger.error(f"Invalid exclude pattern '{pattern}': {e}") + continue + exclude_re_patterns.append(re_pattern) + + include_re_patterns = [] + if include_patterns is not None: + for pattern in include_patterns: + try: + re_pattern = re.compile(pattern) + except re.error as e: + logger.error(f"Invalid include pattern '{pattern}': {e}") + continue + include_re_patterns.append(re_pattern) + + # create module instances + def create_modules( + is_unet: bool, + pfx: str, + root_module: torch.nn.Module, + target_replace_mods: Optional[List[str]] = None, + filter: Optional[str] = None, + default_dim: Optional[int] = None, + ) -> List[LoRAModule]: + loras = [] + skipped = [] + for name, module in root_module.named_modules(): + if target_replace_mods is None or module.__class__.__name__ in target_replace_mods: + if target_replace_mods is None: # dirty hack for all modules + module = root_module # search all modules + + for child_name, child_module in module.named_modules(): + is_linear = child_module.__class__.__name__ == "Linear" + is_conv2d = child_module.__class__.__name__ == "Conv2d" + is_conv2d_1x1 = is_conv2d and child_module.kernel_size == (1, 1) + + if is_linear or is_conv2d: + original_name = (name + "." 
if name else "") + child_name + lora_name = f"{pfx}.{original_name}".replace(".", "_") + + # exclude/include filter + excluded = False + for pattern in exclude_re_patterns: + if pattern.match(original_name): + excluded = True + break + included = False + for pattern in include_re_patterns: + if pattern.match(original_name): + included = True + break + if excluded and not included: + if verbose: + logger.info(f"exclude: {original_name}") + continue + + # filter by name (not used in the current implementation) + if filter is not None and not filter in lora_name: + continue + + dim = None + alpha = None + + if modules_dim is not None: + # モジュール指定あり + if lora_name in modules_dim: + dim = modules_dim[lora_name] + alpha = modules_alpha[lora_name] + else: + # 通常、すべて対象とする + if is_linear or is_conv2d_1x1: + dim = default_dim if default_dim is not None else self.lora_dim + alpha = self.alpha + elif self.conv_lora_dim is not None: + dim = self.conv_lora_dim + alpha = self.conv_alpha + + if dim is None or dim == 0: + # skipした情報を出力 + if is_linear or is_conv2d_1x1 or (self.conv_lora_dim is not None): + skipped.append(lora_name) + continue + + lora = module_class( + lora_name, + child_module, + self.multiplier, + dim, + alpha, + dropout=dropout, + rank_dropout=rank_dropout, + module_dropout=module_dropout, + ) + loras.append(lora) + + if target_replace_mods is None: + break # all modules are searched + return loras, skipped + + # # create LoRA for text encoder + # # it is redundant to create LoRA modules even if they are not used + + self.text_encoder_loras: List[Union[LoRAModule, LoRAInfModule]] = [] + # skipped_te = [] + # for i, text_encoder in enumerate(text_encoders): + # index = i + # if not train_t5xxl and index > 0: # 0: CLIP, 1: T5XXL, so we skip T5XXL if train_t5xxl is False + # break + # logger.info(f"create LoRA for Text Encoder {index+1}:") + # text_encoder_loras, skipped = create_modules(False, index, text_encoder, LoRANetwork.TEXT_ENCODER_TARGET_REPLACE_MODULE) + # logger.info(f"create LoRA for Text Encoder {index+1}: {len(text_encoder_loras)} modules.") + # self.text_encoder_loras.extend(text_encoder_loras) + # skipped_te += skipped + + # create LoRA for U-Net + self.unet_loras: List[Union[LoRAModule, LoRAInfModule]] + self.unet_loras, skipped_un = create_modules(True, prefix, unet, target_replace_modules) + + logger.info(f"create LoRA for U-Net/DiT: {len(self.unet_loras)} modules.") + if verbose: + for lora in self.unet_loras: + logger.info(f"\t{lora.lora_name:50} {lora.lora_dim}, {lora.alpha}") + + skipped = skipped_un + if verbose and len(skipped) > 0: + logger.warning( + f"because dim (rank) is 0, {len(skipped)} LoRA modules are skipped / dim (rank)が0の為、次の{len(skipped)}個のLoRAモジュールはスキップされます:" + ) + for name in skipped: + logger.info(f"\t{name}") + + # assertion + names = set() + for lora in self.text_encoder_loras + self.unet_loras: + assert lora.lora_name not in names, f"duplicated lora name: {lora.lora_name}" + names.add(lora.lora_name) + + def prepare_network(self, args): + """ + called after the network is created + """ + pass + + def set_multiplier(self, multiplier): + self.multiplier = multiplier + for lora in self.text_encoder_loras + self.unet_loras: + lora.multiplier = self.multiplier + + def set_enabled(self, is_enabled): + for lora in self.text_encoder_loras + self.unet_loras: + lora.enabled = is_enabled + + def load_weights(self, file): + if os.path.splitext(file)[1] == ".safetensors": + from safetensors.torch import load_file + + weights_sd = load_file(file) + else: + 
weights_sd = torch.load(file, map_location="cpu") + + info = self.load_state_dict(weights_sd, False) + return info + + def apply_to( + self, + text_encoders: Optional[nn.Module], + unet: Optional[nn.Module], + apply_text_encoder: bool = True, + apply_unet: bool = True, + ): + if apply_text_encoder: + logger.info(f"enable LoRA for text encoder: {len(self.text_encoder_loras)} modules") + else: + self.text_encoder_loras = [] + + if apply_unet: + logger.info(f"enable LoRA for U-Net: {len(self.unet_loras)} modules") + else: + self.unet_loras = [] + + for lora in self.text_encoder_loras + self.unet_loras: + lora.apply_to() + self.add_module(lora.lora_name, lora) + + # マージできるかどうかを返す + def is_mergeable(self): + return True + + # TODO refactor to common function with apply_to + def merge_to(self, text_encoders, unet, weights_sd, dtype=None, device=None, non_blocking=False): + from concurrent.futures import ThreadPoolExecutor + + with ThreadPoolExecutor(max_workers=2) as executor: # 2 workers is enough + futures = [] + for lora in self.text_encoder_loras + self.unet_loras: + sd_for_lora = {} + for key in weights_sd.keys(): + if key.startswith(lora.lora_name): + sd_for_lora[key[len(lora.lora_name) + 1 :]] = weights_sd[key] + if len(sd_for_lora) == 0: + logger.info(f"no weight for {lora.lora_name}") + continue + + # lora.merge_to(sd_for_lora, dtype, device) + futures.append(executor.submit(lora.merge_to, sd_for_lora, dtype, device, non_blocking)) + + for future in futures: + future.result() + + logger.info(f"weights are merged") + + def set_loraplus_lr_ratio(self, loraplus_lr_ratio): # , loraplus_unet_lr_ratio, loraplus_text_encoder_lr_ratio): + self.loraplus_lr_ratio = loraplus_lr_ratio + + logger.info(f"LoRA+ UNet LR Ratio: {self.loraplus_lr_ratio}") + # logger.info(f"LoRA+ Text Encoder LR Ratio: {self.loraplus_text_encoder_lr_ratio or self.loraplus_lr_ratio}") + + def prepare_optimizer_params(self, unet_lr: float = 1e-4, **kwargs): + self.requires_grad_(True) + + all_params = [] + lr_descriptions = [] + + def assemble_params(loras, lr, loraplus_ratio): + param_groups = {"lora": {}, "plus": {}} + for lora in loras: + for name, param in lora.named_parameters(): + if loraplus_ratio is not None and "lora_up" in name: + param_groups["plus"][f"{lora.lora_name}.{name}"] = param + else: + param_groups["lora"][f"{lora.lora_name}.{name}"] = param + + params = [] + descriptions = [] + for key in param_groups.keys(): + param_data = {"params": param_groups[key].values()} + + if len(param_data["params"]) == 0: + continue + + if lr is not None: + if key == "plus": + param_data["lr"] = lr * loraplus_ratio + else: + param_data["lr"] = lr + + if param_data.get("lr", None) == 0 or param_data.get("lr", None) is None: + logger.info("NO LR skipping!") + continue + + params.append(param_data) + descriptions.append("plus" if key == "plus" else "") + + return params, descriptions + + if self.unet_loras: + params, descriptions = assemble_params(self.unet_loras, unet_lr, self.loraplus_lr_ratio) + all_params.extend(params) + lr_descriptions.extend(["unet" + (" " + d if d else "") for d in descriptions]) + + return all_params, lr_descriptions + + def enable_gradient_checkpointing(self): + # not supported + pass + + def prepare_grad_etc(self, unet): + self.requires_grad_(True) + + def on_epoch_start(self, unet): + self.train() + + def on_step_start(self): + pass + + def get_trainable_params(self): + return self.parameters() + + def save_weights(self, file, dtype, metadata): + if metadata is not None and len(metadata) == 0: + 
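+ # An empty metadata dict is treated the same as no metadata; for safetensors
+ # output, model hashes are computed and added to the metadata further below.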
metadata = None + + state_dict = self.state_dict() + + if dtype is not None: + for key in list(state_dict.keys()): + v = state_dict[key] + v = v.detach().clone().to("cpu").to(dtype) + state_dict[key] = v + + if os.path.splitext(file)[1] == ".safetensors": + from safetensors.torch import save_file + from musubi_tuner.utils import model_utils + + # Precalculate model hashes to save time on indexing + if metadata is None: + metadata = {} + model_hash, legacy_hash = model_utils.precalculate_safetensors_hashes(state_dict, metadata) + metadata["sshs_model_hash"] = model_hash + metadata["sshs_legacy_hash"] = legacy_hash + + save_file(state_dict, file, metadata) + else: + torch.save(state_dict, file) + + def backup_weights(self): + # 重みのバックアップを行う + loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras + for lora in loras: + org_module = lora.org_module_ref[0] + if not hasattr(org_module, "_lora_org_weight"): + sd = org_module.state_dict() + org_module._lora_org_weight = sd["weight"].detach().clone() + org_module._lora_restored = True + + def restore_weights(self): + # 重みのリストアを行う + loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras + for lora in loras: + org_module = lora.org_module_ref[0] + if not org_module._lora_restored: + sd = org_module.state_dict() + sd["weight"] = org_module._lora_org_weight + org_module.load_state_dict(sd) + org_module._lora_restored = True + + def pre_calculation(self): + # 事前計算を行う + loras: List[LoRAInfModule] = self.text_encoder_loras + self.unet_loras + for lora in loras: + org_module = lora.org_module_ref[0] + sd = org_module.state_dict() + + org_weight = sd["weight"] + lora_weight = lora.get_weight().to(org_weight.device, dtype=org_weight.dtype) + sd["weight"] = org_weight + lora_weight + assert sd["weight"].shape == org_weight.shape + org_module.load_state_dict(sd) + + org_module._lora_restored = False + lora.enabled = False + + def apply_max_norm_regularization(self, max_norm_value, device): + downkeys = [] + upkeys = [] + alphakeys = [] + norms = [] + keys_scaled = 0 + + state_dict = self.state_dict() + for key in state_dict.keys(): + if "lora_down" in key and "weight" in key: + downkeys.append(key) + upkeys.append(key.replace("lora_down", "lora_up")) + alphakeys.append(key.replace("lora_down.weight", "alpha")) + + for i in range(len(downkeys)): + down = state_dict[downkeys[i]].to(device) + up = state_dict[upkeys[i]].to(device) + alpha = state_dict[alphakeys[i]].to(device) + dim = down.shape[0] + scale = alpha / dim + + if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1): + updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3) + elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3): + updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3) + else: + updown = up @ down + + updown *= scale + + norm = updown.norm().clamp(min=max_norm_value / 2) + desired = torch.clamp(norm, max=max_norm_value) + ratio = desired.cpu() / norm.cpu() + sqrt_ratio = ratio**0.5 + if ratio != 1: + keys_scaled += 1 + state_dict[upkeys[i]] *= sqrt_ratio + state_dict[downkeys[i]] *= sqrt_ratio + scalednorm = updown.norm() * ratio + norms.append(scalednorm.item()) + + return keys_scaled, sum(norms) / len(norms), max(norms) + + +def create_arch_network_from_weights( + multiplier: float, + weights_sd: Dict[str, torch.Tensor], + text_encoders: Optional[List[nn.Module]] = None, + unet: Optional[nn.Module] = None, + for_inference: bool = False, + **kwargs, +) -> LoRANetwork: + return 
create_network_from_weights( + HUNYUAN_TARGET_REPLACE_MODULES, multiplier, weights_sd, text_encoders, unet, for_inference, **kwargs + ) + + +# Create network from weights for inference, weights are not loaded here (because can be merged) +def create_network_from_weights( + target_replace_modules: List[str], + multiplier: float, + weights_sd: Dict[str, torch.Tensor], + text_encoders: Optional[List[nn.Module]] = None, + unet: Optional[nn.Module] = None, + for_inference: bool = False, + **kwargs, +) -> LoRANetwork: + # get dim/alpha mapping + modules_dim = {} + modules_alpha = {} + for key, value in weights_sd.items(): + if "." not in key: + continue + + lora_name = key.split(".")[0] + if "alpha" in key: + modules_alpha[lora_name] = value + elif "lora_down" in key: + dim = value.shape[0] + modules_dim[lora_name] = dim + # logger.info(lora_name, value.size(), dim) + + module_class = LoRAInfModule if for_inference else LoRAModule + + network = LoRANetwork( + target_replace_modules, + "lora_unet", + text_encoders, + unet, + multiplier=multiplier, + modules_dim=modules_dim, + modules_alpha=modules_alpha, + module_class=module_class, + ) + return network diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora_framepack.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora_framepack.py new file mode 100644 index 0000000000000000000000000000000000000000..e20208f8044eab2ea856a60743197dcdd93b3693 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora_framepack.py @@ -0,0 +1,65 @@ +# LoRA module for FramePack + +import ast +from typing import Dict, List, Optional +import torch +import torch.nn as nn + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +import musubi_tuner.networks.lora as lora + + +FRAMEPACK_TARGET_REPLACE_MODULES = ["HunyuanVideoTransformerBlock", "HunyuanVideoSingleTransformerBlock"] + + +def create_arch_network( + multiplier: float, + network_dim: Optional[int], + network_alpha: Optional[float], + vae: nn.Module, + text_encoders: List[nn.Module], + unet: nn.Module, + neuron_dropout: Optional[float] = None, + **kwargs, +): + # add default exclude patterns + exclude_patterns = kwargs.get("exclude_patterns", None) + if exclude_patterns is None: + exclude_patterns = [] + else: + exclude_patterns = ast.literal_eval(exclude_patterns) + + # exclude if 'norm' in the name of the module + exclude_patterns.append(r".*(norm).*") + + kwargs["exclude_patterns"] = exclude_patterns + + return lora.create_network( + FRAMEPACK_TARGET_REPLACE_MODULES, + "lora_unet", + multiplier, + network_dim, + network_alpha, + vae, + text_encoders, + unet, + neuron_dropout=neuron_dropout, + **kwargs, + ) + + +def create_arch_network_from_weights( + multiplier: float, + weights_sd: Dict[str, torch.Tensor], + text_encoders: Optional[List[nn.Module]] = None, + unet: Optional[nn.Module] = None, + for_inference: bool = False, + **kwargs, +) -> lora.LoRANetwork: + return lora.create_network_from_weights( + FRAMEPACK_TARGET_REPLACE_MODULES, multiplier, weights_sd, text_encoders, unet, for_inference, **kwargs + ) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora_wan.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora_wan.py new file mode 100644 index 0000000000000000000000000000000000000000..918d3038b356dedcd53c82cd333dda46fff2512a --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/networks/lora_wan.py @@ -0,0 +1,65 @@ +# LoRA module for Wan2.1 
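+"""Architecture adapter that builds a LoRANetwork for Wan2.1, targeting
+`WanAttentionBlock` modules and excluding embeddings, norms and the head.
+
+Illustrative usage (a minimal sketch; `dit_model` and the rank/alpha values
+below are assumptions for illustration, not defaults from the training scripts):
+
+    import musubi_tuner.networks.lora_wan as lora_wan
+
+    network = lora_wan.create_arch_network(
+        multiplier=1.0,
+        network_dim=32,        # LoRA rank (assumed value)
+        network_alpha=16.0,    # LoRA alpha (assumed value)
+        vae=None,              # not used when building the LoRA modules
+        text_encoders=[],      # text encoders are not wrapped for Wan
+        unet=dit_model,        # the Wan DiT model to wrap (hypothetical name)
+        neuron_dropout=None,
+    )
+    # Inject the LoRA modules into the DiT only (no text encoder LoRA).
+    network.apply_to(None, dit_model, apply_text_encoder=False, apply_unet=True)
+"""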
+ +import ast +from typing import Dict, List, Optional +import torch +import torch.nn as nn + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +import musubi_tuner.networks.lora as lora + + +WAN_TARGET_REPLACE_MODULES = ["WanAttentionBlock"] + + +def create_arch_network( + multiplier: float, + network_dim: Optional[int], + network_alpha: Optional[float], + vae: nn.Module, + text_encoders: List[nn.Module], + unet: nn.Module, + neuron_dropout: Optional[float] = None, + **kwargs, +): + # add default exclude patterns + exclude_patterns = kwargs.get("exclude_patterns", None) + if exclude_patterns is None: + exclude_patterns = [] + else: + exclude_patterns = ast.literal_eval(exclude_patterns) + + # exclude if 'img_mod', 'txt_mod' or 'modulation' in the name + exclude_patterns.append(r".*(patch_embedding|text_embedding|time_embedding|time_projection|norm|head).*") + + kwargs["exclude_patterns"] = exclude_patterns + + return lora.create_network( + WAN_TARGET_REPLACE_MODULES, + "lora_unet", + multiplier, + network_dim, + network_alpha, + vae, + text_encoders, + unet, + neuron_dropout=neuron_dropout, + **kwargs, + ) + + +def create_arch_network_from_weights( + multiplier: float, + weights_sd: Dict[str, torch.Tensor], + text_encoders: Optional[List[nn.Module]] = None, + unet: Optional[nn.Module] = None, + for_inference: bool = False, + **kwargs, +) -> lora.LoRANetwork: + return lora.create_network_from_weights( + WAN_TARGET_REPLACE_MODULES, multiplier, weights_sd, text_encoders, unet, for_inference, **kwargs + ) diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_cache_latents.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_cache_latents.py new file mode 100644 index 0000000000000000000000000000000000000000..548d04341429c65f3828a17e6904c244ef15e23f --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_cache_latents.py @@ -0,0 +1,277 @@ +import argparse +import os +import glob +from typing import Optional, Union + +import numpy as np +import torch +from tqdm import tqdm + +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +from PIL import Image + +import logging + +from musubi_tuner.dataset.image_video_dataset import ItemInfo, save_latent_cache_wan, ARCHITECTURE_WAN +from musubi_tuner.utils.model_utils import str_to_dtype +from musubi_tuner.wan.configs import wan_i2v_14B +from musubi_tuner.wan.modules.vae import WanVAE +from musubi_tuner.wan.modules.clip import CLIPModel +import musubi_tuner.cache_latents as cache_latents + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +black_image_latents = {} # global variable for black image latent, used in encode_and_save_batch_one_frame. 
key: tuple for shape + + +def encode_and_save_batch(vae: WanVAE, clip: Optional[CLIPModel], batch: list[ItemInfo], one_frame: bool = False): + if one_frame: + encode_and_save_batch_one_frame(vae, clip, batch) + return + + contents = torch.stack([torch.from_numpy(item.content) for item in batch]) + if len(contents.shape) == 4: + contents = contents.unsqueeze(1) # B, H, W, C -> B, F, H, W, C + + contents = contents.permute(0, 4, 1, 2, 3).contiguous() # B, C, F, H, W + contents = contents.to(vae.device, dtype=vae.dtype) + contents = contents / 127.5 - 1.0 # normalize to [-1, 1] + + h, w = contents.shape[3], contents.shape[4] + if h < 8 or w < 8: + item = batch[0] # other items should have the same size + raise ValueError(f"Image or video size too small: {item.item_key} and {len(batch) - 1} more, size: {item.original_size}") + + # print(f"encode batch: {contents.shape}") + with torch.amp.autocast(device_type=vae.device.type, dtype=vae.dtype), torch.no_grad(): + latent = vae.encode(contents) # list of Tensor[C, F, H, W] + latent = torch.stack(latent, dim=0) # B, C, F, H, W + latent = latent.to(vae.dtype) # convert to bfloat16, we are not sure if this is correct + + if clip is not None: + # extract first frame of contents + images = contents[:, :, 0:1, :, :] # B, C, F, H, W, non contiguous view is fine + + with torch.amp.autocast(device_type=clip.device.type, dtype=torch.float16), torch.no_grad(): + clip_context = clip.visual(images) + clip_context = clip_context.to(torch.float16) # convert to fp16 + + # encode image latent for I2V + B, _, _, lat_h, lat_w = latent.shape + F = contents.shape[2] + + # Create mask for the required number of frames + msk = torch.ones(1, F, lat_h, lat_w, dtype=vae.dtype, device=vae.device) + msk[:, 1:] = 0 + msk = torch.concat([torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]], dim=1) + msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w) + msk = msk.transpose(1, 2) # 1, F, 4, H, W -> 1, 4, F, H, W + msk = msk.repeat(B, 1, 1, 1, 1) # B, 4, F, H, W + + # Zero padding for the required number of frames only + padding_frames = F - 1 # The first frame is the input image + images_resized = torch.concat([images, torch.zeros(B, 3, padding_frames, h, w, device=vae.device)], dim=2) + with torch.amp.autocast(device_type=vae.device.type, dtype=vae.dtype), torch.no_grad(): + y = vae.encode(images_resized) + y = torch.stack(y, dim=0) # B, C, F, H, W + + y = y[:, :, :F] # may be not needed + y = y.to(vae.dtype) # convert to bfloat16 + y = torch.concat([msk, y], dim=1) # B, 4 + C, F, H, W + + else: + clip_context = None + y = None + + # control videos + if batch[0].control_content is not None: + control_contents = torch.stack([torch.from_numpy(item.control_content) for item in batch]) + if len(control_contents.shape) == 4: + control_contents = control_contents.unsqueeze(1) + control_contents = control_contents.permute(0, 4, 1, 2, 3).contiguous() # B, C, F, H, W + control_contents = control_contents.to(vae.device, dtype=vae.dtype) + control_contents = control_contents / 127.5 - 1.0 # normalize to [-1, 1] + with torch.amp.autocast(device_type=vae.device.type, dtype=vae.dtype), torch.no_grad(): + control_latent = vae.encode(control_contents) # list of Tensor[C, F, H, W] + control_latent = torch.stack(control_latent, dim=0) # B, C, F, H, W + control_latent = control_latent.to(vae.dtype) # convert to bfloat16 + else: + control_latent = None + + # # debug: decode and save + # with torch.no_grad(): + # latent_to_decode = latent / vae.config.scaling_factor + # images = 
vae.decode(latent_to_decode, return_dict=False)[0] + # images = (images / 2 + 0.5).clamp(0, 1) + # images = images.cpu().float().numpy() + # images = (images * 255).astype(np.uint8) + # images = images.transpose(0, 2, 3, 4, 1) # B, C, F, H, W -> B, F, H, W, C + # for b in range(images.shape[0]): + # for f in range(images.shape[1]): + # fln = os.path.splitext(os.path.basename(batch[b].item_key))[0] + # img = Image.fromarray(images[b, f]) + # img.save(f"./logs/decode_{fln}_{b}_{f:03d}.jpg") + + for i, item in enumerate(batch): + l = latent[i] + cctx = clip_context[i] if clip is not None else None + y_i = y[i] if clip is not None else None + control_latent_i = control_latent[i] if control_latent is not None else None + # print(f"save latent cache: {item.latent_cache_path}, latent shape: {l.shape}") + save_latent_cache_wan(item, l, cctx, y_i, control_latent_i) + + +def encode_and_save_batch_one_frame(vae: WanVAE, clip: Optional[CLIPModel], batch: list[ItemInfo]): + # item.content: target image (H, W, C) + # item.control_content: list of images (H, W, C) + assert clip is not None, "clip is required for one frame training" + + # contents: control_content + content + _, _, contents, content_masks = cache_latents.preprocess_contents(batch) + contents = contents.to(vae.device, dtype=vae.dtype) # B, C, F, H, W + assert contents.shape[2] >= 2, "One frame training requires at least 1 control frame and 1 target frame" + + # print(f"encode batch: {contents.shape}") + with torch.amp.autocast(device_type=vae.device.type, dtype=vae.dtype), torch.no_grad(): + # VAE encode: we need to encode one frame at a time because VAE encoder has stride=4 for the time dimension except for the first frame. + latent = [] + for bi in range(contents.shape[0]): + c = contents[bi : bi + 1] # B, C, F, H, W, b=1 + l = [] + for f in range(c.shape[2]): # iterate over frames + cf = c[:, :, f : f + 1, :, :] # B, C, 1, H, W + l.append(vae.encode(cf)[0].unsqueeze(0)) # list of [C, 1, H, W] to [1, C, 1, H, W] + latent.append(torch.cat(l, dim=2)) # B, C, F, H, W + latent = torch.cat(latent, dim=0) # B, C, F, H, W + + latent = latent.to(vae.dtype) # convert to bfloat16, we are not sure if this is correct + control_latent = latent[:, :, :-1, :, :] + target_latent = latent[:, :, -1:, :, :] + + # Create black image latent for the target frame + global black_image_latents + shape = (1, contents.shape[1], 1, contents.shape[3], contents.shape[4]) # B=1, C, F=1, H, W + if shape not in black_image_latents: + with torch.amp.autocast(device_type=vae.device.type, dtype=vae.dtype), torch.no_grad(): + black_image_latent = vae.encode(torch.zeros(shape, device=vae.device, dtype=vae.dtype))[0] + black_image_latent = black_image_latent.to(device="cpu", dtype=vae.dtype) + black_image_latents[shape] = black_image_latent # store for future use + black_image_latent = black_image_latents[shape] # [C, 1, H, W] + + # Vision encoding per‑item (once): use first content (first control content) because it is the start image + num_control_images = contents.shape[2] - 1 # number of control images + if num_control_images > 2: + logger.error(f"One frame training requires 1 or 2 control images, but found {num_control_images} in {batch[0].item_key}. ") + raise ValueError( + f"One frame training requires 1 or 2 control images, but found {num_control_images} in {batch[0].item_key}." 
+ ) + + images = contents[:, :, 0:num_control_images, :, :] # B, C, F, H, W + clip_context = [] + for i in range(images.shape[0]): + with torch.amp.autocast(device_type=clip.device.type, dtype=torch.float16), torch.no_grad(): + clip_context.append(clip.visual(images[i : i + 1])) + clip_context = torch.stack(clip_context, dim=0) # B, num_control_images, N, D + clip_context = clip_context.to(torch.float16) # convert to fp16 + + B, C, _, lat_h, lat_w = latent.shape + for i, item in enumerate(batch): + latent = target_latent[i] # C, 1, H, W + F = contents.shape[2] # number of frames + y = torch.zeros((4 + C, F, lat_h, lat_w), dtype=vae.dtype, device=vae.device) # conditioning + l = torch.zeros((C, F, lat_h, lat_w), dtype=vae.dtype, device=vae.device) # training latent + + # Create latent and mask for the required number of frames + control_latent_indices = item.fp_1f_clean_indices + target_and_control_latent_indices = control_latent_indices + [item.fp_1f_target_index] + f_indices = sorted(target_and_control_latent_indices) + + ci = 0 + for j, index in enumerate(f_indices): + if index == item.fp_1f_target_index: + # print(f"Set target latent. latent shape: {latent.shape}, black_image_latent shape: {black_image_latent.shape}") + y[4:, j : j + 1, :, :] = black_image_latent + l[:, j : j + 1, :, :] = latent # set target latent + else: + # print(f"Set control latent. control_latent shape: {control_latent[i, :, ci, :, :].shape}") + y[:4, j, :, :] = 1.0 # set mask to 1.0 for the clean latent frames + y[4:, j, :, :] = control_latent[i, :, ci, :, :] # set control latent + l[:, j, :, :] = control_latent[i, :, ci, :, :] # also set control latent to training latent + ci += 1 # increment control latent index + + cctx = clip_context[i] + + logger.info(f"Saving cache for item: {item.item_key} at {item.latent_cache_path}") + logger.info(f" control_latent_indices: {control_latent_indices}, fp_1f_target_index: {item.fp_1f_target_index}") + logger.info(f" y shape: {y.shape}, mask: {y[0, :,0,0]}, l shape: {l.shape}, clip_context shape: {cctx.shape}") + logger.info(f" f_indices: {f_indices}") + + save_latent_cache_wan(item, l, cctx, y, None, f_indices=f_indices) + + +def main(): + parser = cache_latents.setup_parser_common() + parser = wan_setup_parser(parser) + + args = parser.parse_args() + + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_WAN) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + if args.debug_mode is not None: + cache_latents.show_datasets( + datasets, args.debug_mode, args.console_width, args.console_back, args.console_num_images, fps=16 + ) + return + + assert args.vae is not None, "vae checkpoint is required" + + vae_path = args.vae + + logger.info(f"Loading VAE model from {vae_path}") + vae_dtype = torch.bfloat16 if args.vae_dtype is None else str_to_dtype(args.vae_dtype) + cache_device = torch.device("cpu") if args.vae_cache_cpu else None + vae = WanVAE(vae_path=vae_path, device=device, dtype=vae_dtype, cache_device=cache_device) + + if args.clip is not None: + clip_dtype = wan_i2v_14B.i2v_14B["clip_dtype"] + clip 
= CLIPModel(dtype=clip_dtype, device=device, weight_path=args.clip) + else: + clip = None + + # Encode images + def encode(one_batch: list[ItemInfo]): + encode_and_save_batch(vae, clip, one_batch, args.one_frame) + + cache_latents.encode_datasets(datasets, encode, args) + + +def wan_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument("--vae_cache_cpu", action="store_true", help="cache features in VAE on CPU") + parser.add_argument( + "--clip", + type=str, + default=None, + help="text encoder (CLIP) checkpoint path, optional. If training I2V model, this is required", + ) + parser.add_argument( + "--one_frame", + action="store_true", + help="Generate cache for one frame training (single frame, single section).", + ) + return parser + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_cache_text_encoder_outputs.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_cache_text_encoder_outputs.py new file mode 100644 index 0000000000000000000000000000000000000000..cd3fb72ba1bac9326cd0230cc6f653de8a194f07 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_cache_text_encoder_outputs.py @@ -0,0 +1,108 @@ +import argparse +import os +from typing import Optional, Union + +import numpy as np +import torch +from tqdm import tqdm + +from musubi_tuner.dataset import config_utils +from musubi_tuner.dataset.config_utils import BlueprintGenerator, ConfigSanitizer +import accelerate + +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_WAN, ItemInfo, save_text_encoder_output_cache_wan + +# for t5 config: all Wan2.1 models have the same config for t5 +from musubi_tuner.wan.configs import wan_t2v_14B + +import musubi_tuner.cache_text_encoder_outputs as cache_text_encoder_outputs +import logging + +from musubi_tuner.utils.model_utils import str_to_dtype +from musubi_tuner.wan.modules.t5 import T5EncoderModel + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +def encode_and_save_batch( + text_encoder: T5EncoderModel, batch: list[ItemInfo], device: torch.device, accelerator: Optional[accelerate.Accelerator] +): + prompts = [item.caption for item in batch] + # print(prompts) + + # encode prompt + with torch.no_grad(): + if accelerator is not None: + with accelerator.autocast(): + context = text_encoder(prompts, device) + else: + context = text_encoder(prompts, device) + + # save prompt cache + for item, ctx in zip(batch, context): + save_text_encoder_output_cache_wan(item, ctx) + + +def main(): + parser = cache_text_encoder_outputs.setup_parser_common() + parser = wan_setup_parser(parser) + + args = parser.parse_args() + + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + + # Load dataset config + blueprint_generator = BlueprintGenerator(ConfigSanitizer()) + logger.info(f"Load dataset config from {args.dataset_config}") + user_config = config_utils.load_user_config(args.dataset_config) + blueprint = blueprint_generator.generate(user_config, args, architecture=ARCHITECTURE_WAN) + train_dataset_group = config_utils.generate_dataset_group_by_blueprint(blueprint.dataset_group) + + datasets = train_dataset_group.datasets + + # define accelerator for fp8 inference + config = wan_t2v_14B.t2v_14B # all Wan2.1 models have the same config for t5 + accelerator = None + if args.fp8_t5: + accelerator = accelerate.Accelerator(mixed_precision="bf16" if config.t5_dtype 
== torch.bfloat16 else "fp16") + + # prepare cache files and paths: all_cache_files_for_dataset = exisiting cache files, all_cache_paths_for_dataset = all cache paths in the dataset + all_cache_files_for_dataset, all_cache_paths_for_dataset = cache_text_encoder_outputs.prepare_cache_files_and_paths(datasets) + + # Load T5 + logger.info(f"Loading T5: {args.t5}") + text_encoder = T5EncoderModel( + text_len=config.text_len, dtype=config.t5_dtype, device=device, weight_path=args.t5, fp8=args.fp8_t5 + ) + + # Encode with T5 + logger.info("Encoding with T5") + + def encode_for_text_encoder(batch: list[ItemInfo]): + encode_and_save_batch(text_encoder, batch, device, accelerator) + + cache_text_encoder_outputs.process_text_encoder_batches( + args.num_workers, + args.skip_existing, + args.batch_size, + datasets, + all_cache_files_for_dataset, + all_cache_paths_for_dataset, + encode_for_text_encoder, + ) + del text_encoder + + # remove cache files not in dataset + cache_text_encoder_outputs.post_process_cache_files(datasets, all_cache_files_for_dataset, all_cache_paths_for_dataset, args.keep_cache) + + +def wan_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: + parser.add_argument("--t5", type=str, default=None, required=True, help="text encoder (T5) checkpoint path") + parser.add_argument("--fp8_t5", action="store_true", help="use fp8 for Text Encoder model") + return parser + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_generate_video.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_generate_video.py new file mode 100644 index 0000000000000000000000000000000000000000..0cb725ab900ee3d1592d368f36bbbf70a1cb5829 --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_generate_video.py @@ -0,0 +1,2117 @@ +import argparse +from datetime import datetime +import gc +import random +import os +import re +import time +import math +import copy +from types import ModuleType, SimpleNamespace +from typing import Tuple, Optional, List, Union, Any, Dict + +import torch +import accelerate +from accelerate import Accelerator +from safetensors.torch import load_file, save_file +from safetensors import safe_open +from PIL import Image +import cv2 +import numpy as np +import torchvision.transforms.functional as TF +from tqdm import tqdm + +from musubi_tuner.dataset import image_video_dataset +from musubi_tuner.networks import lora_wan +from musubi_tuner.utils.safetensors_utils import mem_eff_save_file, load_safetensors +from musubi_tuner.wan.configs import WAN_CONFIGS, SUPPORTED_SIZES +import musubi_tuner.wan as wan +from musubi_tuner.wan.modules.model import WanModel, load_wan_model, detect_wan_sd_dtype +from musubi_tuner.wan.modules.vae import WanVAE +from musubi_tuner.wan.modules.t5 import T5EncoderModel +from musubi_tuner.wan.modules.clip import CLIPModel +from musubi_tuner.modules.scheduling_flow_match_discrete import FlowMatchDiscreteScheduler +from musubi_tuner.wan.utils.fm_solvers import FlowDPMSolverMultistepScheduler, get_sampling_sigmas, retrieve_timesteps +from musubi_tuner.wan.utils.fm_solvers_unipc import FlowUniPCMultistepScheduler + +try: + from lycoris.kohya import create_network_from_weights +except: + pass + +from musubi_tuner.utils.model_utils import str_to_dtype +from musubi_tuner.utils.device_utils import clean_memory_on_device +from musubi_tuner.hv_generate_video import save_images_grid, save_videos_grid, synchronize_device +from musubi_tuner.dataset.image_video_dataset import 
load_video + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + + +class GenerationSettings: + def __init__( + self, device: torch.device, cfg, dit_dtype: torch.dtype, dit_weight_dtype: Optional[torch.dtype], vae_dtype: torch.dtype + ): + self.device = device + self.cfg = cfg + self.dit_dtype = dit_dtype + self.dit_weight_dtype = dit_weight_dtype + self.vae_dtype = vae_dtype + + +def parse_args() -> argparse.Namespace: + """parse command line arguments""" + parser = argparse.ArgumentParser(description="Wan 2.1 inference script") + + # WAN arguments + parser.add_argument("--ckpt_dir", type=str, default=None, help="The path to the checkpoint directory (Wan 2.1 official).") + parser.add_argument("--task", type=str, default="t2v-14B", choices=list(WAN_CONFIGS.keys()), help="The task to run.") + parser.add_argument( + "--sample_solver", type=str, default="unipc", choices=["unipc", "dpm++", "vanilla"], help="The solver used to sample." + ) + + parser.add_argument("--dit", type=str, default=None, help="DiT checkpoint path") + parser.add_argument("--vae", type=str, default=None, help="VAE checkpoint path") + parser.add_argument("--vae_dtype", type=str, default=None, help="data type for VAE, default is bfloat16") + parser.add_argument("--vae_cache_cpu", action="store_true", help="cache features in VAE on CPU") + parser.add_argument("--t5", type=str, default=None, help="text encoder (T5) checkpoint path") + parser.add_argument("--clip", type=str, default=None, help="text encoder (CLIP) checkpoint path") + # LoRA + parser.add_argument("--lora_weight", type=str, nargs="*", required=False, default=None, help="LoRA weight path") + parser.add_argument("--lora_multiplier", type=float, nargs="*", default=1.0, help="LoRA multiplier") + parser.add_argument("--include_patterns", type=str, nargs="*", default=None, help="LoRA module include patterns") + parser.add_argument("--exclude_patterns", type=str, nargs="*", default=None, help="LoRA module exclude patterns") + parser.add_argument( + "--save_merged_model", + type=str, + default=None, + help="Save merged model to path. If specified, no inference will be performed.", + ) + + # inference + parser.add_argument("--prompt", type=str, default=None, help="prompt for generation") + parser.add_argument( + "--negative_prompt", + type=str, + default=None, + help="negative prompt for generation, use default negative prompt if not specified", + ) + parser.add_argument("--video_size", type=int, nargs=2, default=[256, 256], help="video size, height and width") + parser.add_argument("--video_length", type=int, default=None, help="video length, Default depends on task") + parser.add_argument("--fps", type=int, default=16, help="video fps, Default is 16") + parser.add_argument("--infer_steps", type=int, default=None, help="number of inference steps") + parser.add_argument("--save_path", type=str, required=True, help="path to save generated video") + parser.add_argument("--seed", type=int, default=None, help="Seed for evaluation.") + parser.add_argument( + "--cpu_noise", action="store_true", help="Use CPU to generate noise (compatible with ComfyUI). Default is False." + ) + parser.add_argument( + "--guidance_scale", + type=float, + default=5.0, + help="Guidance scale for classifier free guidance. 
Default is 5.0.", + ) + parser.add_argument("--video_path", type=str, default=None, help="path to video for video2video inference") + parser.add_argument("--image_path", type=str, default=None, help="path to image for image2video inference") + parser.add_argument("--end_image_path", type=str, default=None, help="path to end image for image2video inference") + parser.add_argument( + "--control_path", + type=str, + default=None, + help="path to control video for inference with controlnet. video file or directory with images", + ) + parser.add_argument( + "--one_frame_inference", + type=str, + default=None, + help="one frame inference, default is None, comma separated values from 'no_2x', 'no_4x', 'no_post', 'control_indices' and 'target_index'.", + ) + parser.add_argument( + "--control_image_path", type=str, default=None, nargs="*", help="path to control (reference) image for one frame inference." + ) + parser.add_argument( + "--control_image_mask_path", + type=str, + default=None, + nargs="*", + help="path to control (reference) image mask for one frame inference.", + ) + parser.add_argument("--trim_tail_frames", type=int, default=0, help="trim tail N frames from the video before saving") + parser.add_argument( + "--cfg_skip_mode", + type=str, + default="none", + choices=["early", "late", "middle", "early_late", "alternate", "none"], + help="CFG skip mode. each mode skips different parts of the CFG. " + " early: initial steps, late: later steps, middle: middle steps, early_late: both early and late, alternate: alternate, none: no skip (default)", + ) + parser.add_argument( + "--cfg_apply_ratio", + type=float, + default=None, + help="The ratio of steps to apply CFG (0.0 to 1.0). Default is None (apply all steps).", + ) + parser.add_argument( + "--slg_layers", type=str, default=None, help="Skip block (layer) indices for SLG (Skip Layer Guidance), comma separated" + ) + parser.add_argument( + "--slg_scale", + type=float, + default=3.0, + help="scale for SLG classifier free guidance. Default is 3.0. Ignored if slg_mode is None or uncond", + ) + parser.add_argument("--slg_start", type=float, default=0.0, help="start ratio for inference steps for SLG. Default is 0.0.") + parser.add_argument("--slg_end", type=float, default=0.3, help="end ratio for inference steps for SLG. Default is 0.3.") + parser.add_argument( + "--slg_mode", + type=str, + default=None, + choices=["original", "uncond"], + help="SLG mode. original: same as SD3, uncond: replace uncond pred with SLG pred", + ) + + # Flow Matching + parser.add_argument( + "--flow_shift", + type=float, + default=None, + help="Shift factor for flow matching schedulers. Default depends on task.", + ) + + parser.add_argument("--fp8", action="store_true", help="use fp8 for DiT model") + parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT, only for fp8") + parser.add_argument("--fp8_fast", action="store_true", help="Enable fast FP8 arithmetic (RTX 4XXX+), only for fp8_scaled") + parser.add_argument("--fp8_t5", action="store_true", help="use fp8 for Text Encoder model") + parser.add_argument( + "--device", type=str, default=None, help="device to use for inference. 
If None, use CUDA if available, otherwise use CPU" + ) + parser.add_argument( + "--attn_mode", + type=str, + default="torch", + choices=["flash", "flash2", "flash3", "torch", "sageattn", "xformers", "sdpa"], + help="attention mode", + ) + parser.add_argument("--blocks_to_swap", type=int, default=0, help="number of blocks to swap in the model") + parser.add_argument( + "--output_type", + type=str, + default="video", + choices=["video", "images", "latent", "both", "latent_images"], + help="output type", + ) + parser.add_argument("--no_metadata", action="store_true", help="do not save metadata") + parser.add_argument("--latent_path", type=str, nargs="*", default=None, help="path to latent for decode. no inference") + parser.add_argument("--lycoris", action="store_true", help="use lycoris for inference") + parser.add_argument("--compile", action="store_true", help="Enable torch.compile") + parser.add_argument( + "--compile_args", + nargs=4, + metavar=("BACKEND", "MODE", "DYNAMIC", "FULLGRAPH"), + default=["inductor", "max-autotune-no-cudagraphs", "False", "False"], + help="Torch.compile settings", + ) + + # New arguments for batch and interactive modes + parser.add_argument("--from_file", type=str, default=None, help="Read prompts from a file") + parser.add_argument("--interactive", action="store_true", help="Interactive mode: read prompts from console") + + args = parser.parse_args() + + # Validate arguments + if args.from_file and args.interactive: + raise ValueError("Cannot use both --from_file and --interactive at the same time") + + if args.prompt is None and not args.from_file and not args.interactive and args.latent_path is None: + raise ValueError("Either --prompt, --from_file, --interactive, or --latent_path must be specified") + + assert (args.latent_path is None or len(args.latent_path) == 0) or ( + args.output_type == "images" or args.output_type == "video" + ), "latent_path is only supported for images or video output" + + return args + + +def parse_prompt_line(line: str) -> Dict[str, Any]: + """Parse a prompt line into a dictionary of argument overrides + + Args: + line: Prompt line with options + + Returns: + Dict[str, Any]: Dictionary of argument overrides + """ + # TODO common function with hv_train_network.line_to_prompt_dict + parts = line.split(" --") + prompt = parts[0].strip() + + # Create dictionary of overrides + overrides = {"prompt": prompt} + # Initialize control_image_path and control_image_mask_path as a list to accommodate multiple paths + overrides["control_image_path"] = [] + overrides["control_image_mask_path"] = [] + + for part in parts[1:]: + if not part.strip(): + continue + option_parts = part.split(" ", 1) + option = option_parts[0].strip() + value = option_parts[1].strip() if len(option_parts) > 1 else "" + + # Map options to argument names + if option == "w": + overrides["video_size_width"] = int(value) + elif option == "h": + overrides["video_size_height"] = int(value) + elif option == "f": + overrides["video_length"] = int(value) + elif option == "d": + overrides["seed"] = int(value) + elif option == "s": + overrides["infer_steps"] = int(value) + elif option == "g" or option == "l": + overrides["guidance_scale"] = float(value) + elif option == "fs": + overrides["flow_shift"] = float(value) + elif option == "i": + overrides["image_path"] = value + elif option == "ei": + overrides["end_image_path"] = value + elif option == "cn": + overrides["control_path"] = value + elif option == "n": + overrides["negative_prompt"] = value + # one frame inference 
options + elif option == "ci": # control_image_path + overrides["control_image_path"].append(value) + elif option == "cim": # control_image_mask_path + overrides["control_image_mask_path"].append(value) + elif option == "of": # one_frame_inference + overrides["one_frame_inference"] = value + + # If no control_image_path was provided, remove the empty list + if not overrides["control_image_path"]: + del overrides["control_image_path"] + if not overrides["control_image_mask_path"]: + del overrides["control_image_mask_path"] + + return overrides + + +def apply_overrides(args: argparse.Namespace, overrides: Dict[str, Any]) -> argparse.Namespace: + """Apply overrides to args + + Args: + args: Original arguments + overrides: Dictionary of overrides + + Returns: + argparse.Namespace: New arguments with overrides applied + """ + args_copy = copy.deepcopy(args) + + for key, value in overrides.items(): + if key == "video_size_width": + args_copy.video_size[1] = value + elif key == "video_size_height": + args_copy.video_size[0] = value + else: + setattr(args_copy, key, value) + + return args_copy + + +def get_task_defaults(task: str, size: Optional[Tuple[int, int]] = None) -> Tuple[int, float, int, bool]: + """Return default values for each task + + Args: + task: task name (t2v, t2i, i2v etc.) + size: size of the video (width, height) + + Returns: + Tuple[int, float, int, bool]: (infer_steps, flow_shift, video_length, needs_clip) + """ + width, height = size if size else (0, 0) + + if "t2i" in task: + return 50, 5.0, 1, False + elif "i2v" in task: + flow_shift = 3.0 if (width == 832 and height == 480) or (width == 480 and height == 832) else 5.0 + return 40, flow_shift, 81, True + else: # t2v or default + return 50, 5.0, 81, False + + +def setup_args(args: argparse.Namespace) -> argparse.Namespace: + """Validate and set default values for optional arguments + + Args: + args: command line arguments + + Returns: + argparse.Namespace: updated arguments + """ + # Get default values for the task + infer_steps, flow_shift, video_length, _ = get_task_defaults(args.task, tuple(args.video_size)) + + # Apply default values to unset arguments + if args.infer_steps is None: + args.infer_steps = infer_steps + if args.flow_shift is None: + args.flow_shift = flow_shift + if args.video_length is None: + args.video_length = video_length + + # Force video_length to 1 for t2i tasks + if "t2i" in args.task: + assert args.video_length == 1, f"video_length should be 1 for task {args.task}" + + # parse slg_layers + if args.slg_layers is not None: + args.slg_layers = list(map(int, args.slg_layers.split(","))) + + return args + + +def check_inputs(args: argparse.Namespace) -> Tuple[int, int, int]: + """Validate video size and length + + Args: + args: command line arguments + + Returns: + Tuple[int, int, int]: (height, width, video_length) + """ + height = args.video_size[0] + width = args.video_size[1] + size = f"{width}*{height}" + + if size not in SUPPORTED_SIZES[args.task]: + logger.warning(f"Size {size} is not supported for task {args.task}. 
Supported sizes are {SUPPORTED_SIZES[args.task]}.") + + video_length = args.video_length + + if height % 8 != 0 or width % 8 != 0: + raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") + + return height, width, video_length + + +def calculate_dimensions(video_size: Tuple[int, int], video_length: int, config) -> Tuple[Tuple[int, int, int, int], int]: + """calculate dimensions for the generation + + Args: + video_size: video frame size (height, width) + video_length: number of frames in the video + config: model configuration + + Returns: + Tuple[Tuple[int, int, int, int], int]: + ((channels, frames, height, width), seq_len) + """ + height, width = video_size + frames = video_length + + # calculate latent space dimensions + lat_f = (frames - 1) // config.vae_stride[0] + 1 + lat_h = height // config.vae_stride[1] + lat_w = width // config.vae_stride[2] + + # calculate sequence length + seq_len = math.ceil((lat_h * lat_w) / (config.patch_size[1] * config.patch_size[2]) * lat_f) + + return ((16, lat_f, lat_h, lat_w), seq_len) + + +def load_vae(args: argparse.Namespace, config, device: torch.device, dtype: torch.dtype) -> WanVAE: + """load VAE model + + Args: + args: command line arguments + config: model configuration + device: device to use + dtype: data type for the model + + Returns: + WanVAE: loaded VAE model + """ + vae_path = args.vae if args.vae is not None else os.path.join(args.ckpt_dir, config.vae_checkpoint) + + logger.info(f"Loading VAE model from {vae_path}") + cache_device = torch.device("cpu") if args.vae_cache_cpu else None + vae = WanVAE(vae_path=vae_path, device=device, dtype=dtype, cache_device=cache_device) + return vae + + +def load_text_encoder(args: argparse.Namespace, config, device: torch.device) -> T5EncoderModel: + """load text encoder (T5) model + + Args: + args: command line arguments + config: model configuration + device: device to use + + Returns: + T5EncoderModel: loaded text encoder model + """ + checkpoint_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.t5_checkpoint) + tokenizer_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.t5_tokenizer) + + text_encoder = T5EncoderModel( + text_len=config.text_len, + dtype=config.t5_dtype, + device=device, + checkpoint_path=checkpoint_path, + tokenizer_path=tokenizer_path, + weight_path=args.t5, + fp8=args.fp8_t5, + ) + + return text_encoder + + +def load_clip_model(args: argparse.Namespace, config, device: torch.device) -> CLIPModel: + """load CLIP model (for I2V only) + + Args: + args: command line arguments + config: model configuration + device: device to use + + Returns: + CLIPModel: loaded CLIP model + """ + checkpoint_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.clip_checkpoint) + tokenizer_path = None if args.ckpt_dir is None else os.path.join(args.ckpt_dir, config.clip_tokenizer) + + clip = CLIPModel( + dtype=config.clip_dtype, + device=device, + checkpoint_path=checkpoint_path, + tokenizer_path=tokenizer_path, + weight_path=args.clip, + ) + + return clip + + +def load_dit_model( + args: argparse.Namespace, + config, + device: torch.device, + dit_dtype: torch.dtype, + dit_weight_dtype: Optional[torch.dtype] = None, + is_i2v: bool = False, +) -> WanModel: + """load DiT model + + Args: + args: command line arguments + config: model configuration + device: device to use + dit_dtype: data type for the model + dit_weight_dtype: data type for the model weights. 
None for as-is + is_i2v: I2V mode + + Returns: + WanModel: loaded DiT model + """ + loading_device = "cpu" + if args.blocks_to_swap == 0 and args.lora_weight is None and not args.fp8_scaled: + loading_device = device + + loading_weight_dtype = dit_weight_dtype + if args.fp8_scaled or args.lora_weight is not None: + loading_weight_dtype = dit_dtype # load as-is + + # do not fp8 optimize because we will merge LoRA weights + model = load_wan_model(config, device, args.dit, args.attn_mode, False, loading_device, loading_weight_dtype, False) + + return model + + +def merge_lora_weights( + lora_module: ModuleType, + model: torch.nn.Module, + args: argparse.Namespace, + device: torch.device, + converter: Optional[callable] = None, +) -> None: + """merge LoRA weights to the model + + Args: + lora_module: LoRA module, e.g. lora_wan + model: DiT model + args: command line arguments + device: device to use + converter: Optional callable to convert weights + """ + if args.lora_weight is None or len(args.lora_weight) == 0: + return + + for i, lora_weight in enumerate(args.lora_weight): + if args.lora_multiplier is not None and len(args.lora_multiplier) > i: + lora_multiplier = args.lora_multiplier[i] + else: + lora_multiplier = 1.0 + + logger.info(f"Loading LoRA weights from {lora_weight} with multiplier {lora_multiplier}") + weights_sd = load_file(lora_weight) + if converter is not None: + weights_sd = converter(weights_sd) + + # apply include/exclude patterns + original_key_count = len(weights_sd.keys()) + if args.include_patterns is not None and len(args.include_patterns) > i: + include_pattern = args.include_patterns[i] + regex_include = re.compile(include_pattern) + weights_sd = {k: v for k, v in weights_sd.items() if regex_include.search(k)} + logger.info(f"Filtered keys with include pattern {include_pattern}: {original_key_count} -> {len(weights_sd.keys())}") + if args.exclude_patterns is not None and len(args.exclude_patterns) > i: + original_key_count_ex = len(weights_sd.keys()) + exclude_pattern = args.exclude_patterns[i] + regex_exclude = re.compile(exclude_pattern) + weights_sd = {k: v for k, v in weights_sd.items() if not regex_exclude.search(k)} + logger.info( + f"Filtered keys with exclude pattern {exclude_pattern}: {original_key_count_ex} -> {len(weights_sd.keys())}" + ) + if len(weights_sd) != original_key_count: + remaining_keys = list(set([k.split(".", 1)[0] for k in weights_sd.keys()])) + remaining_keys.sort() + logger.info(f"Remaining LoRA modules after filtering: {remaining_keys}") + if len(weights_sd) == 0: + logger.warning(f"No keys left after filtering.") + + if args.lycoris: + lycoris_net, _ = create_network_from_weights( + multiplier=lora_multiplier, + file=None, + weights_sd=weights_sd, + unet=model, + text_encoder=None, + vae=None, + for_inference=True, + ) + lycoris_net.merge_to(None, model, weights_sd, dtype=None, device=device) + else: + network = lora_module.create_arch_network_from_weights(lora_multiplier, weights_sd, unet=model, for_inference=True) + network.merge_to(None, model, weights_sd, device=device, non_blocking=True) + + synchronize_device(device) + logger.info("LoRA weights loaded") + + # save model here before casting to dit_weight_dtype + if args.save_merged_model: + logger.info(f"Saving merged model to {args.save_merged_model}") + mem_eff_save_file(model.state_dict(), args.save_merged_model) # save_file needs a lot of memory + logger.info("Merged model saved") + + +def optimize_model( + model: WanModel, args: argparse.Namespace, device: torch.device, 
dit_dtype: torch.dtype, dit_weight_dtype: torch.dtype +) -> None: + """optimize the model (FP8 conversion, device move etc.) + + Args: + model: dit model + args: command line arguments + device: device to use + dit_dtype: dtype for the model + dit_weight_dtype: dtype for the model weights + """ + if args.fp8_scaled: + # load state dict as-is and optimize to fp8 + state_dict = model.state_dict() + + # if no blocks to swap, we can move the weights to GPU after optimization on GPU (omit redundant CPU->GPU copy) + move_to_device = args.blocks_to_swap == 0 # if blocks_to_swap > 0, we will keep the model on CPU + state_dict = model.fp8_optimization(state_dict, device, move_to_device, use_scaled_mm=args.fp8_fast) + + info = model.load_state_dict(state_dict, strict=True, assign=True) + logger.info(f"Loaded FP8 optimized weights: {info}") + + if args.blocks_to_swap == 0: + model.to(device) # make sure all parameters are on the right device (e.g. RoPE etc.) + else: + # simple cast to dit_dtype + target_dtype = None # load as-is (dit_weight_dtype == dtype of the weights in state_dict) + target_device = None + + if dit_weight_dtype is not None: # in case of args.fp8 and not args.fp8_scaled + logger.info(f"Convert model to {dit_weight_dtype}") + target_dtype = dit_weight_dtype + + if args.blocks_to_swap == 0: + logger.info(f"Move model to device: {device}") + target_device = device + + model.to(target_device, target_dtype) # move and cast at the same time. this reduces redundant copy operations + + if args.compile: + compile_backend, compile_mode, compile_dynamic, compile_fullgraph = args.compile_args + logger.info( + f"Torch Compiling[Backend: {compile_backend}; Mode: {compile_mode}; Dynamic: {compile_dynamic}; Fullgraph: {compile_fullgraph}]" + ) + torch._dynamo.config.cache_size_limit = 32 + for i in range(len(model.blocks)): + model.blocks[i] = torch.compile( + model.blocks[i], + backend=compile_backend, + mode=compile_mode, + dynamic=compile_dynamic.lower() in "true", + fullgraph=compile_fullgraph.lower() in "true", + ) + + if args.blocks_to_swap > 0: + logger.info(f"Enable swap {args.blocks_to_swap} blocks to CPU from device: {device}") + model.enable_block_swap(args.blocks_to_swap, device, supports_backward=False) + model.move_to_device_except_swap_blocks(device) + model.prepare_block_swap_before_forward() + else: + # make sure the model is on the right device + model.to(device) + + model.eval().requires_grad_(False) + clean_memory_on_device(device) + + +def prepare_t2v_inputs( + args: argparse.Namespace, + config, + accelerator: Accelerator, + device: torch.device, + vae: Optional[WanVAE] = None, + encoded_context: Optional[Dict] = None, +) -> Tuple[torch.Tensor, Tuple[dict, dict]]: + """Prepare inputs for T2V + + Args: + args: command line arguments + config: model configuration + accelerator: Accelerator instance + device: device to use + vae: VAE model for control video encoding + encoded_context: Pre-encoded text context + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]: + (noise, context, context_null, (arg_c, arg_null)) + """ + # Prepare inputs for T2V + # calculate dimensions and sequence length + height, width = args.video_size + frames = args.video_length + (_, lat_f, lat_h, lat_w), seq_len = calculate_dimensions(args.video_size, args.video_length, config) + target_shape = (16, lat_f, lat_h, lat_w) + + # configure negative prompt + n_prompt = args.negative_prompt if args.negative_prompt else config.sample_neg_prompt + + # set seed + seed = args.seed if 
args.seed is not None else random.randint(0, 2**32 - 1) + if not args.cpu_noise: + seed_g = torch.Generator(device=device) + seed_g.manual_seed(seed) + else: + # ComfyUI compatible noise + seed_g = torch.manual_seed(seed) + + if encoded_context is None: + # load text encoder + text_encoder = load_text_encoder(args, config, device) + text_encoder.model.to(device) + + # encode prompt + with torch.no_grad(): + if args.fp8_t5: + with torch.amp.autocast(device_type=device.type, dtype=config.t5_dtype): + context = text_encoder([args.prompt], device) + context_null = text_encoder([n_prompt], device) + else: + context = text_encoder([args.prompt], device) + context_null = text_encoder([n_prompt], device) + + # free text encoder and clean memory + del text_encoder + clean_memory_on_device(device) + else: + # Use pre-encoded context + context = encoded_context["context"] + context_null = encoded_context["context_null"] + + # Fun-Control: encode control video to latent space + if config.is_fun_control: + # TODO use same resizing as for image + logger.info(f"Encoding control video to latent space") + # C, F, H, W + control_video = load_control_video(args.control_path, frames, height, width).to(device) + vae.to_device(device) + with torch.autocast(device_type=device.type, dtype=vae.dtype), torch.no_grad(): + control_latent = vae.encode([control_video])[0] + y = torch.concat([control_latent, torch.zeros_like(control_latent)], dim=0) # add control video latent + vae.to_device("cpu") + else: + y = None + + # generate noise + noise = torch.randn(target_shape, dtype=torch.float32, generator=seed_g, device=device if not args.cpu_noise else "cpu") + noise = noise.to(device) + + # prepare model input arguments + arg_c = {"context": context, "seq_len": seq_len} + arg_null = {"context": context_null, "seq_len": seq_len} + if y is not None: + arg_c["y"] = [y] + arg_null["y"] = [y] + + return noise, (arg_c, arg_null) + + +def parse_one_frame_inference_args(one_frame_inference_arg: str) -> Tuple[int, List[int], List[int], int]: + """Parse one frame inference arguments""" + one_frame_inference = set() + for mode in one_frame_inference_arg.split(","): + one_frame_inference.add(mode.strip()) + + target_index = 0 + control_indices = [] + for one_frame_param in one_frame_inference: + if one_frame_param.startswith("target_index="): + target_index = int(one_frame_param.split("=")[1]) + logger.info(f"Set index for target: {target_index}") + elif one_frame_param.startswith("control_index="): + control_indices = one_frame_param.split("=")[1].split(";") + control_indices = [int(idx) for idx in control_indices] + logger.info(f"Set control indices: {control_indices}") + + target_and_control_latent_indices = control_indices + [target_index] + f_indices = sorted(target_and_control_latent_indices) + + one_frame_inference_index = f_indices.index(target_index) + + return target_index, control_indices, f_indices, one_frame_inference_index + + +def prepare_one_frame_inference( + args: argparse.Namespace, + accelerator: Accelerator, + vae: WanVAE, + device: torch.device, + lat_h: int, + lat_w: int, + height: int, + width: int, +) -> Tuple[int, torch.Tensor, List[int]]: + + target_index, _, f_indices, one_frame_inference_index = parse_one_frame_inference_args(args.one_frame_inference) + + # prepare image + def preprocess_image(image_path: str): + image = Image.open(image_path) + if image.mode == "RGBA": + alpha = image.split()[-1] + else: + alpha = None + image = image.convert("RGB") + + image_np = np.array(image) # PIL to numpy, HWC + 
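+ # the steps below resize the image to the bucket resolution, scale pixel values to [-1, 1] and reshape to CFHW (C=3, F=1) before VAE encoding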
+ image_np = image_video_dataset.resize_image_to_bucket(image_np, (width, height)) + image_tensor = torch.from_numpy(image_np).float() / 127.5 - 1.0 # -1 to 1.0, HWC + image_tensor = image_tensor.permute(2, 0, 1)[:, None] # HWC -> CHW -> CFHW, C=3, F=1 + return image_tensor, image_np, alpha + + # check control images + control_image_tensors = [] + control_mask_images = [] + if args.control_image_path is not None and len(args.control_image_path) > 0: + for ctrl_image_path in args.control_image_path: + control_image_tensor, _, control_mask = preprocess_image(ctrl_image_path) + control_image_tensors.append(control_image_tensor) + control_mask_images.append(control_mask) + + # TODO mask is not supported yet + + vae.to_device(device) + + with accelerator.autocast(), torch.no_grad(): + black_image_latent = vae.encode([torch.zeros((3, 1, height, width), dtype=torch.float32, device=device)])[0] + + control_latents = [] + if control_image_tensors is not None: + # encode image to latent space with VAE + logger.info(f"Encoding image to latent space") + + for ctrl_image_tensor in control_image_tensors: + # encode image one by one + with accelerator.autocast(), torch.no_grad(): + control_latent = vae.encode([ctrl_image_tensor.to(device)])[0] + control_latents.append(control_latent) + + vae.to_device("cpu") + + lat_f = 1 + (len(control_latents) if control_latents is not None else 0) + + # Create latent and mask for the required number of frames + y = torch.zeros(4 + 16, lat_f, lat_h, lat_w, dtype=torch.float32, device=device) + ci = 0 + for j, index in enumerate(f_indices): + if index == target_index: + y[4:, j : j + 1, :, :] = black_image_latent # set target latent to black image + else: + y[:4, j, :, :] = 1.0 # set mask to 1.0 for the clean latent frames + y[4:, j : j + 1, :, :] = control_latents[ci] # set control latent + ci += 1 + + return one_frame_inference_index, y, f_indices + + +def prepare_i2v_inputs( + args: argparse.Namespace, + config, + accelerator: Accelerator, + device: torch.device, + vae: WanVAE, + encoded_context: Optional[Dict] = None, +) -> Tuple[torch.Tensor, Tuple[dict, dict], Optional[int]]: + """Prepare inputs for I2V + + Args: + args: command line arguments + config: model configuration + accelerator: Accelerator instance + device: device to use + vae: VAE model, used for image encoding + encoded_context: Pre-encoded text context + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Tuple[dict, dict]]: + (noise, context, context_null, y, (arg_c, arg_null)) + """ + # get video dimensions + height, width = args.video_size + frames = args.video_length + max_area = width * height + + # load image + img = Image.open(args.image_path).convert("RGB") + + # convert to numpy + img_cv2 = np.array(img) # PIL to numpy + + # convert to tensor (-1 to 1) + img_tensor = TF.to_tensor(img).sub_(0.5).div_(0.5).to(device) + + # end frame image + if args.end_image_path is not None: + end_img = Image.open(args.end_image_path).convert("RGB") + end_img_cv2 = np.array(end_img) # PIL to numpy + else: + end_img = None + end_img_cv2 = None + has_end_image = end_img is not None + additional_frames = 1 if has_end_image and not config.flf2v else 0 + + # calculate latent dimensions: keep aspect ratio + height, width = img_tensor.shape[1:] + aspect_ratio = height / width + lat_h = round(np.sqrt(max_area * aspect_ratio) // config.vae_stride[1] // config.patch_size[1] * config.patch_size[1]) + lat_w = round(np.sqrt(max_area / aspect_ratio) // config.vae_stride[2] // config.patch_size[2] * 
config.patch_size[2]) + height = lat_h * config.vae_stride[1] + width = lat_w * config.vae_stride[2] + lat_f = (frames - 1) // config.vae_stride[0] + 1 # size of latent frames + max_seq_len = (lat_f + additional_frames) * lat_h * lat_w // (config.patch_size[1] * config.patch_size[2]) + + # set seed + seed = args.seed if args.seed is not None else random.randint(0, 2**32 - 1) + if not args.cpu_noise: + seed_g = torch.Generator(device=device) + seed_g.manual_seed(seed) + else: + # ComfyUI compatible noise + seed_g = torch.manual_seed(seed) + + # configure negative prompt + n_prompt = args.negative_prompt if args.negative_prompt else config.sample_neg_prompt + + if encoded_context is None: + # load text encoder + text_encoder = load_text_encoder(args, config, device) + text_encoder.model.to(device) + + # encode prompt + with torch.no_grad(): + if args.fp8_t5: + with torch.amp.autocast(device_type=device.type, dtype=config.t5_dtype): + context = text_encoder([args.prompt], device) + context_null = text_encoder([n_prompt], device) + else: + context = text_encoder([args.prompt], device) + context_null = text_encoder([n_prompt], device) + + # free text encoder and clean memory + del text_encoder + clean_memory_on_device(device) + + # load CLIP model + clip = load_clip_model(args, config, device) + clip.model.to(device) + + # encode image to CLIP context + logger.info(f"Encoding image to CLIP context") + with torch.amp.autocast(device_type=device.type, dtype=torch.float16), torch.no_grad(): + clip_context = clip.visual([img_tensor[:, None, :, :]]) + # I2V end image is not officially supported, so no additional CLIP context + if end_img is not None and config.flf2v: + end_img_tensor = TF.to_tensor(end_img).sub_(0.5).div_(0.5).to(device) + end_clip_context = clip.visual([end_img_tensor[:, None, :, :]]) + clip_context = torch.concat([clip_context, end_clip_context], dim=0) + logger.info(f"Encoding complete") + + # free CLIP model and clean memory + del clip + clean_memory_on_device(device) + else: + # Use pre-encoded context + context = encoded_context["context"] + context_null = encoded_context["context_null"] + clip_context = encoded_context["clip_context"] + + # check if one frame inference is enabled + if args.one_frame_inference is not None: + if has_end_image and not config.flf2v: + logger.warning("One frame inference with end image is not supported other than FLF2V") + one_frame_inference_index, y, f_indices = prepare_one_frame_inference( + args, accelerator, vae, device, lat_h, lat_w, height, width + ) + max_seq_len = len(f_indices) * lat_h * lat_w // (config.patch_size[1] * config.patch_size[2]) + else: + one_frame_inference_index, f_indices = None, None + + # encode image to latent space with VAE + logger.info(f"Encoding image to latent space") + vae.to_device(device) + + # resize image + interpolation = cv2.INTER_AREA if height < img_cv2.shape[0] else cv2.INTER_CUBIC + img_resized = cv2.resize(img_cv2, (width, height), interpolation=interpolation) + img_resized = TF.to_tensor(img_resized).sub_(0.5).div_(0.5).to(device) # -1 to 1, CHW + img_resized = img_resized.unsqueeze(1) # CFHW + + if has_end_image: + interpolation = cv2.INTER_AREA if height < end_img_cv2.shape[1] else cv2.INTER_CUBIC + end_img_resized = cv2.resize(end_img_cv2, (width, height), interpolation=interpolation) + end_img_resized = TF.to_tensor(end_img_resized).sub_(0.5).div_(0.5).to(device) # -1 to 1, CHW + end_img_resized = end_img_resized.unsqueeze(1) # CFHW + + # create mask for the first frame + # if unofficial end image 
is used, we need to add an additional frame for it + msk = torch.zeros(4, lat_f + additional_frames, lat_h, lat_w, device=device) + msk[:, 0] = 1 + if has_end_image: + # this process is confirmed by official code for FLF2V + msk[:, -1] = 1 + + # encode image to latent space + with accelerator.autocast(), torch.no_grad(): + if not config.flf2v or not has_end_image: + # padding to match the required number of frames + padding_frames = frames - 1 # the first frame is image + img_resized = torch.concat([img_resized, torch.zeros(3, padding_frames, height, width, device=device)], dim=1) + y = vae.encode([img_resized])[0] + + if has_end_image: + y_end = vae.encode([end_img_resized])[0] + y = torch.concat([y, y_end], dim=1) # add end frame + else: + # FLF2V: encode image and end image together + padding_frames = frames - 2 # first and last frames are images + img_resized = torch.concat( + [img_resized, torch.zeros(3, padding_frames, height, width, device=device), end_img_resized], dim=1 + ) + y = vae.encode([img_resized])[0] + + y = torch.concat([msk, y]) + logger.info(f"Encoding complete") + + # Fun-Control: encode control video to latent space + if config.is_fun_control: + # TODO use same resizing as for image + logger.info(f"Encoding control video to latent space") + # C, F, H, W + control_video = load_control_video(args.control_path, frames + (1 if has_end_image else 0), height, width).to(device) + with accelerator.autocast(), torch.no_grad(): + control_latent = vae.encode([control_video])[0] + y = y[msk.shape[0] :] # remove mask because Fun-Control does not need it + if has_end_image: + y[:, 1:-1] = 0 # remove image latent except first and last frame. according to WanVideoWrapper, this doesn't work + else: + y[:, 1:] = 0 # remove image latent except first frame + y = torch.concat([control_latent, y], dim=0) # add control video latent + + # generate noise + noise = torch.randn( + 16, + y.shape[1], # number of frames in latent space + lat_h, + lat_w, + dtype=torch.float32, + generator=seed_g, + device=device if not args.cpu_noise else "cpu", + ) + noise = noise.to(device) + + print( + f"noise shape: {noise.shape}, y shape: {y.shape}, context shape: {context[0].shape}, clip_context shape: {clip_context.shape}" + ) + + # prepare model input arguments + arg_c = { + "context": [context[0]], + "clip_fea": clip_context, + "seq_len": max_seq_len, + "y": [y], + "f_indices": [f_indices] if f_indices is not None else None, + } + + arg_null = { + "context": context_null, + "clip_fea": clip_context, + "seq_len": max_seq_len, + "y": [y], + "f_indices": [f_indices] if f_indices is not None else None, + } + + vae.to_device("cpu") # move VAE to CPU to save memory + clean_memory_on_device(device) + + return noise, (arg_c, arg_null), one_frame_inference_index + + +def load_control_video(control_path: str, frames: int, height: int, width: int) -> torch.Tensor: + """load control video to latent space + + Args: + control_path: path to control video + frames: number of frames in the video + height: height of the video + width: width of the video + + Returns: + torch.Tensor: control video latent, CFHW + """ + logger.info(f"Load control video from {control_path}") + video = load_video(control_path, 0, frames, bucket_reso=(width, height)) # list of frames + if len(video) < frames: + raise ValueError(f"Video length is less than {frames}") + # video = np.stack(video, axis=0) # F, H, W, C + video = torch.stack([TF.to_tensor(frame).sub_(0.5).div_(0.5) for frame in video], dim=0) # F, C, H, W, -1 to 1 + video = 
video.permute(1, 0, 2, 3) # C, F, H, W + return video + + +def setup_scheduler(args: argparse.Namespace, config, device: torch.device) -> Tuple[Any, torch.Tensor]: + """setup scheduler for sampling + + Args: + args: command line arguments + config: model configuration + device: device to use + + Returns: + Tuple[Any, torch.Tensor]: (scheduler, timesteps) + """ + if args.sample_solver == "unipc": + scheduler = FlowUniPCMultistepScheduler(num_train_timesteps=config.num_train_timesteps, shift=1, use_dynamic_shifting=False) + scheduler.set_timesteps(args.infer_steps, device=device, shift=args.flow_shift) + timesteps = scheduler.timesteps + elif args.sample_solver == "dpm++": + scheduler = FlowDPMSolverMultistepScheduler( + num_train_timesteps=config.num_train_timesteps, shift=1, use_dynamic_shifting=False + ) + sampling_sigmas = get_sampling_sigmas(args.infer_steps, args.flow_shift) + timesteps, _ = retrieve_timesteps(scheduler, device=device, sigmas=sampling_sigmas) + elif args.sample_solver == "vanilla": + scheduler = FlowMatchDiscreteScheduler(num_train_timesteps=config.num_train_timesteps, shift=args.flow_shift) + scheduler.set_timesteps(args.infer_steps, device=device) + timesteps = scheduler.timesteps + + # FlowMatchDiscreteScheduler does not support generator argument in step method + org_step = scheduler.step + + def step_wrapper( + model_output: torch.Tensor, + timestep: Union[int, torch.Tensor], + sample: torch.Tensor, + return_dict: bool = True, + generator=None, + ): + return org_step(model_output, timestep, sample, return_dict=return_dict) + + scheduler.step = step_wrapper + else: + raise NotImplementedError("Unsupported solver.") + + return scheduler, timesteps + + +def run_sampling( + model: WanModel, + noise: torch.Tensor, + scheduler: Any, + timesteps: torch.Tensor, + args: argparse.Namespace, + inputs: Tuple[dict, dict], + device: torch.device, + seed_g: torch.Generator, + accelerator: Accelerator, + is_i2v: bool = False, + use_cpu_offload: bool = True, +) -> torch.Tensor: + """run sampling + Args: + model: dit model + noise: initial noise + scheduler: scheduler for sampling + timesteps: time steps for sampling + args: command line arguments + inputs: model input (arg_c, arg_null) + device: device to use + seed_g: random generator + accelerator: Accelerator instance + is_i2v: I2V mode (False means T2V mode) + use_cpu_offload: Whether to offload tensors to CPU during processing + Returns: + torch.Tensor: generated latent + """ + arg_c, arg_null = inputs + + latent = noise + latent_storage_device = device if not use_cpu_offload else "cpu" + latent = latent.to(latent_storage_device) + + # cfg skip + apply_cfg_array = [] + num_timesteps = len(timesteps) + + if args.cfg_skip_mode != "none" and args.cfg_apply_ratio is not None: + # Calculate thresholds based on cfg_apply_ratio + apply_steps = int(num_timesteps * args.cfg_apply_ratio) + + if args.cfg_skip_mode == "early": + # Skip CFG in early steps, apply in late steps + start_index = num_timesteps - apply_steps + end_index = num_timesteps + elif args.cfg_skip_mode == "late": + # Skip CFG in late steps, apply in early steps + start_index = 0 + end_index = apply_steps + elif args.cfg_skip_mode == "early_late": + # Skip CFG in early and late steps, apply in middle steps + start_index = (num_timesteps - apply_steps) // 2 + end_index = start_index + apply_steps + elif args.cfg_skip_mode == "middle": + # Skip CFG in middle steps, apply in early and late steps + skip_steps = num_timesteps - apply_steps + middle_start = (num_timesteps - 
skip_steps) // 2 + middle_end = middle_start + skip_steps + + w = 0.0 + for step_idx in range(num_timesteps): + if args.cfg_skip_mode == "alternate": + # accumulate w and apply CFG when w >= 1.0 + w += args.cfg_apply_ratio + apply = w >= 1.0 + if apply: + w -= 1.0 + elif args.cfg_skip_mode == "middle": + # Skip CFG in early and late steps, apply in middle steps + apply = step_idx < middle_start or step_idx >= middle_end + else: + # Apply CFG on some steps based on ratio + apply = step_idx >= start_index and step_idx < end_index + + apply_cfg_array.append(apply) + + pattern = ["A" if apply else "S" for apply in apply_cfg_array] + pattern = "".join(pattern) + logger.info(f"CFG skip mode: {args.cfg_skip_mode}, apply ratio: {args.cfg_apply_ratio}, pattern: {pattern}") + else: + # Apply CFG on all steps + apply_cfg_array = [True] * num_timesteps + + # SLG original implementation is based on https://github.com/Stability-AI/sd3.5/blob/main/sd3_impls.py + slg_start_step = int(args.slg_start * num_timesteps) + slg_end_step = int(args.slg_end * num_timesteps) + + for i, t in enumerate(tqdm(timesteps)): + # latent is on CPU if use_cpu_offload is True + latent_model_input = [latent.to(device)] + timestep = torch.stack([t]).to(device) + + with accelerator.autocast(), torch.no_grad(): + noise_pred_cond = model(latent_model_input, t=timestep, **arg_c)[0].to(latent_storage_device) + + apply_cfg = apply_cfg_array[i] # apply CFG or not + if apply_cfg: + apply_slg = i >= slg_start_step and i < slg_end_step + # print(f"Applying SLG: {apply_slg}, i: {i}, slg_start_step: {slg_start_step}, slg_end_step: {slg_end_step}") + if args.slg_mode == "original" and apply_slg: + noise_pred_uncond = model(latent_model_input, t=timestep, **arg_null)[0].to(latent_storage_device) + + # apply guidance + # SD3 formula: scaled = neg_out + (pos_out - neg_out) * cond_scale + noise_pred = noise_pred_uncond + args.guidance_scale * (noise_pred_cond - noise_pred_uncond) + + # calculate skip layer out + skip_layer_out = model(latent_model_input, t=timestep, skip_block_indices=args.slg_layers, **arg_null)[0].to( + latent_storage_device + ) + + # apply skip layer guidance + # SD3 formula: scaled = scaled + (pos_out - skip_layer_out) * self.slg + noise_pred = noise_pred + args.slg_scale * (noise_pred_cond - skip_layer_out) + elif args.slg_mode == "uncond" and apply_slg: + # noise_pred_uncond is skip layer out + noise_pred_uncond = model(latent_model_input, t=timestep, skip_block_indices=args.slg_layers, **arg_null)[0].to( + latent_storage_device + ) + + # apply guidance + noise_pred = noise_pred_uncond + args.guidance_scale * (noise_pred_cond - noise_pred_uncond) + + else: + # normal guidance + noise_pred_uncond = model(latent_model_input, t=timestep, **arg_null)[0].to(latent_storage_device) + + # apply guidance + noise_pred = noise_pred_uncond + args.guidance_scale * (noise_pred_cond - noise_pred_uncond) + else: + noise_pred = noise_pred_cond + + # step + latent_input = latent.unsqueeze(0) + temp_x0 = scheduler.step(noise_pred.unsqueeze(0), t, latent_input, return_dict=False, generator=seed_g)[0] + + # update latent + latent = temp_x0.squeeze(0) + + return latent + + +def generate( + args: argparse.Namespace, gen_settings: GenerationSettings, shared_models: Optional[Dict] = None +) -> tuple[torch.Tensor, Optional[int]]: + """main function for generation + + Args: + args: command line arguments + shared_models: dictionary containing pre-loaded models and encoded data + + Returns: + tuple[torch.Tensor, Optional[int]]: (latent tensor, one 
frame inference index) + """ + device, cfg, dit_dtype, dit_weight_dtype, vae_dtype = ( + gen_settings.device, + gen_settings.cfg, + gen_settings.dit_dtype, + gen_settings.dit_weight_dtype, + gen_settings.vae_dtype, + ) + + # prepare accelerator + mixed_precision = "bf16" if dit_dtype == torch.bfloat16 else "fp16" + accelerator = accelerate.Accelerator(mixed_precision=mixed_precision) + + # I2V or T2V + is_i2v = "i2v" in args.task or "flf2v" in args.task + + # prepare seed + seed = args.seed if args.seed is not None else random.randint(0, 2**32 - 1) + args.seed = seed # set seed to args for saving + + # Check if we have shared models + one_frame_inference_index = None + if shared_models is not None: + # Use shared models and encoded data + vae = shared_models.get("vae") + model = shared_models.get("model") + encoded_context = shared_models.get("encoded_contexts", {}).get(args.prompt) + + # prepare inputs + if is_i2v: + # I2V + noise, inputs, one_frame_inference_index = prepare_i2v_inputs(args, cfg, accelerator, device, vae, encoded_context) + else: + # T2V + noise, inputs = prepare_t2v_inputs(args, cfg, accelerator, device, vae, encoded_context) + else: + # prepare inputs without shared models + if is_i2v: + # I2V: need text encoder, VAE and CLIP + vae = load_vae(args, cfg, device, vae_dtype) + noise, inputs, one_frame_inference_index = prepare_i2v_inputs(args, cfg, accelerator, device, vae) + # vae is on CPU after prepare_i2v_inputs + else: + # T2V: need text encoder + vae = None + if cfg.is_fun_control: + # Fun-Control: need VAE for encoding control video + vae = load_vae(args, cfg, device, vae_dtype) + noise, inputs = prepare_t2v_inputs(args, cfg, accelerator, device, vae) + + # load DiT model + model = load_dit_model(args, cfg, device, dit_dtype, dit_weight_dtype, is_i2v) + + # merge LoRA weights + if args.lora_weight is not None and len(args.lora_weight) > 0: + merge_lora_weights(lora_wan, model, args, device) + + # if we only want to save the model, we can skip the rest + if args.save_merged_model: + return None + + # optimize model: fp8 conversion, block swap etc. 
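+ # optimize_model applies fp8 quantization (args.fp8_scaled / args.fp8), optional torch.compile of the DiT blocks, and CPU block swapping when args.blocks_to_swap > 0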
+ optimize_model(model, args, device, dit_dtype, dit_weight_dtype) + + # setup scheduler + scheduler, timesteps = setup_scheduler(args, cfg, device) + + # set random generator + seed_g = torch.Generator(device=device) + seed_g.manual_seed(seed) + + # run sampling + latent = run_sampling(model, noise, scheduler, timesteps, args, inputs, device, seed_g, accelerator, is_i2v) + if one_frame_inference_index is not None: + latent = latent[:, one_frame_inference_index : one_frame_inference_index + 1, :] + latent = latent.contiguous() # safetensors requires contiguous tensors :( + + # Only clean up shared models if they were created within this function + if shared_models is None: + # free memory + del model + del scheduler + synchronize_device(device) + + # wait for 5 seconds until block swap is done + if args.blocks_to_swap > 0: + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + + gc.collect() + clean_memory_on_device(device) + + # save VAE model for decoding + if vae is None: + args._vae = None + else: + args._vae = vae + + return latent + + +def decode_latent(latent: torch.Tensor, args: argparse.Namespace, cfg) -> torch.Tensor: + """decode latent + + Args: + latent: latent tensor + args: command line arguments + cfg: model configuration + + Returns: + torch.Tensor: decoded video or image + """ + device = torch.device(args.device) + + # load VAE model or use the one from the generation + vae_dtype = str_to_dtype(args.vae_dtype) if args.vae_dtype is not None else torch.bfloat16 + if hasattr(args, "_vae") and args._vae is not None: + vae = args._vae + else: + vae = load_vae(args, cfg, device, vae_dtype) + + vae.to_device(device) + + x0 = latent.to(device) + + logger.info(f"Decoding video from latents: {latent.shape}") + with torch.autocast(device_type=device.type, dtype=vae_dtype), torch.no_grad(): + videos = vae.decode(x0) + + # some tail frames may be corrupted when end frame is used, we add an option to remove them + if args.trim_tail_frames: + videos[0] = videos[0][:, : -args.trim_tail_frames] + + video = videos[0] + del videos + video = video.to(torch.float32).cpu() + + logger.info(f"Decoding complete") + return video + + +def save_latent(latent: torch.Tensor, args: argparse.Namespace, height: int, width: int) -> str: + """Save latent to file + + Args: + latent: latent tensor + args: command line arguments + height: height of frame + width: width of frame + + Returns: + str: Path to saved latent file + """ + save_path = args.save_path + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + seed = args.seed + video_length = args.video_length + latent_path = f"{save_path}/{time_flag}_{seed}_latent.safetensors" + + if args.no_metadata: + metadata = None + else: + metadata = { + "seeds": f"{seed}", + "prompt": f"{args.prompt}", + "height": f"{height}", + "width": f"{width}", + "video_length": f"{video_length}", + "infer_steps": f"{args.infer_steps}", + "guidance_scale": f"{args.guidance_scale}", + } + if args.negative_prompt is not None: + metadata["negative_prompt"] = f"{args.negative_prompt}" + + sd = {"latent": latent} + save_file(sd, latent_path, metadata=metadata) + logger.info(f"Latent saved to: {latent_path}") + + return latent_path + + +def save_video(video: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None) -> str: + """Save video to file + + Args: + video: Video tensor + args: command line arguments + original_base_name: Original base name (if latents are loaded 
from files) + + Returns: + str: Path to saved video file + """ + save_path = args.save_path + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + seed = args.seed + original_name = "" if original_base_name is None else f"_{original_base_name}" + video_path = f"{save_path}/{time_flag}_{seed}{original_name}.mp4" + + video = video.unsqueeze(0) + save_videos_grid(video, video_path, fps=args.fps, rescale=True) + logger.info(f"Video saved to: {video_path}") + + return video_path + + +def save_images(sample: torch.Tensor, args: argparse.Namespace, original_base_name: Optional[str] = None) -> str: + """Save images to directory + + Args: + sample: Video tensor + args: command line arguments + original_base_name: Original base name (if latents are loaded from files) + + Returns: + str: Path to saved images directory + """ + save_path = args.save_path + os.makedirs(save_path, exist_ok=True) + time_flag = datetime.fromtimestamp(time.time()).strftime("%Y%m%d-%H%M%S") + + seed = args.seed + original_name = "" if original_base_name is None else f"_{original_base_name}" + image_name = f"{time_flag}_{seed}{original_name}" + sample = sample.unsqueeze(0) + one_frame_inference = sample.shape[2] == 1 # check if one frame inference is used + save_images_grid(sample, save_path, image_name, rescale=True, create_subdir=not one_frame_inference) + logger.info(f"Sample images saved to: {save_path}/{image_name}") + + return f"{save_path}/{image_name}" + + +def save_output( + latent: torch.Tensor, args: argparse.Namespace, cfg, height: int, width: int, original_base_names: Optional[List[str]] = None +) -> None: + """save output + + Args: + latent: latent tensor + args: command line arguments + cfg: model configuration + height: height of frame + width: width of frame + original_base_names: original base names (if latents are loaded from files) + """ + if args.output_type == "latent" or args.output_type == "both" or args.output_type == "latent_images": + # save latent + save_latent(latent, args, height, width) + + if args.output_type == "video" or args.output_type == "both": + # save video + sample = decode_latent(latent.unsqueeze(0), args, cfg) + original_name = "" if original_base_names is None else f"_{original_base_names[0]}" + save_video(sample, args, original_name) + + elif args.output_type == "images": + # save images + sample = decode_latent(latent.unsqueeze(0), args, cfg) + original_name = "" if original_base_names is None else f"_{original_base_names[0]}" + save_images(sample, args, original_name) + + +def preprocess_prompts_for_batch(prompt_lines: List[str], base_args: argparse.Namespace) -> List[Dict]: + """Process multiple prompts for batch mode + + Args: + prompt_lines: List of prompt lines + base_args: Base command line arguments + + Returns: + List[Dict]: List of prompt data dictionaries + """ + prompts_data = [] + + for line in prompt_lines: + line = line.strip() + if not line or line.startswith("#"): # Skip empty lines and comments + continue + + # Parse prompt line and create override dictionary + prompt_data = parse_prompt_line(line) + logger.info(f"Parsed prompt data: {prompt_data}") + prompts_data.append(prompt_data) + + return prompts_data + + +def process_batch_prompts(prompts_data: List[Dict], args: argparse.Namespace) -> None: + """Process multiple prompts with model reuse + + Args: + prompts_data: List of prompt data dictionaries + args: Base command line arguments + """ + if not prompts_data: + logger.warning("No valid prompts 
found") + return + + # 1. Load configuration + gen_settings = get_generation_settings(args) + device, cfg, dit_dtype, dit_weight_dtype, vae_dtype = ( + gen_settings.device, + gen_settings.cfg, + gen_settings.dit_dtype, + gen_settings.dit_weight_dtype, + gen_settings.vae_dtype, + ) + is_i2v = "i2v" in args.task + + # 2. Encode all prompts + logger.info("Loading text encoder to encode all prompts") + text_encoder = load_text_encoder(args, cfg, device) + text_encoder.model.to(device) + + encoded_contexts = {} + + with torch.no_grad(): + for prompt_data in prompts_data: + prompt = prompt_data["prompt"] + prompt_args = apply_overrides(args, prompt_data) + n_prompt = prompt_data.get( + "negative_prompt", prompt_args.negative_prompt if prompt_args.negative_prompt else cfg.sample_neg_prompt + ) + + if args.fp8_t5: + with torch.amp.autocast(device_type=device.type, dtype=cfg.t5_dtype): + context = text_encoder([prompt], device) + context_null = text_encoder([n_prompt], device) + else: + context = text_encoder([prompt], device) + context_null = text_encoder([n_prompt], device) + + encoded_contexts[prompt] = {"context": context, "context_null": context_null} + + # Free text encoder and clean memory + del text_encoder + clean_memory_on_device(device) + + # 3. Process I2V additional encodings if needed + vae = None + if is_i2v: + logger.info("Loading VAE and CLIP for I2V preprocessing") + vae = load_vae(args, cfg, device, vae_dtype) + vae.to_device(device) + + clip = load_clip_model(args, cfg, device) + clip.model.to(device) + + # Process each image and encode with CLIP + for prompt_data in prompts_data: + if "image_path" not in prompt_data: + continue + + prompt_args = apply_overrides(args, prompt_data) + if not os.path.exists(prompt_args.image_path): + logger.warning(f"Image path not found: {prompt_args.image_path}") + continue + + # Load and encode image with CLIP + img = Image.open(prompt_args.image_path).convert("RGB") + img_tensor = TF.to_tensor(img).sub_(0.5).div_(0.5).to(device) + + with torch.amp.autocast(device_type=device.type, dtype=torch.float16), torch.no_grad(): + clip_context = clip.visual([img_tensor[:, None, :, :]]) + + if prompt_args.end_image_path is not None and os.path.exists(prompt_args.end_image_path): + end_img = Image.open(prompt_args.end_image_path).convert("RGB") + end_img_tensor = TF.to_tensor(end_img).sub_(0.5).div_(0.5).to(device) + end_clip_context = clip.visual([end_img_tensor[:, None, :, :]]) + clip_context = torch.concat([clip_context, end_clip_context], dim=0) + + encoded_contexts[prompt_data["prompt"]]["clip_context"] = clip_context + + # Free CLIP and clean memory + del clip + clean_memory_on_device(device) + + # Keep VAE in CPU memory for later use + vae.to_device("cpu") + elif cfg.is_fun_control: + # For Fun-Control, we need VAE but keep it on CPU + vae = load_vae(args, cfg, device, vae_dtype) + vae.to_device("cpu") + + # 4. Load DiT model + logger.info("Loading DiT model") + model = load_dit_model(args, cfg, device, dit_dtype, dit_weight_dtype, is_i2v) + + # 5. Merge LoRA weights if needed + if args.lora_weight is not None and len(args.lora_weight) > 0: + merge_lora_weights(lora_wan, model, args, device) + if args.save_merged_model: + logger.info("Model merged and saved. Exiting.") + return + + # 6. Optimize model + optimize_model(model, args, device, dit_dtype, dit_weight_dtype) + + # Create shared models dict for generate function + shared_models = {"vae": vae, "model": model, "encoded_contexts": encoded_contexts} + + # 7. 
Generate for each prompt + all_latents = [] + all_prompt_args = [] + + for i, prompt_data in enumerate(prompts_data): + logger.info(f"Processing prompt {i+1}/{len(prompts_data)}: {prompt_data['prompt'][:50]}...") + + # Apply overrides for this prompt + prompt_args = apply_overrides(args, prompt_data) + + # Generate latent + latent = generate(prompt_args, gen_settings, shared_models) + + # Save latent if needed + height, width, _ = check_inputs(prompt_args) + if prompt_args.output_type == "latent" or prompt_args.output_type == "both" or prompt_args.output_type == "latent_images": + save_latent(latent, prompt_args, height, width) + + all_latents.append(latent) + all_prompt_args.append(prompt_args) + + # 8. Free DiT model + del model + clean_memory_on_device(device) + synchronize_device(device) + + # wait for 5 seconds until block swap is done + if args.blocks_to_swap > 0: + logger.info("Waiting for 5 seconds to finish block swap") + time.sleep(5) + + gc.collect() + clean_memory_on_device(device) + + # 9. Decode latents if needed + if args.output_type != "latent": + logger.info("Decoding latents to videos/images") + + if vae is None: + vae = load_vae(args, cfg, device, vae_dtype) + + vae.to_device(device) + + for i, (latent, prompt_args) in enumerate(zip(all_latents, all_prompt_args)): + logger.info(f"Decoding output {i+1}/{len(all_latents)}") + + # Decode latent + video = decode_latent(latent.unsqueeze(0), prompt_args, cfg) + + # Save as video or images + if prompt_args.output_type == "video" or prompt_args.output_type == "both": + save_video(video, prompt_args) + elif prompt_args.output_type == "images": + save_images(video, prompt_args) + + # Free VAE + del vae + + clean_memory_on_device(device) + gc.collect() + + +def process_interactive(args: argparse.Namespace) -> None: + """Process prompts in interactive mode + + Args: + args: Base command line arguments + """ + gen_settings = get_generation_settings(args) + device, cfg, dit_dtype, dit_weight_dtype, vae_dtype = ( + gen_settings.device, + gen_settings.cfg, + gen_settings.dit_dtype, + gen_settings.dit_weight_dtype, + gen_settings.vae_dtype, + ) + is_i2v = "i2v" in args.task or "flf2v" in args.task + + # Initialize models to None + text_encoder = None + vae = None + model = None + clip = None + + print("Interactive mode. Enter prompts (Ctrl+D or Ctrl+Z (Windows) to exit):") + + try: + import prompt_toolkit + except ImportError: + logger.warning("prompt_toolkit not found. Using basic input instead.") + prompt_toolkit = None + + if prompt_toolkit: + session = prompt_toolkit.PromptSession() + + def input_line(prompt: str) -> str: + return session.prompt(prompt) + + else: + + def input_line(prompt: str) -> str: + return input(prompt) + + try: + while True: + try: + line = input_line("> ") + if not line.strip(): + continue + if len(line.strip()) == 1 and line.strip() in ["\x04", "\x1a"]: # Ctrl+D or Ctrl+Z with prompt_toolkit + raise EOFError # Exit on Ctrl+D or Ctrl+Z + + # Parse prompt + prompt_data = parse_prompt_line(line) + prompt_args = apply_overrides(args, prompt_data) + + # Ensure we have all the models we need + + # 1. 
Load text encoder if not already loaded + if text_encoder is None: + logger.info("Loading text encoder") + text_encoder = load_text_encoder(args, cfg, device) + + text_encoder.model.to(device) + + # Encode prompt + n_prompt = prompt_data.get( + "negative_prompt", prompt_args.negative_prompt if prompt_args.negative_prompt else cfg.sample_neg_prompt + ) + + with torch.no_grad(): + if args.fp8_t5: + with torch.amp.autocast(device_type=device.type, dtype=cfg.t5_dtype): + context = text_encoder([prompt_data["prompt"]], device) + context_null = text_encoder([n_prompt], device) + else: + context = text_encoder([prompt_data["prompt"]], device) + context_null = text_encoder([n_prompt], device) + + encoded_context = {"context": context, "context_null": context_null} + + # Move text encoder to CPU after use + text_encoder.model.to("cpu") + + # 2. For I2V, we need CLIP and VAE + if is_i2v: + if clip is None: + logger.info("Loading CLIP model") + clip = load_clip_model(args, cfg, device) + + clip.model.to(device) + + # Encode image with CLIP if there's an image path + if prompt_args.image_path and os.path.exists(prompt_args.image_path): + img = Image.open(prompt_args.image_path).convert("RGB") + img_tensor = TF.to_tensor(img).sub_(0.5).div_(0.5).to(device) + + with torch.amp.autocast(device_type=device.type, dtype=torch.float16), torch.no_grad(): + clip_context = clip.visual([img_tensor[:, None, :, :]]) + + if prompt_args.end_image_path is not None and os.path.exists(prompt_args.end_image_path): + end_img = Image.open(prompt_args.end_image_path).convert("RGB") + end_img_tensor = TF.to_tensor(end_img).sub_(0.5).div_(0.5).to(device) + with torch.amp.autocast(device_type=device.type, dtype=torch.float16), torch.no_grad(): + end_clip_context = clip.visual([end_img_tensor[:, None, :, :]]) + clip_context = torch.concat([clip_context, end_clip_context], dim=0) + + encoded_context["clip_context"] = clip_context + + # Move CLIP to CPU after use + clip.model.to("cpu") + + # Load VAE if needed + if vae is None: + logger.info("Loading VAE model") + vae = load_vae(args, cfg, device, vae_dtype) + elif cfg.is_fun_control and vae is None: + # For Fun-Control, we need VAE + logger.info("Loading VAE model for Fun-Control") + vae = load_vae(args, cfg, device, vae_dtype) + + # 3. 
Load DiT model if not already loaded + if model is None: + logger.info("Loading DiT model") + model = load_dit_model(args, cfg, device, dit_dtype, dit_weight_dtype, is_i2v) + + # Merge LoRA weights if needed + if args.lora_weight is not None and len(args.lora_weight) > 0: + merge_lora_weights(lora_wan, model, args, device) + + # Optimize model + optimize_model(model, args, device, dit_dtype, dit_weight_dtype) + else: + # Move model to GPU if it was offloaded + model.to(device) + + # Create shared models dict + shared_models = {"vae": vae, "model": model, "encoded_contexts": {prompt_data["prompt"]: encoded_context}} + + # Generate latent + latent = generate(prompt_args, gen_settings, shared_models) + + # Move model to CPU after generation + model.to("cpu") + + # Save latent if needed + height, width, _ = check_inputs(prompt_args) + if ( + prompt_args.output_type == "latent" + or prompt_args.output_type == "both" + or prompt_args.output_type == "latent_images" + ): + save_latent(latent, prompt_args, height, width) + + # Decode and save output + if prompt_args.output_type != "latent": + if vae is None: + vae = load_vae(args, cfg, device, vae_dtype) + + vae.to_device(device) + video = decode_latent(latent.unsqueeze(0), prompt_args, cfg) + + if prompt_args.output_type == "video" or prompt_args.output_type == "both": + save_video(video, prompt_args) + elif prompt_args.output_type == "images": + save_images(video, prompt_args) + + # Move VAE to CPU after use + vae.to_device("cpu") + + clean_memory_on_device(device) + + except KeyboardInterrupt: + print("\nInterrupted. Continue (Ctrl+D or Ctrl+Z (Windows) to exit)") + continue + + except EOFError: + print("\nExiting interactive mode") + + # Clean up all models + if text_encoder is not None: + del text_encoder + if clip is not None: + del clip + if vae is not None: + del vae + if model is not None: + del model + + clean_memory_on_device(device) + gc.collect() + + +def get_generation_settings(args: argparse.Namespace) -> GenerationSettings: + device = torch.device(args.device) + + cfg = WAN_CONFIGS[args.task] + + # select dtype + dit_dtype = detect_wan_sd_dtype(args.dit) if args.dit is not None else torch.bfloat16 + if dit_dtype.itemsize == 1: + # if weight is in fp8, use bfloat16 for DiT (input/output) + dit_dtype = torch.bfloat16 + if args.fp8_scaled: + raise ValueError( + "DiT weights is already in fp8 format, cannot scale to fp8. 
Please use fp16/bf16 weights / DiTの重みはすでにfp8形式です。fp8にスケーリングできません。fp16/bf16の重みを使用してください" + ) + + dit_weight_dtype = dit_dtype # default + if args.fp8_scaled: + dit_weight_dtype = None # various precision weights, so don't cast to specific dtype + elif args.fp8: + dit_weight_dtype = torch.float8_e4m3fn + + vae_dtype = str_to_dtype(args.vae_dtype) if args.vae_dtype is not None else dit_dtype + logger.info( + f"Using device: {device}, DiT precision: {dit_dtype}, weight precision: {dit_weight_dtype}, VAE precision: {vae_dtype}" + ) + + gen_settings = GenerationSettings( + device=device, + cfg=cfg, + dit_dtype=dit_dtype, + dit_weight_dtype=dit_weight_dtype, + vae_dtype=vae_dtype, + ) + return gen_settings + + +def main(): + # Parse arguments + args = parse_args() + + # Check if latents are provided + latents_mode = args.latent_path is not None and len(args.latent_path) > 0 + + # Set device + device = args.device if args.device is not None else "cuda" if torch.cuda.is_available() else "cpu" + device = torch.device(device) + logger.info(f"Using device: {device}") + args.device = device + + if latents_mode: + # Original latent decode mode + cfg = WAN_CONFIGS[args.task] # any task is fine + original_base_names = [] + latents_list = [] + seeds = [] + + assert len(args.latent_path) == 1, "Only one latent path is supported for now" + + for latent_path in args.latent_path: + original_base_names.append(os.path.splitext(os.path.basename(latent_path))[0]) + seed = 0 + + if os.path.splitext(latent_path)[1] != ".safetensors": + latents = torch.load(latent_path, map_location="cpu") + else: + latents = load_file(latent_path)["latent"] + with safe_open(latent_path, framework="pt") as f: + metadata = f.metadata() + if metadata is None: + metadata = {} + logger.info(f"Loaded metadata: {metadata}") + + if "seeds" in metadata: + seed = int(metadata["seeds"]) + if "height" in metadata and "width" in metadata: + height = int(metadata["height"]) + width = int(metadata["width"]) + args.video_size = [height, width] + if "video_length" in metadata: + args.video_length = int(metadata["video_length"]) + + seeds.append(seed) + latents_list.append(latents) + + logger.info(f"Loaded latent from {latent_path}. 
Shape: {latents.shape}") + + latent = torch.stack(latents_list, dim=0) # [N, ...], must be same shape + + height = latents.shape[-2] + width = latents.shape[-1] + height *= cfg.patch_size[1] * cfg.vae_stride[1] + width *= cfg.patch_size[2] * cfg.vae_stride[2] + video_length = latents.shape[1] + video_length = (video_length - 1) * cfg.vae_stride[0] + 1 + args.seed = seeds[0] + + save_output(latent[0], args, cfg, height, width, original_base_names) + + elif args.from_file: + # Batch mode from file + args = setup_args(args) + + # Read prompts from file + with open(args.from_file, "r", encoding="utf-8") as f: + prompt_lines = f.readlines() + + # Process prompts + prompts_data = preprocess_prompts_for_batch(prompt_lines, args) + process_batch_prompts(prompts_data, args) + + elif args.interactive: + # Interactive mode + args = setup_args(args) + process_interactive(args) + + else: + # Single prompt mode (original behavior) + args = setup_args(args) + height, width, video_length = check_inputs(args) + + logger.info( + f"Video size: {height}x{width}@{video_length} (HxW@F), fps: {args.fps}, " + f"infer_steps: {args.infer_steps}, flow_shift: {args.flow_shift}" + ) + + # Generate latent + gen_settings = get_generation_settings(args) + latent = generate(args, gen_settings) + + # Make sure the model is freed from GPU memory + gc.collect() + clean_memory_on_device(args.device) + + # Save latent and video + if args.save_merged_model: + return + + # Add batch dimension + latent = latent.unsqueeze(0) + save_output(latent[0], args, WAN_CONFIGS[args.task], height, width) + + logger.info("Done!") + + +if __name__ == "__main__": + main() diff --git a/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_train_network.py b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_train_network.py new file mode 100644 index 0000000000000000000000000000000000000000..284822cb891189ea6cd5e2e778c6c59b9c6fcaec --- /dev/null +++ b/exp_code/1_benchmark/musubi-tuner/src/musubi_tuner/wan_train_network.py @@ -0,0 +1,544 @@ +import argparse +from typing import Optional +from PIL import Image + + +import numpy as np +import torch +import torchvision.transforms.functional as TF +from tqdm import tqdm +from accelerate import Accelerator, init_empty_weights + +from musubi_tuner.dataset.image_video_dataset import ARCHITECTURE_WAN, ARCHITECTURE_WAN_FULL, load_video +from musubi_tuner.hv_generate_video import resize_image_to_bucket +from musubi_tuner.hv_train_network import ( + NetworkTrainer, + load_prompts, + clean_memory_on_device, + setup_parser_common, + read_config_from_file, +) +from musubi_tuner.wan_generate_video import parse_one_frame_inference_args + +import logging + +logger = logging.getLogger(__name__) +logging.basicConfig(level=logging.INFO) + +from musubi_tuner.utils import model_utils +from musubi_tuner.utils.safetensors_utils import load_safetensors, MemoryEfficientSafeOpen +from musubi_tuner.wan.configs import WAN_CONFIGS +from musubi_tuner.wan.modules.clip import CLIPModel +from musubi_tuner.wan.modules.model import WanModel, detect_wan_sd_dtype, load_wan_model +from musubi_tuner.wan.modules.t5 import T5EncoderModel +from musubi_tuner.wan.modules.vae import WanVAE +from musubi_tuner.wan.utils.fm_solvers_unipc import FlowUniPCMultistepScheduler + + +class WanNetworkTrainer(NetworkTrainer): + def __init__(self): + super().__init__() + + # region model specific + + @property + def architecture(self) -> str: + return ARCHITECTURE_WAN + + @property + def architecture_full_name(self) -> str: + return 
ARCHITECTURE_WAN_FULL + + def handle_model_specific_args(self, args): + self.config = WAN_CONFIGS[args.task] + # we cannot use config.i2v because Fun-Control T2V has i2v flag TODO refactor this + self._i2v_training = "i2v" in args.task or "flf2v" in args.task + self._control_training = self.config.is_fun_control + + self.dit_dtype = detect_wan_sd_dtype(args.dit) + + if self.dit_dtype == torch.float16: + assert args.mixed_precision in ["fp16", "no"], "DiT weights are in fp16, mixed precision must be fp16 or no" + elif self.dit_dtype == torch.bfloat16: + assert args.mixed_precision in ["bf16", "no"], "DiT weights are in bf16, mixed precision must be bf16 or no" + + if args.fp8_scaled and self.dit_dtype.itemsize == 1: + raise ValueError( + "DiT weights is already in fp8 format, cannot scale to fp8. Please use fp16/bf16 weights / DiTの重みはすでにfp8形式です。fp8にスケーリングできません。fp16/bf16の重みを使用してください" + ) + + # dit_dtype cannot be fp8, so we select the appropriate dtype + if self.dit_dtype.itemsize == 1: + self.dit_dtype = torch.float16 if args.mixed_precision == "fp16" else torch.bfloat16 + + args.dit_dtype = model_utils.dtype_to_str(self.dit_dtype) + + self.default_guidance_scale = 1.0 # not used + + def process_sample_prompts( + self, + args: argparse.Namespace, + accelerator: Accelerator, + sample_prompts: str, + ): + config = self.config + device = accelerator.device + t5_path, clip_path, fp8_t5 = args.t5, args.clip, args.fp8_t5 + + logger.info(f"cache Text Encoder outputs for sample prompt: {sample_prompts}") + prompts = load_prompts(sample_prompts) + + def encode_for_text_encoder(text_encoder): + sample_prompts_te_outputs = {} # (prompt) -> (embeds, mask) + # with accelerator.autocast(), torch.no_grad(): # this causes NaN if dit_dtype is fp16 + t5_dtype = config.t5_dtype + with torch.amp.autocast(device_type=device.type, dtype=t5_dtype), torch.no_grad(): + for prompt_dict in prompts: + if "negative_prompt" not in prompt_dict: + prompt_dict["negative_prompt"] = self.config["sample_neg_prompt"] + for p in [prompt_dict.get("prompt", ""), prompt_dict.get("negative_prompt", None)]: + if p is None: + continue + if p not in sample_prompts_te_outputs: + logger.info(f"cache Text Encoder outputs for prompt: {p}") + + prompt_outputs = text_encoder([p], device) + sample_prompts_te_outputs[p] = prompt_outputs + + return sample_prompts_te_outputs + + # Load Text Encoder 1 and encode + logger.info(f"loading T5: {t5_path}") + t5 = T5EncoderModel(text_len=config.text_len, dtype=config.t5_dtype, device=device, weight_path=t5_path, fp8=fp8_t5) + + logger.info("encoding with Text Encoder 1") + te_outputs_1 = encode_for_text_encoder(t5) + del t5 + + # load CLIP and encode image (for I2V training) + # Note: VAE encoding is done in do_inference() for I2V training, because we have VAE in the pipeline. 
Control video is also done in do_inference() + sample_prompts_image_embs = {} + for prompt_dict in prompts: + if prompt_dict.get("image_path", None) is not None and self.i2v_training: + sample_prompts_image_embs[prompt_dict["image_path"]] = None # this will be replaced with CLIP context + if prompt_dict.get("end_image_path", None) is not None and self.i2v_training: + sample_prompts_image_embs[prompt_dict["end_image_path"]] = None + + if len(sample_prompts_image_embs) > 0: + logger.info(f"loading CLIP: {clip_path}") + assert clip_path is not None, "CLIP path is required for I2V training / I2V学習にはCLIPのパスが必要です" + clip = CLIPModel(dtype=config.clip_dtype, device=device, weight_path=clip_path) + clip.model.to(device) + + logger.info(f"Encoding image to CLIP context") + with torch.amp.autocast(device_type=device.type, dtype=torch.float16), torch.no_grad(): + for image_path in sample_prompts_image_embs: + logger.info(f"Encoding image: {image_path}") + img = Image.open(image_path).convert("RGB") + img = TF.to_tensor(img).sub_(0.5).div_(0.5).to(device) # -1 to 1 + clip_context = clip.visual([img[:, None, :, :]]) + sample_prompts_image_embs[image_path] = clip_context + + del clip + clean_memory_on_device(device) + + # prepare sample parameters + sample_parameters = [] + for prompt_dict in prompts: + prompt_dict_copy = prompt_dict.copy() + + p = prompt_dict.get("prompt", "") + prompt_dict_copy["t5_embeds"] = te_outputs_1[p][0] + + p = prompt_dict.get("negative_prompt", None) + if p is not None: + prompt_dict_copy["negative_t5_embeds"] = te_outputs_1[p][0] + + p = prompt_dict.get("image_path", None) + if p is not None and self.i2v_training: + prompt_dict_copy["clip_embeds"] = sample_prompts_image_embs[p] + + p = prompt_dict.get("end_image_path", None) + if p is not None and self.i2v_training: + prompt_dict_copy["end_image_clip_embeds"] = sample_prompts_image_embs[p] + + sample_parameters.append(prompt_dict_copy) + + clean_memory_on_device(accelerator.device) + + return sample_parameters + + def do_inference( + self, + accelerator, + args, + sample_parameter, + vae, + dit_dtype, + transformer, + discrete_flow_shift, + sample_steps, + width, + height, + frame_count, + generator, + do_classifier_free_guidance, + guidance_scale, + cfg_scale, + image_path=None, + control_video_path=None, + ): + """architecture dependent inference""" + model: WanModel = transformer + device = accelerator.device + if cfg_scale is None: + cfg_scale = 5.0 + do_classifier_free_guidance = do_classifier_free_guidance and cfg_scale != 1.0 + + # prepare parameters + one_frame_mode = args.one_frame + if one_frame_mode: + target_index, control_indices, f_indices, one_frame_inference_index = parse_one_frame_inference_args( + sample_parameter["one_frame"] + ) + latent_video_length = len(f_indices) # number of frames in the video + else: + target_index, control_indices, f_indices, one_frame_inference_index = None, None, None, None + + # Calculate latent video length based on VAE version + latent_video_length = (frame_count - 1) // self.config["vae_stride"][0] + 1 + + # Get embeddings + context = sample_parameter["t5_embeds"].to(device=device) + if do_classifier_free_guidance: + context_null = sample_parameter["negative_t5_embeds"].to(device=device) + else: + context_null = None + + num_channels_latents = 16 # model.in_dim + vae_scale_factor = self.config["vae_stride"][1] + + # Initialize latents + lat_h = height // vae_scale_factor + lat_w = width // vae_scale_factor + shape_or_frame = (1, num_channels_latents, 1, lat_h, lat_w) + 
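# initial latents are sampled one frame at a time below and concatenated along the frame axis (dim=2), giving a (1, C, F, lat_h, lat_w) tensor +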
latents = [] + for _ in range(latent_video_length): + latents.append(torch.randn(shape_or_frame, generator=generator, device=device, dtype=torch.float32)) + latents = torch.cat(latents, dim=2) + + image_latents = None + + if one_frame_mode: + # One frame inference mode + logger.info( + f"One frame inference mode: target_index={target_index}, control_indices={control_indices}, f_indices={f_indices}" + ) + vae.to(device) + vae.eval() + + # prepare start and control latent + def encode_image(path): + image = Image.open(path) + if image.mode == "RGBA": + alpha = image.split()[-1] + image = image.convert("RGB") + else: + alpha = None + image = resize_image_to_bucket(image, (width, height)) # returns a numpy array + image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(1).unsqueeze(0).float() # 1, C, 1, H, W + image = image / 127.5 - 1 # -1 to 1 + with torch.amp.autocast(device_type=device.type, dtype=vae.dtype), torch.no_grad(): + image = image.to(device=device) + latent = vae.encode(image)[0] + return latent, alpha + + control_latents = [] + control_alphas = [] + if "control_image_path" in sample_parameter: + for control_image_path in sample_parameter["control_image_path"]: + control_latent, control_alpha = encode_image(control_image_path) + control_latents.append(control_latent) + control_alphas.append(control_alpha) + + with torch.amp.autocast(device_type=device.type, dtype=vae.dtype), torch.no_grad(): + black_image_latent = vae.encode([torch.zeros((3, 1, height, width), dtype=torch.float32, device=device)])[0] + + # Create latent and mask for the required number of frames + image_latents = torch.zeros(4 + 16, len(f_indices), lat_h, lat_w, dtype=torch.float32, device=device) + ci = 0 + for j, index in enumerate(f_indices): + if index == target_index: + image_latents[4:, j : j + 1, :, :] = black_image_latent # set black latent for the target frame + else: + image_latents[:4, j, :, :] = 1.0 # set mask to 1.0 for the clean latent frames + image_latents[4:, j : j + 1, :, :] = control_latents[ci] # set control latent + ci += 1 + image_latents = image_latents.unsqueeze(0) # add batch dim + + vae.to("cpu") + clean_memory_on_device(device) + + elif self.i2v_training or self.control_training: + # Move VAE to the appropriate device for sampling: consider to cache image latents in CPU in advance + vae.to(device) + vae.eval() + + if self.i2v_training: + image = Image.open(image_path) + image = resize_image_to_bucket(image, (width, height)) # returns a numpy array + image = torch.from_numpy(image).permute(2, 0, 1).unsqueeze(1).float() # C, 1, H, W + image = image / 127.5 - 1 # -1 to 1 + + # Create mask for the required number of frames + msk = torch.ones(1, frame_count, lat_h, lat_w, device=device) + msk[:, 1:] = 0 + msk = torch.concat([torch.repeat_interleave(msk[:, 0:1], repeats=4, dim=1), msk[:, 1:]], dim=1) + msk = msk.view(1, msk.shape[1] // 4, 4, lat_h, lat_w) + msk = msk.transpose(1, 2) # B, C, T, H, W + + with torch.amp.autocast(device_type=device.type, dtype=vae.dtype), torch.no_grad(): + # Zero padding for the required number of frames only + padding_frames = frame_count - 1 # The first frame is the input image + image = torch.concat([image, torch.zeros(3, padding_frames, height, width)], dim=1).to(device=device) + y = vae.encode([image])[0] + + y = y[:, :latent_video_length] # may be not needed + y = y.unsqueeze(0) # add batch dim + image_latents = torch.concat([msk, y], dim=1) + + if self.control_training: + # Control video + video = load_video(control_video_path, 0, frame_count, 
+ video = np.stack(video, axis=0) # F, H, W, C
+ video = torch.from_numpy(video).permute(3, 0, 1, 2).float() # C, F, H, W
+ video = video / 127.5 - 1 # -1 to 1
+ video = video.to(device=device)
+
+ with torch.amp.autocast(device_type=device.type, dtype=vae.dtype), torch.no_grad():
+ control_latents = vae.encode([video])[0]
+ control_latents = control_latents[:, :latent_video_length]
+ control_latents = control_latents.unsqueeze(0) # add batch dim
+
+ # We support Wan2.1-Fun-Control only
+ if image_latents is not None:
+ image_latents = image_latents[:, 4:] # remove mask for Wan2.1-Fun-Control
+ image_latents[:, :, 1:] = 0 # zero out all frames except the first
+ else:
+ image_latents = torch.zeros_like(control_latents) # B, C, F, H, W
+
+ image_latents = torch.concat([control_latents, image_latents], dim=1) # B, C, F, H, W
+
+ vae.to("cpu")
+ clean_memory_on_device(device)
+
+ # use the default value for num_train_timesteps (1000)
+ scheduler = FlowUniPCMultistepScheduler(shift=1, use_dynamic_shifting=False)
+ scheduler.set_timesteps(sample_steps, device=device, shift=discrete_flow_shift)
+ timesteps = scheduler.timesteps
+
+ # Generate noise for the required number of frames only
+ noise = torch.randn(16, latent_video_length, lat_h, lat_w, dtype=torch.float32, generator=generator, device=device).to(
+ "cpu"
+ )
+
+ # prepare the model input
+ max_seq_len = latent_video_length * lat_h * lat_w // (self.config.patch_size[1] * self.config.patch_size[2])
+ arg_c = {"context": [context], "seq_len": max_seq_len}
+ arg_null = {"context": [context_null], "seq_len": max_seq_len}
+
+ if self.i2v_training and not one_frame_mode:
+ arg_c["clip_fea"] = sample_parameter["clip_embeds"].to(device=device, dtype=dit_dtype)
+ arg_null["clip_fea"] = arg_c["clip_fea"]
+ if one_frame_mode:
+ if "end_image_clip_embeds" in sample_parameter:
+ arg_c["clip_fea"] = torch.cat(
+ [sample_parameter["clip_embeds"], sample_parameter["end_image_clip_embeds"]], dim=0
+ ).to(device=device, dtype=dit_dtype)
+ else:
+ arg_c["clip_fea"] = sample_parameter["clip_embeds"].to(device=device, dtype=dit_dtype)
+ arg_null["clip_fea"] = arg_c["clip_fea"]
+ arg_c["f_indices"] = [f_indices]
+ arg_null["f_indices"] = arg_c["f_indices"]
+ # print(f"One arg_c: {arg_c}, arg_null: {arg_null}")
+ if self.i2v_training or self.control_training:
+ arg_c["y"] = image_latents
+ arg_null["y"] = image_latents
+
+ # Wrap the inner loop with tqdm to track progress over timesteps
+ prompt_idx = sample_parameter.get("enum", 0)
+ latent = noise
+ with torch.no_grad():
+ for i, t in enumerate(tqdm(timesteps, desc=f"Sampling timesteps for prompt {prompt_idx+1}")):
+ latent_model_input = [latent.to(device=device)]
+ timestep = t.unsqueeze(0)
+
+ with accelerator.autocast():
+ noise_pred_cond = model(latent_model_input, t=timestep, **arg_c)[0].to("cpu")
+ if do_classifier_free_guidance:
+ noise_pred_uncond = model(latent_model_input, t=timestep, **arg_null)[0].to("cpu")
+ else:
+ noise_pred_uncond = None
+
+ if do_classifier_free_guidance:
+ noise_pred = noise_pred_uncond + cfg_scale * (noise_pred_cond - noise_pred_uncond)
+ else:
+ noise_pred = noise_pred_cond
+
+ temp_x0 = scheduler.step(noise_pred.unsqueeze(0), t, latent.unsqueeze(0), return_dict=False, generator=generator)[0]
+ latent = temp_x0.squeeze(0)
+
+ # Move VAE to the appropriate device for sampling
+ vae.to(device)
+ vae.eval()
+
+ # Decode latents to video
+ logger.info(f"Decoding video from latents: {latent.shape}")
+ latent = latent.unsqueeze(0) # add batch dim
+ latent = latent.to(device=device)
+
+ if one_frame_mode:
+ latent = latent[:, :, one_frame_inference_index : one_frame_inference_index + 1, :, :] # select the one frame
+ with torch.amp.autocast(device_type=device.type, dtype=vae.dtype), torch.no_grad():
+ video = vae.decode(latent)[0] # vae returns list
+ video = video.unsqueeze(0) # add batch dim
+ del latent
+
+ logger.info("Decoding complete")
+ video = video.to(torch.float32).cpu()
+ video = (video / 2 + 0.5).clamp(0, 1) # -1 to 1 -> 0 to 1
+
+ vae.to("cpu")
+ clean_memory_on_device(device)
+
+ return video
+
+ def load_vae(self, args: argparse.Namespace, vae_dtype: torch.dtype, vae_path: str):
+ vae_path = args.vae
+
+ logger.info(f"Loading VAE model from {vae_path}")
+ cache_device = torch.device("cpu") if args.vae_cache_cpu else None
+ vae = WanVAE(vae_path=vae_path, device="cpu", dtype=vae_dtype, cache_device=cache_device)
+ return vae
+
+ def load_transformer(
+ self,
+ accelerator: Accelerator,
+ args: argparse.Namespace,
+ dit_path: str,
+ attn_mode: str,
+ split_attn: bool,
+ loading_device: str,
+ dit_weight_dtype: Optional[torch.dtype],
+ ):
+ model = load_wan_model(
+ self.config, accelerator.device, dit_path, attn_mode, split_attn, loading_device, dit_weight_dtype, args.fp8_scaled
+ )
+ return model
+
+ def scale_shift_latents(self, latents):
+ return latents
+
+ def call_dit(
+ self,
+ args: argparse.Namespace,
+ accelerator: Accelerator,
+ transformer,
+ latents: torch.Tensor,
+ batch: dict[str, torch.Tensor],
+ noise: torch.Tensor,
+ noisy_model_input: torch.Tensor,
+ timesteps: torch.Tensor,
+ network_dtype: torch.dtype,
+ ):
+ model: WanModel = transformer
+
+ # I2V training and Control training
+ image_latents = None
+ clip_fea = None
+ if self.i2v_training:
+ image_latents = batch["latents_image"]
+ image_latents = image_latents.to(device=accelerator.device, dtype=network_dtype)
+ clip_fea = batch["clip"]
+ clip_fea = clip_fea.to(device=accelerator.device, dtype=network_dtype)
+
+ # clip_fea is [B, N, D] (normal) or [B, 1, N, D] (one frame) for I2V, and [B, 2, N, D] for FLF2V; reshape to [B, N, D] for I2V and [B*2, N, D] for FLF2V
+ if clip_fea.shape[1] == 1:
+ clip_fea = clip_fea.squeeze(1)
+ elif clip_fea.shape[1] == 2:
+ clip_fea = clip_fea.view(-1, clip_fea.shape[2], clip_fea.shape[3])
+
+ if self.control_training:
+ control_latents = batch["latents_control"]
+ control_latents = control_latents.to(device=accelerator.device, dtype=network_dtype)
+ if image_latents is not None:
+ image_latents = image_latents[:, 4:] # remove mask for Wan2.1-Fun-Control
+ image_latents[:, :, 1:] = 0 # zero out all frames except the first
+ else:
+ image_latents = torch.zeros_like(control_latents) # B, C, F, H, W
+ image_latents = torch.concat([control_latents, image_latents], dim=1) # B, C, F, H, W
+ control_latents = None
+
+ context = [t.to(device=accelerator.device, dtype=network_dtype) for t in batch["t5"]]
+
+ # ensure the hidden state will require grad
+ if args.gradient_checkpointing:
+ noisy_model_input.requires_grad_(True)
+ for t in context:
+ t.requires_grad_(True)
+ if image_latents is not None:
+ image_latents.requires_grad_(True)
+ if clip_fea is not None:
+ clip_fea.requires_grad_(True)
+
+ # call DiT
+ lat_f, lat_h, lat_w = latents.shape[2:5]
+ seq_len = lat_f * lat_h * lat_w // (self.config.patch_size[0] * self.config.patch_size[1] * self.config.patch_size[2])
+ latents = latents.to(device=accelerator.device, dtype=network_dtype)
+ noisy_model_input = noisy_model_input.to(device=accelerator.device, dtype=network_dtype)
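+ # Forward pass through the DiT under autocast; the model returns a list of per-sample predictions that is stacked into a single tensor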
+ with accelerator.autocast():
+ model_pred = model(noisy_model_input, t=timesteps, context=context, clip_fea=clip_fea, seq_len=seq_len, y=image_latents)
+ model_pred = torch.stack(model_pred, dim=0) # list to tensor
+
+ # flow matching loss
+ target = noise - latents
+
+ return model_pred, target
+
+ # endregion model specific
+
+
+def wan_setup_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
+ """Wan2.1 specific parser setup"""
+ parser.add_argument("--task", type=str, default="t2v-14B", choices=list(WAN_CONFIGS.keys()), help="The task to run.")
+ parser.add_argument("--fp8_scaled", action="store_true", help="use scaled fp8 for DiT")
+ parser.add_argument("--t5", type=str, default=None, help="text encoder (T5) checkpoint path")
+ parser.add_argument("--fp8_t5", action="store_true", help="use fp8 for Text Encoder model")
+ parser.add_argument(
+ "--clip",
+ type=str,
+ default=None,
+ help="text encoder (CLIP) checkpoint path, optional. Required when training an I2V model",
+ )
+ parser.add_argument("--vae_cache_cpu", action="store_true", help="cache features in VAE on CPU")
+ parser.add_argument("--one_frame", action="store_true", help="Use one frame sampling method for sample generation")
+ return parser
+
+
+def main():
+ parser = setup_parser_common()
+ parser = wan_setup_parser(parser)
+
+ args = parser.parse_args()
+ args = read_config_from_file(args, parser)
+
+ args.dit_dtype = None # automatically detected
+ if args.vae_dtype is None:
+ args.vae_dtype = "bfloat16" # use bfloat16 as the default for VAE
+
+ trainer = WanNetworkTrainer()
+ trainer.train(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/exp_code/1_benchmark/musubi-tuner/wan_cache_latents.py b/exp_code/1_benchmark/musubi-tuner/wan_cache_latents.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e3300d37073de991c693ec60d486c227082e483
--- /dev/null
+++ b/exp_code/1_benchmark/musubi-tuner/wan_cache_latents.py
@@ -0,0 +1,4 @@
+from musubi_tuner.wan_cache_latents import main
+
+if __name__ == "__main__":
+ main()
diff --git a/exp_code/1_benchmark/musubi-tuner/wan_cache_text_encoder_outputs.py b/exp_code/1_benchmark/musubi-tuner/wan_cache_text_encoder_outputs.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb1caf7b58bc795ef9e9aa211e6a9023f50de73d
--- /dev/null
+++ b/exp_code/1_benchmark/musubi-tuner/wan_cache_text_encoder_outputs.py
@@ -0,0 +1,4 @@
+from musubi_tuner.wan_cache_text_encoder_outputs import main
+
+if __name__ == "__main__":
+ main()
diff --git a/exp_code/1_benchmark/musubi-tuner/wan_generate_video.py b/exp_code/1_benchmark/musubi-tuner/wan_generate_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..addfc82e03705fc371fcdd452975e8d3107e4057
--- /dev/null
+++ b/exp_code/1_benchmark/musubi-tuner/wan_generate_video.py
@@ -0,0 +1,4 @@
+from musubi_tuner.wan_generate_video import main
+
+if __name__ == "__main__":
+ main()
diff --git a/exp_code/1_benchmark/musubi-tuner/wan_train_network.py b/exp_code/1_benchmark/musubi-tuner/wan_train_network.py
new file mode 100644
index 0000000000000000000000000000000000000000..db36447d79c1902d67eb240620719c8212380a04
--- /dev/null
+++ b/exp_code/1_benchmark/musubi-tuner/wan_train_network.py
@@ -0,0 +1,4 @@
+from musubi_tuner.wan_train_network import main
+
+if __name__ == "__main__":
+ main()