Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py +17 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dynamic_modules_utils.py +507 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/export_utils.py +209 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/hub_utils.py +573 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/import_utils.py +962 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/loading_utils.py +210 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/logging.py +340 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/model_card_template.md +24 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/outputs.py +138 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/peft_utils.py +376 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/pil_utils.py +67 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/remote_utils.py +425 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/source_code_parsing_utils.py +52 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/state_dict_utils.py +366 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/testing_utils.py +1601 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/torch_utils.py +334 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/typing_utils.py +91 -0
- exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/versions.py +117 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/__init__.py +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/conftest.py +48 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/pipeline.py +102 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/what_ever.py +103 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/elise_format0.mid +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/__init__.py +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_group_offloading.py +364 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_hooks.py +377 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/__init__.py +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_auraflow.py +137 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogvideox.py +174 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogview4.py +189 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_flux.py +1054 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_hunyuanvideo.py +264 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_ltx_video.py +148 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_lumina2.py +173 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_mochi.py +143 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_qwenimage.py +130 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sana.py +139 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd.py +769 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd3.py +191 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sdxl.py +681 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wan.py +144 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wanvace.py +217 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/lora/utils.py +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/__init__.py +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/__init__.py +0 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py +276 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_cosmos.py +86 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_dc.py +87 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py +210 -0
- exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl.py +468 -0
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dummy_transformers_and_torch_and_note_seq_objects.py
ADDED
@@ -0,0 +1,17 @@
+# This file is autogenerated by the command `make fix-copies`, do not edit.
+from ..utils import DummyObject, requires_backends
+
+
+class SpectrogramDiffusionPipeline(metaclass=DummyObject):
+    _backends = ["transformers", "torch", "note_seq"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["transformers", "torch", "note_seq"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["transformers", "torch", "note_seq"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["transformers", "torch", "note_seq"])
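The file above defines a backend-gated placeholder: when `transformers`, `torch`, or `note_seq` is missing, `diffusers` exposes this dummy class instead of the real pipeline, and any attempt to use it raises an informative error. A minimal usage sketch (illustrative only; if all three backends happen to be installed, no error is raised):

```python
from diffusers.utils.dummy_transformers_and_torch_and_note_seq_objects import (
    SpectrogramDiffusionPipeline,
)

try:
    # Instantiating the dummy class triggers requires_backends(...).
    SpectrogramDiffusionPipeline()
except ImportError as err:
    print(err)  # explains which optional backends need to be installed
```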
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/dynamic_modules_utils.py
ADDED
@@ -0,0 +1,507 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Utilities to dynamically load objects from the Hub."""
+
+import importlib
+import inspect
+import json
+import os
+import re
+import shutil
+import sys
+import threading
+from pathlib import Path
+from types import ModuleType
+from typing import Dict, Optional, Union
+from urllib import request
+
+from huggingface_hub import hf_hub_download, model_info
+from huggingface_hub.utils import RevisionNotFoundError, validate_hf_hub_args
+from packaging import version
+
+from .. import __version__
+from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
+from .constants import DIFFUSERS_DISABLE_REMOTE_CODE
+
+
+logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+# See https://huggingface.co/datasets/diffusers/community-pipelines-mirror
+COMMUNITY_PIPELINES_MIRROR_ID = "diffusers/community-pipelines-mirror"
+TIME_OUT_REMOTE_CODE = int(os.getenv("DIFFUSERS_TIMEOUT_REMOTE_CODE", 15))
+_HF_REMOTE_CODE_LOCK = threading.Lock()
+
+
+def get_diffusers_versions():
+    url = "https://pypi.org/pypi/diffusers/json"
+    releases = json.loads(request.urlopen(url).read())["releases"].keys()
+    return sorted(releases, key=lambda x: version.Version(x))
+
+
+def init_hf_modules():
+    """
+    Creates the cache directory for modules with an init, and adds it to the Python path.
+    """
+    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
+    if HF_MODULES_CACHE in sys.path:
+        return
+
+    sys.path.append(HF_MODULES_CACHE)
+    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
+    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
+    if not init_path.exists():
+        init_path.touch()
+
+
+def create_dynamic_module(name: Union[str, os.PathLike]):
+    """
+    Creates a dynamic module in the cache directory for modules.
+    """
+    init_hf_modules()
+    dynamic_module_path = Path(HF_MODULES_CACHE) / name
+    # If the parent module does not exist yet, recursively create it.
+    if not dynamic_module_path.parent.exists():
+        create_dynamic_module(dynamic_module_path.parent)
+    os.makedirs(dynamic_module_path, exist_ok=True)
+    init_path = dynamic_module_path / "__init__.py"
+    if not init_path.exists():
+        init_path.touch()
+
+
+def get_relative_imports(module_file):
+    """
+    Get the list of modules that are relatively imported in a module file.
+
+    Args:
+        module_file (`str` or `os.PathLike`): The module file to inspect.
+    """
+    with open(module_file, "r", encoding="utf-8") as f:
+        content = f.read()
+
+    # Imports of the form `import .xxx`
+    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
+    # Imports of the form `from .xxx import yyy`
+    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
+    # Unique-ify
+    return list(set(relative_imports))
+
+
+def get_relative_import_files(module_file):
+    """
+    Get the list of all files that are needed for a given module. Note that this function recurses through the relative
+    imports (if a imports b and b imports c, it will return module files for b and c).
+
+    Args:
+        module_file (`str` or `os.PathLike`): The module file to inspect.
+    """
+    no_change = False
+    files_to_check = [module_file]
+    all_relative_imports = []
+
+    # Let's recurse through all relative imports
+    while not no_change:
+        new_imports = []
+        for f in files_to_check:
+            new_imports.extend(get_relative_imports(f))
+
+        module_path = Path(module_file).parent
+        new_import_files = [str(module_path / m) for m in new_imports]
+        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
+        files_to_check = [f"{f}.py" for f in new_import_files]
+
+        no_change = len(new_import_files) == 0
+        all_relative_imports.extend(files_to_check)
+
+    return all_relative_imports
+
+
+def check_imports(filename):
+    """
+    Check if the current Python environment contains all the libraries that are imported in a file.
+    """
+    with open(filename, "r", encoding="utf-8") as f:
+        content = f.read()
+
+    # Imports of the form `import xxx`
+    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
+    # Imports of the form `from xxx import yyy`
+    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
+    # Only keep the top-level module
+    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
+
+    # Unique-ify and test we got them all
+    imports = list(set(imports))
+    missing_packages = []
+    for imp in imports:
+        try:
+            importlib.import_module(imp)
+        except ImportError:
+            missing_packages.append(imp)
+
+    if len(missing_packages) > 0:
+        raise ImportError(
+            "This modeling file requires the following packages that were not found in your environment: "
+            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
+        )
+
+    return get_relative_imports(filename)
+
+
+def resolve_trust_remote_code(trust_remote_code, model_name, has_remote_code):
+    trust_remote_code = trust_remote_code and not DIFFUSERS_DISABLE_REMOTE_CODE
+    if DIFFUSERS_DISABLE_REMOTE_CODE:
+        logger.warning(
+            "Downloading remote code is disabled globally via the DIFFUSERS_DISABLE_REMOTE_CODE environment variable. Ignoring `trust_remote_code`."
+        )
+
+    if has_remote_code and not trust_remote_code:
+        error_msg = f"The repository for {model_name} contains custom code. "
+        error_msg += (
+            "Downloading remote code is disabled globally via the DIFFUSERS_DISABLE_REMOTE_CODE environment variable."
+            if DIFFUSERS_DISABLE_REMOTE_CODE
+            else "Pass `trust_remote_code=True` to allow loading remote code modules."
+        )
+        raise ValueError(error_msg)
+
+    elif has_remote_code and trust_remote_code:
+        logger.warning(
+            f"`trust_remote_code` is enabled. Downloading code from {model_name}. Please ensure you trust the contents of this repository"
+        )
+
+    return trust_remote_code
+
+
+def get_class_in_module(class_name, module_path, force_reload=False):
+    """
+    Import a module on the cache directory for modules and extract a class from it.
+    """
+    name = os.path.normpath(module_path)
+    if name.endswith(".py"):
+        name = name[:-3]
+    name = name.replace(os.path.sep, ".")
+    module_file: Path = Path(HF_MODULES_CACHE) / module_path
+
+    with _HF_REMOTE_CODE_LOCK:
+        if force_reload:
+            sys.modules.pop(name, None)
+            importlib.invalidate_caches()
+        cached_module: Optional[ModuleType] = sys.modules.get(name)
+        module_spec = importlib.util.spec_from_file_location(name, location=module_file)
+
+        module: ModuleType
+        if cached_module is None:
+            module = importlib.util.module_from_spec(module_spec)
+            # insert it into sys.modules before any loading begins
+            sys.modules[name] = module
+        else:
+            module = cached_module
+
+        module_spec.loader.exec_module(module)
+
+    if class_name is None:
+        return find_pipeline_class(module)
+
+    return getattr(module, class_name)
+
+
+def find_pipeline_class(loaded_module):
+    """
+    Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class
+    inheriting from `DiffusionPipeline`.
+    """
+    from ..pipelines import DiffusionPipeline
+
+    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
+
+    pipeline_class = None
+    for cls_name, cls in cls_members.items():
+        if (
+            cls_name != DiffusionPipeline.__name__
+            and issubclass(cls, DiffusionPipeline)
+            and cls.__module__.split(".")[0] != "diffusers"
+        ):
+            if pipeline_class is not None:
+                raise ValueError(
+                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
+                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
+                    f" {loaded_module}."
+                )
+            pipeline_class = cls
+
+    return pipeline_class
+
+
+@validate_hf_hub_args
+def get_cached_module_file(
+    pretrained_model_name_or_path: Union[str, os.PathLike],
+    module_file: str,
+    cache_dir: Optional[Union[str, os.PathLike]] = None,
+    force_download: bool = False,
+    proxies: Optional[Dict[str, str]] = None,
+    token: Optional[Union[bool, str]] = None,
+    revision: Optional[str] = None,
+    local_files_only: bool = False,
+):
+    """
+    Prepares Downloads a module from a local folder or a distant repo and returns its path inside the cached
+    Transformers module.
+
+    Args:
+        pretrained_model_name_or_path (`str` or `os.PathLike`):
+            This can be either:
+
+            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+              huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
+              under a user or organization name, like `dbmdz/bert-base-german-cased`.
+            - a path to a *directory* containing a configuration file saved using the
+              [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
+
+        module_file (`str`):
+            The name of the module file containing the class to look for.
+        cache_dir (`str` or `os.PathLike`, *optional*):
+            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+            cache should not be used.
+        force_download (`bool`, *optional*, defaults to `False`):
+            Whether or not to force to (re-)download the configuration files and override the cached versions if they
+            exist.
+        proxies (`Dict[str, str]`, *optional*):
+            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+        token (`str` or *bool*, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+            when running `transformers-cli login` (stored in `~/.huggingface`).
+        revision (`str`, *optional*, defaults to `"main"`):
+            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+            identifier allowed by git.
+        local_files_only (`bool`, *optional*, defaults to `False`):
+            If `True`, will only try to load the tokenizer configuration from local files.
+
+    <Tip>
+
+    You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated
+    models](https://huggingface.co/docs/hub/models-gated#gated-models).
+
+    </Tip>
+
+    Returns:
+        `str`: The path to the module inside the cache.
+    """
+    # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file.
+    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
+
+    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
+
+    if os.path.isfile(module_file_or_url):
+        resolved_module_file = module_file_or_url
+        submodule = "local"
+    elif pretrained_model_name_or_path.count("/") == 0:
+        available_versions = get_diffusers_versions()
+        # cut ".dev0"
+        latest_version = "v" + ".".join(__version__.split(".")[:3])
+
+        # retrieve github version that matches
+        if revision is None:
+            revision = latest_version if latest_version[1:] in available_versions else "main"
+            logger.info(f"Defaulting to latest_version: {revision}.")
+        elif revision in available_versions:
+            revision = f"v{revision}"
+        elif revision == "main":
+            revision = revision
+        else:
+            raise ValueError(
+                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
+                f" {', '.join(available_versions + ['main'])}."
+            )
+
+        try:
+            resolved_module_file = hf_hub_download(
+                repo_id=COMMUNITY_PIPELINES_MIRROR_ID,
+                repo_type="dataset",
+                filename=f"{revision}/{pretrained_model_name_or_path}.py",
+                cache_dir=cache_dir,
+                force_download=force_download,
+                proxies=proxies,
+                local_files_only=local_files_only,
+            )
+            submodule = "git"
+            module_file = pretrained_model_name_or_path + ".py"
+        except RevisionNotFoundError as e:
+            raise EnvironmentError(
+                f"Revision '{revision}' not found in the community pipelines mirror. Check available revisions on"
+                " https://huggingface.co/datasets/diffusers/community-pipelines-mirror/tree/main."
+                " If you don't find the revision you are looking for, please open an issue on https://github.com/huggingface/diffusers/issues."
+            ) from e
+        except EnvironmentError:
+            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
+            raise
+    else:
+        try:
+            # Load from URL or cache if already cached
+            resolved_module_file = hf_hub_download(
+                pretrained_model_name_or_path,
+                module_file,
+                cache_dir=cache_dir,
+                force_download=force_download,
+                proxies=proxies,
+                local_files_only=local_files_only,
+                token=token,
+            )
+            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
+        except EnvironmentError:
+            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
+            raise
+
+    # Check we have all the requirements in our environment
+    modules_needed = check_imports(resolved_module_file)
+
+    # Now we move the module inside our cached dynamic modules.
+    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
+    create_dynamic_module(full_submodule)
+    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
+    if submodule == "local" or submodule == "git":
+        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
+        # that hash, to only copy when there is a modification but it seems overkill for now).
+        # The only reason we do the copy is to avoid putting too many folders in sys.path.
+        shutil.copyfile(resolved_module_file, submodule_path / module_file)
+        for module_needed in modules_needed:
+            if len(module_needed.split(".")) == 2:
+                module_needed = "/".join(module_needed.split("."))
+                module_folder = module_needed.split("/")[0]
+                if not os.path.exists(submodule_path / module_folder):
+                    os.makedirs(submodule_path / module_folder)
+            module_needed = f"{module_needed}.py"
+            shutil.copyfile(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
+    else:
+        # Get the commit hash
+        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
+        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
+
+        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
+        # benefit of versioning.
+        submodule_path = submodule_path / commit_hash
+        full_submodule = full_submodule + os.path.sep + commit_hash
+        create_dynamic_module(full_submodule)
+
+        if not (submodule_path / module_file).exists():
+            if len(module_file.split("/")) == 2:
+                module_folder = module_file.split("/")[0]
+                if not os.path.exists(submodule_path / module_folder):
+                    os.makedirs(submodule_path / module_folder)
+            shutil.copyfile(resolved_module_file, submodule_path / module_file)
+
+        # Make sure we also have every file with relative
+        for module_needed in modules_needed:
+            if len(module_needed.split(".")) == 2:
+                module_needed = "/".join(module_needed.split("."))
+            if not (submodule_path / module_needed).exists():
+                get_cached_module_file(
+                    pretrained_model_name_or_path,
+                    f"{module_needed}.py",
+                    cache_dir=cache_dir,
+                    force_download=force_download,
+                    proxies=proxies,
+                    token=token,
+                    revision=revision,
+                    local_files_only=local_files_only,
+                )
+    return os.path.join(full_submodule, module_file)
+
+
+@validate_hf_hub_args
+def get_class_from_dynamic_module(
+    pretrained_model_name_or_path: Union[str, os.PathLike],
+    module_file: str,
+    class_name: Optional[str] = None,
+    cache_dir: Optional[Union[str, os.PathLike]] = None,
+    force_download: bool = False,
+    proxies: Optional[Dict[str, str]] = None,
+    token: Optional[Union[bool, str]] = None,
+    revision: Optional[str] = None,
+    local_files_only: bool = False,
+    **kwargs,
+):
+    """
+    Extracts a class from a module file, present in the local folder or repository of a model.
+
+    <Tip warning={true}>
+
+    Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should
+    therefore only be called on trusted repos.
+
+    </Tip>
+
+    Args:
+        pretrained_model_name_or_path (`str` or `os.PathLike`):
+            This can be either:
+
+            - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
+              huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced
+              under a user or organization name, like `dbmdz/bert-base-german-cased`.
+            - a path to a *directory* containing a configuration file saved using the
+              [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
+
+        module_file (`str`):
+            The name of the module file containing the class to look for.
+        class_name (`str`):
+            The name of the class to import in the module.
+        cache_dir (`str` or `os.PathLike`, *optional*):
+            Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
+            cache should not be used.
+        force_download (`bool`, *optional*, defaults to `False`):
+            Whether or not to force to (re-)download the configuration files and override the cached versions if they
+            exist.
+        proxies (`Dict[str, str]`, *optional*):
+            A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
+            'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
+        token (`str` or `bool`, *optional*):
+            The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
+            when running `transformers-cli login` (stored in `~/.huggingface`).
+        revision (`str`, *optional*, defaults to `"main"`):
+            The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
+            git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
+            identifier allowed by git.
+        local_files_only (`bool`, *optional*, defaults to `False`):
+            If `True`, will only try to load the tokenizer configuration from local files.
+
+    <Tip>
+
+    You may pass a token in `token` if you are not logged in (`hf auth login`) and want to use private or [gated
+    models](https://huggingface.co/docs/hub/models-gated#gated-models).
+
+    </Tip>
+
+    Returns:
+        `type`: The class, dynamically imported from the module.
+
+    Examples:
+
+    ```python
+    # Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this
+    # module.
+    cls = get_class_from_dynamic_module("sgugger/my-bert-model", "modeling.py", "MyBertModel")
+    ```"""
+    # And lastly we get the class inside our newly created module
+    final_module = get_cached_module_file(
+        pretrained_model_name_or_path,
+        module_file,
+        cache_dir=cache_dir,
+        force_download=force_download,
+        proxies=proxies,
+        token=token,
+        revision=revision,
+        local_files_only=local_files_only,
+    )
+    return get_class_in_module(class_name, final_module)
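As the docstring's example suggests, `get_class_from_dynamic_module` downloads a module file, caches it under `HF_MODULES_CACHE`, and imports the requested class from it. A minimal sketch of a direct call (the repository id and file name below are hypothetical, and remote code is executed, so this should only be pointed at trusted repos):

```python
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

# Hypothetical repo id and module file, used purely for illustration.
pipeline_cls = get_class_from_dynamic_module(
    "some-user/my-custom-pipeline",
    module_file="pipeline.py",
    class_name=None,  # None lets find_pipeline_class pick the single DiffusionPipeline subclass
)
```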
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/export_utils.py
ADDED
@@ -0,0 +1,209 @@
+import io
+import random
+import struct
+import tempfile
+from contextlib import contextmanager
+from typing import List, Optional, Union
+
+import numpy as np
+import PIL.Image
+import PIL.ImageOps
+
+from .import_utils import BACKENDS_MAPPING, is_imageio_available, is_opencv_available
+from .logging import get_logger
+
+
+global_rng = random.Random()
+
+logger = get_logger(__name__)
+
+
+@contextmanager
+def buffered_writer(raw_f):
+    f = io.BufferedWriter(raw_f)
+    yield f
+    f.flush()
+
+
+def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None, fps: int = 10) -> str:
+    if output_gif_path is None:
+        output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name
+
+    image[0].save(
+        output_gif_path,
+        save_all=True,
+        append_images=image[1:],
+        optimize=False,
+        duration=1000 // fps,
+        loop=0,
+    )
+    return output_gif_path
+
+
+def export_to_ply(mesh, output_ply_path: str = None):
+    """
+    Write a PLY file for a mesh.
+    """
+    if output_ply_path is None:
+        output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name
+
+    coords = mesh.verts.detach().cpu().numpy()
+    faces = mesh.faces.cpu().numpy()
+    rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
+
+    with buffered_writer(open(output_ply_path, "wb")) as f:
+        f.write(b"ply\n")
+        f.write(b"format binary_little_endian 1.0\n")
+        f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
+        f.write(b"property float x\n")
+        f.write(b"property float y\n")
+        f.write(b"property float z\n")
+        if rgb is not None:
+            f.write(b"property uchar red\n")
+            f.write(b"property uchar green\n")
+            f.write(b"property uchar blue\n")
+        if faces is not None:
+            f.write(bytes(f"element face {len(faces)}\n", "ascii"))
+            f.write(b"property list uchar int vertex_index\n")
+        f.write(b"end_header\n")
+
+        if rgb is not None:
+            rgb = (rgb * 255.499).round().astype(int)
+            vertices = [
+                (*coord, *rgb)
+                for coord, rgb in zip(
+                    coords.tolist(),
+                    rgb.tolist(),
+                )
+            ]
+            format = struct.Struct("<3f3B")
+            for item in vertices:
+                f.write(format.pack(*item))
+        else:
+            format = struct.Struct("<3f")
+            for vertex in coords.tolist():
+                f.write(format.pack(*vertex))
+
+        if faces is not None:
+            format = struct.Struct("<B3I")
+            for tri in faces.tolist():
+                f.write(format.pack(len(tri), *tri))
+
+    return output_ply_path
+
+
+def export_to_obj(mesh, output_obj_path: str = None):
+    if output_obj_path is None:
+        output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name
+
+    verts = mesh.verts.detach().cpu().numpy()
+    faces = mesh.faces.cpu().numpy()
+
+    vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
+    vertices = [
+        "{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
+    ]
+
+    faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()]
+
+    combined_data = ["v " + vertex for vertex in vertices] + faces
+
+    with open(output_obj_path, "w") as f:
+        f.writelines("\n".join(combined_data))
+
+
+def _legacy_export_to_video(
+    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 10
+):
+    if is_opencv_available():
+        import cv2
+    else:
+        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
+    if output_video_path is None:
+        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
+
+    if isinstance(video_frames[0], np.ndarray):
+        video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames]
+
+    elif isinstance(video_frames[0], PIL.Image.Image):
+        video_frames = [np.array(frame) for frame in video_frames]
+
+    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
+    h, w, c = video_frames[0].shape
+    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=fps, frameSize=(w, h))
+    for i in range(len(video_frames)):
+        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
+        video_writer.write(img)
+
+    return output_video_path
+
+
+def export_to_video(
+    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]],
+    output_video_path: str = None,
+    fps: int = 10,
+    quality: float = 5.0,
+    bitrate: Optional[int] = None,
+    macro_block_size: Optional[int] = 16,
+) -> str:
+    """
+    quality:
+        Video output quality. Default is 5. Uses variable bit rate. Highest quality is 10, lowest is 0. Set to None to
+        prevent variable bitrate flags to FFMPEG so you can manually specify them using output_params instead.
+        Specifying a fixed bitrate using `bitrate` disables this parameter.
+
+    bitrate:
+        Set a constant bitrate for the video encoding. Default is None causing `quality` parameter to be used instead.
+        Better quality videos with smaller file sizes will result from using the `quality` variable bitrate parameter
+        rather than specifying a fixed bitrate with this parameter.
+
+    macro_block_size:
+        Size constraint for video. Width and height, must be divisible by this number. If not divisible by this number
+        imageio will tell ffmpeg to scale the image up to the next closest size divisible by this number. Most codecs
+        are compatible with a macroblock size of 16 (default), some can go smaller (4, 8). To disable this automatic
+        feature set it to None or 1, however be warned many players can't decode videos that are odd in size and some
+        codecs will produce poor results or fail. See https://en.wikipedia.org/wiki/Macroblock.
+    """
+    # TODO: Dhruv. Remove by Diffusers release 0.33.0
+    # Added to prevent breaking existing code
+    if not is_imageio_available():
+        logger.warning(
+            (
+                "It is recommended to use `export_to_video` with `imageio` and `imageio-ffmpeg` as a backend. \n"
+                "These libraries are not present in your environment. Attempting to use legacy OpenCV backend to export video. \n"
+                "Support for the OpenCV backend will be deprecated in a future Diffusers version"
+            )
+        )
+        return _legacy_export_to_video(video_frames, output_video_path, fps)
+
+    if is_imageio_available():
+        import imageio
+    else:
+        raise ImportError(BACKENDS_MAPPING["imageio"][1].format("export_to_video"))
+
+    try:
+        imageio.plugins.ffmpeg.get_exe()
+    except AttributeError:
+        raise AttributeError(
+            (
+                "Found an existing imageio backend in your environment. Attempting to export video with imageio. \n"
+                "Unable to find a compatible ffmpeg installation in your environment to use with imageio. Please install via `pip install imageio-ffmpeg"
+            )
+        )
+
+    if output_video_path is None:
+        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
+
+    if isinstance(video_frames[0], np.ndarray):
+        video_frames = [(frame * 255).astype(np.uint8) for frame in video_frames]
+
+    elif isinstance(video_frames[0], PIL.Image.Image):
+        video_frames = [np.array(frame) for frame in video_frames]
+
+    with imageio.get_writer(
+        output_video_path, fps=fps, quality=quality, bitrate=bitrate, macro_block_size=macro_block_size
+    ) as writer:
+        for frame in video_frames:
+            writer.append_data(frame)
+
+    return output_video_path
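`export_to_video` prefers the `imageio`/`imageio-ffmpeg` backend and only falls back to the legacy OpenCV path with a warning. A minimal sketch of exporting synthetic frames (assumes `imageio` and `imageio-ffmpeg` are installed; the frame shape, count, and output name are arbitrary choices for illustration):

```python
import numpy as np

from diffusers.utils.export_utils import export_to_video

# 16 random RGB frames in [0, 1]; float arrays are scaled to uint8 internally.
frames = [np.random.rand(64, 64, 3).astype(np.float32) for _ in range(16)]
print(export_to_video(frames, output_video_path="sample.mp4", fps=8))
```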
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/hub_utils.py
ADDED
@@ -0,0 +1,573 @@
+# coding=utf-8
+# Copyright 2025 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json
+import os
+import re
+import sys
+import tempfile
+import warnings
+from pathlib import Path
+from typing import Dict, List, Optional, Union
+from uuid import uuid4
+
+from huggingface_hub import (
+    DDUFEntry,
+    ModelCard,
+    ModelCardData,
+    create_repo,
+    hf_hub_download,
+    model_info,
+    snapshot_download,
+    upload_folder,
+)
+from huggingface_hub.constants import HF_HUB_DISABLE_TELEMETRY, HF_HUB_OFFLINE
+from huggingface_hub.file_download import REGEX_COMMIT_HASH
+from huggingface_hub.utils import (
+    EntryNotFoundError,
+    RepositoryNotFoundError,
+    RevisionNotFoundError,
+    is_jinja_available,
+    validate_hf_hub_args,
+)
+from packaging import version
+from requests import HTTPError
+
+from .. import __version__
+from .constants import (
+    DEPRECATED_REVISION_ARGS,
+    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
+    SAFETENSORS_WEIGHTS_NAME,
+    WEIGHTS_NAME,
+)
+from .import_utils import (
+    ENV_VARS_TRUE_VALUES,
+    _flax_version,
+    _jax_version,
+    _onnxruntime_version,
+    _torch_version,
+    is_flax_available,
+    is_onnx_available,
+    is_torch_available,
+)
+from .logging import get_logger
+
+
+logger = get_logger(__name__)
+
+MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
+SESSION_ID = uuid4().hex
+
+
+def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
+    """
+    Formats a user-agent string with basic info about a request.
+    """
+    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
+    if HF_HUB_DISABLE_TELEMETRY or HF_HUB_OFFLINE:
+        return ua + "; telemetry/off"
+    if is_torch_available():
+        ua += f"; torch/{_torch_version}"
+    if is_flax_available():
+        ua += f"; jax/{_jax_version}"
+        ua += f"; flax/{_flax_version}"
+    if is_onnx_available():
+        ua += f"; onnxruntime/{_onnxruntime_version}"
+    # CI will set this value to True
+    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
+        ua += "; is_ci/true"
+    if isinstance(user_agent, dict):
+        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
+    elif isinstance(user_agent, str):
+        ua += "; " + user_agent
+    return ua
+
+
+def load_or_create_model_card(
+    repo_id_or_path: str = None,
+    token: Optional[str] = None,
+    is_pipeline: bool = False,
+    from_training: bool = False,
+    model_description: Optional[str] = None,
+    base_model: str = None,
+    prompt: Optional[str] = None,
+    license: Optional[str] = None,
+    widget: Optional[List[dict]] = None,
+    inference: Optional[bool] = None,
+) -> ModelCard:
+    """
+    Loads or creates a model card.
+
+    Args:
+        repo_id_or_path (`str`):
+            The repo id (e.g., "runwayml/stable-diffusion-v1-5") or local path where to look for the model card.
+        token (`str`, *optional*):
+            Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more
+            details.
+        is_pipeline (`bool`):
+            Boolean to indicate if we're adding tag to a [`DiffusionPipeline`].
+        from_training: (`bool`): Boolean flag to denote if the model card is being created from a training script.
+        model_description (`str`, *optional*): Model description to add to the model card. Helpful when using
+            `load_or_create_model_card` from a training script.
+        base_model (`str`): Base model identifier (e.g., "stabilityai/stable-diffusion-xl-base-1.0"). Useful
+            for DreamBooth-like training.
+        prompt (`str`, *optional*): Prompt used for training. Useful for DreamBooth-like training.
+        license: (`str`, *optional*): License of the output artifact. Helpful when using
+            `load_or_create_model_card` from a training script.
+        widget (`List[dict]`, *optional*): Widget to accompany a gallery template.
+        inference: (`bool`, optional): Whether to turn on inference widget. Helpful when using
+            `load_or_create_model_card` from a training script.
+    """
+    if not is_jinja_available():
+        raise ValueError(
+            "Modelcard rendering is based on Jinja templates."
+            " Please make sure to have `jinja` installed before using `load_or_create_model_card`."
+            " To install it, please run `pip install Jinja2`."
+        )
+
+    try:
+        # Check if the model card is present on the remote repo
+        model_card = ModelCard.load(repo_id_or_path, token=token)
+    except (EntryNotFoundError, RepositoryNotFoundError):
+        # Otherwise create a model card from template
+        if from_training:
+            model_card = ModelCard.from_template(
+                card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
+                    license=license,
+                    library_name="diffusers",
+                    inference=inference,
+                    base_model=base_model,
+                    instance_prompt=prompt,
+                    widget=widget,
+                ),
+                template_path=MODEL_CARD_TEMPLATE_PATH,
+                model_description=model_description,
+            )
+        else:
+            card_data = ModelCardData()
+            component = "pipeline" if is_pipeline else "model"
+            if model_description is None:
+                model_description = f"This is the model card of a 🧨 diffusers {component} that has been pushed on the Hub. This model card has been automatically generated."
+            model_card = ModelCard.from_template(card_data, model_description=model_description)
+
+    return model_card
+
+
+def populate_model_card(model_card: ModelCard, tags: Union[str, List[str]] = None) -> ModelCard:
+    """Populates the `model_card` with library name and optional tags."""
+    if model_card.data.library_name is None:
+        model_card.data.library_name = "diffusers"
+
+    if tags is not None:
+        if isinstance(tags, str):
+            tags = [tags]
+        if model_card.data.tags is None:
+            model_card.data.tags = []
+        for tag in tags:
+            model_card.data.tags.append(tag)
+
+    return model_card
+
+
+def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
+    """
+    Extracts the commit hash from a resolved filename toward a cache file.
+    """
+    if resolved_file is None or commit_hash is not None:
+        return commit_hash
+    resolved_file = str(Path(resolved_file).as_posix())
+    search = re.search(r"snapshots/([^/]+)/", resolved_file)
+    if search is None:
+        return None
+    commit_hash = search.groups()[0]
+    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
+
+
+def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
+    if variant is not None:
+        splits = weights_name.split(".")
+        splits = splits[:-1] + [variant] + splits[-1:]
+        weights_name = ".".join(splits)
+
+    return weights_name
+
+
+@validate_hf_hub_args
+def _get_model_file(
+    pretrained_model_name_or_path: Union[str, Path],
+    *,
+    weights_name: str,
+    subfolder: Optional[str] = None,
+    cache_dir: Optional[str] = None,
+    force_download: bool = False,
+    proxies: Optional[Dict] = None,
+    local_files_only: bool = False,
+    token: Optional[str] = None,
+    user_agent: Optional[Union[Dict, str]] = None,
+    revision: Optional[str] = None,
+    commit_hash: Optional[str] = None,
+    dduf_entries: Optional[Dict[str, DDUFEntry]] = None,
+):
+    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
+
+    if dduf_entries:
+        if subfolder is not None:
+            raise ValueError(
+                "DDUF file only allow for 1 level of directory (e.g transformer/model1/model.safetentors is not allowed). "
+                "Please check the DDUF structure"
+            )
+        model_file = (
+            weights_name
+            if pretrained_model_name_or_path == ""
+            else "/".join([pretrained_model_name_or_path, weights_name])
+        )
+        if model_file in dduf_entries:
+            return model_file
+        else:
+            raise EnvironmentError(f"Error no file named {weights_name} found in archive {dduf_entries.keys()}.")
+    elif os.path.isfile(pretrained_model_name_or_path):
+        return pretrained_model_name_or_path
+    elif os.path.isdir(pretrained_model_name_or_path):
+        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
+            # Load from a PyTorch checkpoint
+            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
+            return model_file
+        elif subfolder is not None and os.path.isfile(
+            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
+        ):
+            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
+            return model_file
+        else:
+            raise EnvironmentError(
+                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
+            )
+    else:
+        # 1. First check if deprecated way of loading from branches is used
+        if (
+            revision in DEPRECATED_REVISION_ARGS
+            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
+            and version.parse(version.parse(__version__).base_version) >= version.parse("0.22.0")
+        ):
+            try:
+                model_file = hf_hub_download(
+                    pretrained_model_name_or_path,
+                    filename=_add_variant(weights_name, revision),
+                    cache_dir=cache_dir,
+                    force_download=force_download,
+                    proxies=proxies,
+                    local_files_only=local_files_only,
+                    token=token,
+                    user_agent=user_agent,
+                    subfolder=subfolder,
+                    revision=revision or commit_hash,
+                )
+                warnings.warn(
+                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
+                    FutureWarning,
+                )
+                return model_file
+            except:  # noqa: E722
+                warnings.warn(
+                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
+                    FutureWarning,
+                )
+        try:
+            # 2. Load model file as usual
+            model_file = hf_hub_download(
+                pretrained_model_name_or_path,
+                filename=weights_name,
+                cache_dir=cache_dir,
+                force_download=force_download,
+                proxies=proxies,
+                local_files_only=local_files_only,
+                token=token,
+                user_agent=user_agent,
+                subfolder=subfolder,
subfolder=subfolder,
|
| 299 |
+
revision=revision or commit_hash,
|
| 300 |
+
)
|
| 301 |
+
return model_file
|
| 302 |
+
|
| 303 |
+
except RepositoryNotFoundError as e:
|
| 304 |
+
raise EnvironmentError(
|
| 305 |
+
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
|
| 306 |
+
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
|
| 307 |
+
"token having permission to this repo with `token` or log in with `hf auth login`."
|
| 308 |
+
) from e
|
| 309 |
+
except RevisionNotFoundError as e:
|
| 310 |
+
raise EnvironmentError(
|
| 311 |
+
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
|
| 312 |
+
"this model name. Check the model page at "
|
| 313 |
+
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
|
| 314 |
+
) from e
|
| 315 |
+
except EntryNotFoundError as e:
|
| 316 |
+
raise EnvironmentError(
|
| 317 |
+
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
|
| 318 |
+
) from e
|
| 319 |
+
except HTTPError as e:
|
| 320 |
+
raise EnvironmentError(
|
| 321 |
+
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{e}"
|
| 322 |
+
) from e
|
| 323 |
+
except ValueError as e:
|
| 324 |
+
raise EnvironmentError(
|
| 325 |
+
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
|
| 326 |
+
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
|
| 327 |
+
f" directory containing a file named {weights_name} or"
|
| 328 |
+
" \nCheckout your internet connection or see how to run the library in"
|
| 329 |
+
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
|
| 330 |
+
) from e
|
| 331 |
+
except EnvironmentError as e:
|
| 332 |
+
raise EnvironmentError(
|
| 333 |
+
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
|
| 334 |
+
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
|
| 335 |
+
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
|
| 336 |
+
f"containing a file named {weights_name}"
|
| 337 |
+
) from e
|
| 338 |
+
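
# Internal usage sketch (hypothetical repo id): resolving a UNet weight file from the Hub would look like
#
#     weights_path = _get_model_file(
#         "some-org/some-diffusion-model",
#         weights_name="diffusion_pytorch_model.safetensors",
#         subfolder="unet",
#     )
#
# Local file paths and directories short-circuit above before any Hub call is made.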


def _get_checkpoint_shard_files(
    pretrained_model_name_or_path,
    index_filename,
    cache_dir=None,
    proxies=None,
    local_files_only=False,
    token=None,
    user_agent=None,
    revision=None,
    subfolder="",
    dduf_entries: Optional[Dict[str, DDUFEntry]] = None,
):
    """
    For a given model:

    - downloads and caches all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on
      the Hub
    - returns the list of paths to all the shards, as well as some metadata.

    For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
    index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
    """
    if dduf_entries:
        if index_filename not in dduf_entries:
            raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.")
    else:
        if not os.path.isfile(index_filename):
            raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.")

    if dduf_entries:
        index = json.loads(dduf_entries[index_filename].read_text())
    else:
        with open(index_filename, "r") as f:
            index = json.loads(f.read())

    original_shard_filenames = sorted(set(index["weight_map"].values()))
    sharded_metadata = index["metadata"]
    sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys())
    sharded_metadata["weight_map"] = index["weight_map"].copy()
    shards_path = os.path.join(pretrained_model_name_or_path, subfolder)

    # First, let's deal with local folder.
    if os.path.isdir(pretrained_model_name_or_path) or dduf_entries:
        shard_filenames = [os.path.join(shards_path, f) for f in original_shard_filenames]
        for shard_file in shard_filenames:
            if dduf_entries:
                if shard_file not in dduf_entries:
                    raise FileNotFoundError(
                        f"{shards_path} does not appear to have a file named {shard_file} which is "
                        "required according to the checkpoint index."
                    )
            else:
                if not os.path.exists(shard_file):
                    raise FileNotFoundError(
                        f"{shards_path} does not appear to have a file named {shard_file} which is "
                        "required according to the checkpoint index."
                    )
        return shard_filenames, sharded_metadata

    # At this stage pretrained_model_name_or_path is a model identifier on the Hub
    allow_patterns = original_shard_filenames
    if subfolder is not None:
        allow_patterns = [os.path.join(subfolder, p) for p in allow_patterns]

    ignore_patterns = ["*.json", "*.md"]

    # If the repo doesn't have the required shards, error out early even before downloading anything.
    if not local_files_only:
        model_files_info = model_info(pretrained_model_name_or_path, revision=revision, token=token)
        for shard_file in original_shard_filenames:
            shard_file_present = any(shard_file in k.rfilename for k in model_files_info.siblings)
            if not shard_file_present:
                raise EnvironmentError(
                    f"{shards_path} does not appear to have a file named {shard_file} which is "
                    "required according to the checkpoint index."
                )

    try:
        # Load from URL
        cached_folder = snapshot_download(
            pretrained_model_name_or_path,
            cache_dir=cache_dir,
            proxies=proxies,
            local_files_only=local_files_only,
            token=token,
            revision=revision,
            allow_patterns=allow_patterns,
            ignore_patterns=ignore_patterns,
            user_agent=user_agent,
        )
        if subfolder is not None:
            cached_folder = os.path.join(cached_folder, subfolder)

    # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so
    # we don't have to catch them here. We have also dealt with EntryNotFoundError.
    except HTTPError as e:
        raise EnvironmentError(
            f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {pretrained_model_name_or_path}. You should try"
            " again after checking your internet connection."
        ) from e

    cached_filenames = [os.path.join(cached_folder, f) for f in original_shard_filenames]
    for cached_file in cached_filenames:
        if not os.path.isfile(cached_file):
            raise EnvironmentError(
                f"{cached_folder} does not have a file named {cached_file} which is required according to the checkpoint index."
            )

    return cached_filenames, sharded_metadata
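
# Shape of the index file consumed above (abbreviated, with hypothetical key and size values):
#
#     {
#         "metadata": {"total_size": 9975189120},
#         "weight_map": {
#             "transformer_blocks.0.attn.to_q.weight": "diffusion_pytorch_model-00001-of-00002.safetensors",
#             "transformer_blocks.0.attn.to_k.weight": "diffusion_pytorch_model-00001-of-00002.safetensors"
#         }
#     }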


def _check_legacy_sharding_variant_format(folder: str = None, filenames: List[str] = None, variant: str = None):
    if filenames and folder:
        raise ValueError("Both `filenames` and `folder` cannot be provided.")
    if not filenames:
        filenames = []
        for _, _, files in os.walk(folder):
            for file in files:
                filenames.append(os.path.basename(file))
    transformers_index_format = r"\d{5}-of-\d{5}"
    variant_file_re = re.compile(rf".*-{transformers_index_format}\.{variant}\.[a-z]+$")
    return any(variant_file_re.match(f) is not None for f in filenames)
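
# For example, with variant="fp16" a legacy shard name such as
# "diffusion_pytorch_model-00001-of-00002.fp16.safetensors" matches the pattern above, while a name that
# places the variant before the shard index ("diffusion_pytorch_model.fp16-00001-of-00002.safetensors") does not.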


class PushToHubMixin:
    """
    A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub.
    """

    def _upload_folder(
        self,
        working_dir: Union[str, os.PathLike],
        repo_id: str,
        token: Optional[str] = None,
        commit_message: Optional[str] = None,
        create_pr: bool = False,
        subfolder: Optional[str] = None,
    ):
        """
        Uploads all files in `working_dir` to `repo_id`.
        """
        if commit_message is None:
            if "Model" in self.__class__.__name__:
                commit_message = "Upload model"
            elif "Scheduler" in self.__class__.__name__:
                commit_message = "Upload scheduler"
            else:
                commit_message = f"Upload {self.__class__.__name__}"

        logger.info(f"Uploading the files of {working_dir} to {repo_id}.")
        return upload_folder(
            repo_id=repo_id,
            folder_path=working_dir,
            token=token,
            commit_message=commit_message,
            create_pr=create_pr,
            path_in_repo=subfolder,
        )

    def push_to_hub(
        self,
        repo_id: str,
        commit_message: Optional[str] = None,
        private: Optional[bool] = None,
        token: Optional[str] = None,
        create_pr: bool = False,
        safe_serialization: bool = True,
        variant: Optional[str] = None,
        subfolder: Optional[str] = None,
    ) -> str:
        """
        Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub.

        Parameters:
            repo_id (`str`):
                The name of the repository you want to push your model, scheduler, or pipeline files to. It should
                contain your organization name when pushing to an organization. `repo_id` can also be a path to a
                local directory.
            commit_message (`str`, *optional*):
                Message to commit while pushing. Defaults to `"Upload {object}"`.
            private (`bool`, *optional*):
                Whether to make the repo private. If `None` (default), the repo will be public unless the
                organization's default is private. This value is ignored if the repo already exists.
            token (`str`, *optional*):
                The token to use as HTTP bearer authorization for remote files. The token generated when running `hf
                auth login` (stored in `~/.huggingface`).
            create_pr (`bool`, *optional*, defaults to `False`):
                Whether or not to create a PR with the uploaded files or directly commit.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether or not to convert the model weights to the `safetensors` format.
            variant (`str`, *optional*):
                If specified, weights are saved in the format `pytorch_model.<variant>.bin`.

        Examples:

        ```python
        from diffusers import UNet2DConditionModel

        unet = UNet2DConditionModel.from_pretrained("stabilityai/stable-diffusion-2", subfolder="unet")

        # Push the `unet` to your namespace with the name "my-finetuned-unet".
        unet.push_to_hub("my-finetuned-unet")

        # Push the `unet` to an organization with the name "my-finetuned-unet".
        unet.push_to_hub("your-org/my-finetuned-unet")
        ```
        """
        repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id

        # Create a new empty model card and eventually tag it
        if not subfolder:
            model_card = load_or_create_model_card(repo_id, token=token)
            model_card = populate_model_card(model_card)

        # Save all files.
        save_kwargs = {"safe_serialization": safe_serialization}
        if "Scheduler" not in self.__class__.__name__:
            save_kwargs.update({"variant": variant})

        with tempfile.TemporaryDirectory() as tmpdir:
            self.save_pretrained(tmpdir, **save_kwargs)

            # Update model card if needed:
            if not subfolder:
                model_card.save(os.path.join(tmpdir, "README.md"))

            return self._upload_folder(
                tmpdir,
                repo_id,
                token=token,
                commit_message=commit_message,
                create_pr=create_pr,
                subfolder=subfolder,
            )
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/import_utils.py
ADDED
@@ -0,0 +1,962 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import utilities: Utilities related to imports and our lazy inits.
"""

import importlib.util
import inspect
import operator as op
import os
import sys
from collections import OrderedDict, defaultdict
from itertools import chain
from types import ModuleType
from typing import Any, Tuple, Union

from huggingface_hub.utils import is_jinja_available  # noqa: F401
from packaging.version import Version, parse

from . import logging


# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
try:
    _package_map = importlib_metadata.packages_distributions()  # load-once to avoid expensive calls
except Exception:
    _package_map = None

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})

USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper()
DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper()
DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

_is_google_colab = "google.colab" in sys.modules or any(k.startswith("COLAB_") for k in os.environ)


def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[bool, str]:
    global _package_map
    pkg_exists = importlib.util.find_spec(pkg_name) is not None
    pkg_version = "N/A"

    if pkg_exists:
        if _package_map is None:
            _package_map = defaultdict(list)
            try:
                # Fallback for Python < 3.10
                for dist in importlib_metadata.distributions():
                    _top_level_declared = (dist.read_text("top_level.txt") or "").split()
                    # Infer top-level package names from file structure
                    _inferred_opt_names = {
                        f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in (dist.files or [])
                    } - {None}
                    _top_level_inferred = filter(lambda name: "." not in name, _inferred_opt_names)
                    for pkg in _top_level_declared or _top_level_inferred:
                        _package_map[pkg].append(dist.metadata["Name"])
            except Exception as _:
                pass
        try:
            if get_dist_name and pkg_name in _package_map and _package_map[pkg_name]:
                if len(_package_map[pkg_name]) > 1:
                    logger.warning(
                        f"Multiple distributions found for package {pkg_name}. Picked distribution: {_package_map[pkg_name][0]}"
                    )
                pkg_name = _package_map[pkg_name][0]
            pkg_version = importlib_metadata.version(pkg_name)
            logger.debug(f"Successfully imported {pkg_name} version {pkg_version}")
        except (ImportError, importlib_metadata.PackageNotFoundError):
            pkg_exists = False

    return pkg_exists, pkg_version
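
# Illustration (hypothetical version strings, not executed at import time):
#     _is_package_available("torch")          -> (True, "2.4.0") when torch is installed, else (False, "N/A")
#     _is_package_available("optimum", True)  -> additionally resolves the distribution name through the
#                                                packages_distributions() map built above.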


if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
    _torch_available, _torch_version = _is_package_available("torch")

else:
    logger.info("Disabling PyTorch because USE_TORCH is set")
    _torch_available = False
    _torch_version = "N/A"

_jax_version = "N/A"
_flax_version = "N/A"
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
    if _flax_available:
        try:
            _jax_version = importlib_metadata.version("jax")
            _flax_version = importlib_metadata.version("flax")
            logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
        except importlib_metadata.PackageNotFoundError:
            _flax_available = False
else:
    _flax_available = False

if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _safetensors_available, _safetensors_version = _is_package_available("safetensors")

else:
    logger.info("Disabling Safetensors because USE_SAFETENSORS is set")
    _safetensors_available = False

_onnxruntime_version = "N/A"
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
if _onnx_available:
    candidates = (
        "onnxruntime",
        "onnxruntime-cann",
        "onnxruntime-directml",
        "ort_nightly_directml",
        "onnxruntime-gpu",
        "ort_nightly_gpu",
        "onnxruntime-migraphx",
        "onnxruntime-openvino",
        "onnxruntime-qnn",
        "onnxruntime-rocm",
        "onnxruntime-training",
        "onnxruntime-vitisai",
    )
    _onnxruntime_version = None
    # For the metadata, we have to look for both onnxruntime and onnxruntime-x
    for pkg in candidates:
        try:
            _onnxruntime_version = importlib_metadata.version(pkg)
            break
        except importlib_metadata.PackageNotFoundError:
            pass
    _onnx_available = _onnxruntime_version is not None
    if _onnx_available:
        logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}")

# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed.
# _opencv_available = importlib.util.find_spec("opencv-python") is not None
try:
    candidates = (
        "opencv-python",
        "opencv-contrib-python",
        "opencv-python-headless",
        "opencv-contrib-python-headless",
    )
    _opencv_version = None
    for pkg in candidates:
        try:
            _opencv_version = importlib_metadata.version(pkg)
            break
        except importlib_metadata.PackageNotFoundError:
            pass
    _opencv_available = _opencv_version is not None
    if _opencv_available:
        logger.debug(f"Successfully imported cv2 version {_opencv_version}")
except importlib_metadata.PackageNotFoundError:
    _opencv_available = False

_bs4_available = importlib.util.find_spec("bs4") is not None
try:
    # importlib metadata under different name
    _bs4_version = importlib_metadata.version("beautifulsoup4")
    logger.debug(f"Successfully imported beautifulsoup4 version {_bs4_version}")
except importlib_metadata.PackageNotFoundError:
    _bs4_available = False

_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None
try:
    _invisible_watermark_version = importlib_metadata.version("invisible-watermark")
    logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}")
except importlib_metadata.PackageNotFoundError:
    _invisible_watermark_available = False

_torch_xla_available, _torch_xla_version = _is_package_available("torch_xla")
_torch_npu_available, _torch_npu_version = _is_package_available("torch_npu")
_transformers_available, _transformers_version = _is_package_available("transformers")
_hf_hub_available, _hf_hub_version = _is_package_available("huggingface_hub")
_kernels_available, _kernels_version = _is_package_available("kernels")
_inflect_available, _inflect_version = _is_package_available("inflect")
_unidecode_available, _unidecode_version = _is_package_available("unidecode")
_k_diffusion_available, _k_diffusion_version = _is_package_available("k_diffusion")
_note_seq_available, _note_seq_version = _is_package_available("note_seq")
_wandb_available, _wandb_version = _is_package_available("wandb")
_tensorboard_available, _tensorboard_version = _is_package_available("tensorboard")
_compel_available, _compel_version = _is_package_available("compel")
_sentencepiece_available, _sentencepiece_version = _is_package_available("sentencepiece")
_torchsde_available, _torchsde_version = _is_package_available("torchsde")
_peft_available, _peft_version = _is_package_available("peft")
_torchvision_available, _torchvision_version = _is_package_available("torchvision")
_matplotlib_available, _matplotlib_version = _is_package_available("matplotlib")
_timm_available, _timm_version = _is_package_available("timm")
_bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes")
_imageio_available, _imageio_version = _is_package_available("imageio")
_ftfy_available, _ftfy_version = _is_package_available("ftfy")
_scipy_available, _scipy_version = _is_package_available("scipy")
_librosa_available, _librosa_version = _is_package_available("librosa")
_accelerate_available, _accelerate_version = _is_package_available("accelerate")
_xformers_available, _xformers_version = _is_package_available("xformers")
_gguf_available, _gguf_version = _is_package_available("gguf")
_torchao_available, _torchao_version = _is_package_available("torchao")
_bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes")
_optimum_quanto_available, _optimum_quanto_version = _is_package_available("optimum", get_dist_name=True)
_pytorch_retinaface_available, _pytorch_retinaface_version = _is_package_available("pytorch_retinaface")
_better_profanity_available, _better_profanity_version = _is_package_available("better_profanity")
_nltk_available, _nltk_version = _is_package_available("nltk")
_cosmos_guardrail_available, _cosmos_guardrail_version = _is_package_available("cosmos_guardrail")
_sageattention_available, _sageattention_version = _is_package_available("sageattention")
_flash_attn_available, _flash_attn_version = _is_package_available("flash_attn")
_flash_attn_3_available, _flash_attn_3_version = _is_package_available("flash_attn_3")
_kornia_available, _kornia_version = _is_package_available("kornia")


def is_torch_available():
    return _torch_available


def is_torch_xla_available():
    return _torch_xla_available


def is_torch_npu_available():
    return _torch_npu_available


def is_flax_available():
    return _flax_available


def is_transformers_available():
    return _transformers_available


def is_inflect_available():
    return _inflect_available


def is_unidecode_available():
    return _unidecode_available


def is_onnx_available():
    return _onnx_available


def is_opencv_available():
    return _opencv_available


def is_scipy_available():
    return _scipy_available


def is_librosa_available():
    return _librosa_available


def is_xformers_available():
    return _xformers_available


def is_accelerate_available():
    return _accelerate_available


def is_kernels_available():
    return _kernels_available


def is_k_diffusion_available():
    return _k_diffusion_available


def is_note_seq_available():
    return _note_seq_available


def is_wandb_available():
    return _wandb_available


def is_tensorboard_available():
    return _tensorboard_available


def is_compel_available():
    return _compel_available


def is_ftfy_available():
    return _ftfy_available


def is_bs4_available():
    return _bs4_available


def is_torchsde_available():
    return _torchsde_available


def is_invisible_watermark_available():
    return _invisible_watermark_available


def is_peft_available():
    return _peft_available


def is_torchvision_available():
    return _torchvision_available


def is_matplotlib_available():
    return _matplotlib_available


def is_safetensors_available():
    return _safetensors_available


def is_bitsandbytes_available():
    return _bitsandbytes_available


def is_google_colab():
    return _is_google_colab


def is_sentencepiece_available():
    return _sentencepiece_available


def is_imageio_available():
    return _imageio_available


def is_gguf_available():
    return _gguf_available


def is_torchao_available():
    return _torchao_available


def is_optimum_quanto_available():
    return _optimum_quanto_available


def is_timm_available():
    return _timm_available


def is_pytorch_retinaface_available():
    return _pytorch_retinaface_available


def is_better_profanity_available():
    return _better_profanity_available


def is_nltk_available():
    return _nltk_available


def is_cosmos_guardrail_available():
    return _cosmos_guardrail_available


def is_hpu_available():
    return all(importlib.util.find_spec(lib) for lib in ("habana_frameworks", "habana_frameworks.torch"))


def is_sageattention_available():
    return _sageattention_available


def is_flash_attn_available():
    return _flash_attn_available


def is_flash_attn_3_available():
    return _flash_attn_3_available


def is_kornia_available():
    return _kornia_available


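# Typical usage pattern elsewhere in the library (sketch, not part of this module): optional imports are
# guarded behind these flags, e.g.
#
#     if is_transformers_available():
#         from transformers import CLIPTextModel
#
# so that importing diffusers itself does not fail when an optional dependency is missing.
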
# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Check out the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
"""

# docstyle-ignore
INFLECT_IMPORT_ERROR = """
{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install
inflect`
"""

# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Check out the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""

# docstyle-ignore
ONNX_IMPORT_ERROR = """
{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip
install onnxruntime`
"""

# docstyle-ignore
OPENCV_IMPORT_ERROR = """
{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip
install opencv-python`
"""

# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install
scipy`
"""

# docstyle-ignore
LIBROSA_IMPORT_ERROR = """
{0} requires the librosa library but it was not found in your environment. Check out the instructions on the
installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment.
"""

# docstyle-ignore
TRANSFORMERS_IMPORT_ERROR = """
{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip
install transformers`
"""

# docstyle-ignore
UNIDECODE_IMPORT_ERROR = """
{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install
Unidecode`
"""

# docstyle-ignore
K_DIFFUSION_IMPORT_ERROR = """
{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip
install k-diffusion`
"""

# docstyle-ignore
NOTE_SEQ_IMPORT_ERROR = """
{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip
install note-seq`
"""

# docstyle-ignore
WANDB_IMPORT_ERROR = """
{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip
install wandb`
"""

# docstyle-ignore
TENSORBOARD_IMPORT_ERROR = """
{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip
install tensorboard`
"""


# docstyle-ignore
COMPEL_IMPORT_ERROR = """
{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel`
"""

# docstyle-ignore
BS4_IMPORT_ERROR = """
{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
"""

# docstyle-ignore
FTFY_IMPORT_ERROR = """
{0} requires the ftfy library but it was not found in your environment. Check out the instructions on the
installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""

# docstyle-ignore
TORCHSDE_IMPORT_ERROR = """
{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde`
"""

# docstyle-ignore
INVISIBLE_WATERMARK_IMPORT_ERROR = """
{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0`
"""

# docstyle-ignore
PEFT_IMPORT_ERROR = """
{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft`
"""

# docstyle-ignore
SAFETENSORS_IMPORT_ERROR = """
{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors`
"""

# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece`
"""


# docstyle-ignore
BITSANDBYTES_IMPORT_ERROR = """
{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes`
"""

# docstyle-ignore
IMAGEIO_IMPORT_ERROR = """
{0} requires the imageio library and ffmpeg but they were not found in your environment. You can install them with pip: `pip install imageio imageio-ffmpeg`
"""

# docstyle-ignore
GGUF_IMPORT_ERROR = """
{0} requires the gguf library but it was not found in your environment. You can install it with pip: `pip install gguf`
"""

TORCHAO_IMPORT_ERROR = """
{0} requires the torchao library but it was not found in your environment. You can install it with pip: `pip install
torchao`
"""

QUANTO_IMPORT_ERROR = """
{0} requires the optimum-quanto library but it was not found in your environment. You can install it with pip: `pip
install optimum-quanto`
"""

# docstyle-ignore
PYTORCH_RETINAFACE_IMPORT_ERROR = """
{0} requires the pytorch_retinaface library but it was not found in your environment. You can install it with pip: `pip install pytorch_retinaface`
"""

# docstyle-ignore
BETTER_PROFANITY_IMPORT_ERROR = """
{0} requires the better_profanity library but it was not found in your environment. You can install it with pip: `pip install better_profanity`
"""

# docstyle-ignore
NLTK_IMPORT_ERROR = """
{0} requires the nltk library but it was not found in your environment. You can install it with pip: `pip install nltk`
"""

BACKENDS_MAPPING = OrderedDict(
    [
        ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
        ("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
        ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)),
        ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)),
        ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)),
        ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
        ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
        ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),
        ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),
        ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)),
        ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)),
        ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)),
        ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)),
        ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)),
        ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)),
        ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
        ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)),
        ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)),
        ("peft", (is_peft_available, PEFT_IMPORT_ERROR)),
        ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)),
        ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)),
        ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
        ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)),
        ("gguf", (is_gguf_available, GGUF_IMPORT_ERROR)),
        ("torchao", (is_torchao_available, TORCHAO_IMPORT_ERROR)),
        ("quanto", (is_optimum_quanto_available, QUANTO_IMPORT_ERROR)),
        ("pytorch_retinaface", (is_pytorch_retinaface_available, PYTORCH_RETINAFACE_IMPORT_ERROR)),
        ("better_profanity", (is_better_profanity_available, BETTER_PROFANITY_IMPORT_ERROR)),
        ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)),
    ]
)


def requires_backends(obj, backends):
    if not isinstance(backends, (list, tuple)):
        backends = [backends]

    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    checks = (BACKENDS_MAPPING[backend] for backend in backends)
    failed = [msg.format(name) for available, msg in checks if not available()]
    if failed:
        raise ImportError("".join(failed))

    if name in [
        "VersatileDiffusionTextToImagePipeline",
        "VersatileDiffusionPipeline",
        "VersatileDiffusionDualGuidedPipeline",
        "StableDiffusionImageVariationPipeline",
        "UnCLIPPipeline",
    ] and is_transformers_version("<", "4.25.0"):
        raise ImportError(
            f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install"
            " --upgrade transformers \n```"
        )

    if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version(
        "<", "4.26.0"
    ):
        raise ImportError(
            f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install"
            " --upgrade transformers \n```"
        )


class DummyObject(type):
    """
    Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
    `requires_backend` each time a user tries to access any method of that class.
    """

    def __getattr__(cls, key):
        if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]:
            return super().__getattr__(cls, key)
        requires_backends(cls, cls._backends)

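
# Sketch of how the pieces above fit together (the class name below is illustrative, not part of this module):
#
#     class FlaxOnlyPipeline(metaclass=DummyObject):
#         _backends = ["flax"]
#
# Accessing any public attribute of FlaxOnlyPipeline without flax installed routes through requires_backends
# and raises the ImportError built from FLAX_IMPORT_ERROR, instead of failing at import time.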

# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compares a library version to some requirement using a given operation.

    Args:
        library_or_version (`str` or `packaging.version.Version`):
            A library name or a version to check.
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`.
        requirement_version (`str`):
            The version to compare the library version against
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib_metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338
def is_torch_version(operation: str, version: str):
    """
    Compares the current PyTorch version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A string version of PyTorch
    """
    return compare_versions(parse(_torch_version), operation, version)
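
# Illustration (hypothetical installed version): with torch 2.4.0 present,
#     is_torch_version(">=", "2.0.0")          -> True
#     compare_versions("torch", "<", "2.0.0")  -> False
# Both reduce to packaging.version comparisons looked up via STR_OPERATION_TO_FUNC.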


def is_torch_xla_version(operation: str, version: str):
    """
    Compares the current torch_xla version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A string version of torch_xla
    """
    if not _torch_xla_available:
        return False
    return compare_versions(parse(_torch_xla_version), operation, version)


def is_transformers_version(operation: str, version: str):
    """
    Compares the current Transformers version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _transformers_available:
        return False
    return compare_versions(parse(_transformers_version), operation, version)


def is_hf_hub_version(operation: str, version: str):
    """
    Compares the current Hugging Face Hub version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _hf_hub_available:
        return False
    return compare_versions(parse(_hf_hub_version), operation, version)


def is_accelerate_version(operation: str, version: str):
    """
    Compares the current Accelerate version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _accelerate_available:
        return False
    return compare_versions(parse(_accelerate_version), operation, version)


def is_peft_version(operation: str, version: str):
    """
    Compares the current PEFT version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _peft_available:
        return False
    return compare_versions(parse(_peft_version), operation, version)


def is_bitsandbytes_version(operation: str, version: str):
    """
    Compares the current bitsandbytes version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _bitsandbytes_available:
        return False
    return compare_versions(parse(_bitsandbytes_version), operation, version)


def is_gguf_version(operation: str, version: str):
    """
    Compares the current gguf version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _gguf_available:
        return False
    return compare_versions(parse(_gguf_version), operation, version)


def is_torchao_version(operation: str, version: str):
    """
    Compares the current torchao version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _torchao_available:
        return False
    return compare_versions(parse(_torchao_version), operation, version)


def is_k_diffusion_version(operation: str, version: str):
    """
    Compares the current k-diffusion version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _k_diffusion_available:
        return False
    return compare_versions(parse(_k_diffusion_version), operation, version)


def is_optimum_quanto_version(operation: str, version: str):
    """
    Compares the current optimum-quanto version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _optimum_quanto_available:
        return False
    return compare_versions(parse(_optimum_quanto_version), operation, version)


def is_xformers_version(operation: str, version: str):
    """
    Compares the current xformers version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _xformers_available:
        return False
    return compare_versions(parse(_xformers_version), operation, version)


def is_sageattention_version(operation: str, version: str):
    """
    Compares the current sageattention version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _sageattention_available:
        return False
    return compare_versions(parse(_sageattention_version), operation, version)


def is_flash_attn_version(operation: str, version: str):
    """
    Compares the current flash-attention version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _flash_attn_available:
        return False
    return compare_versions(parse(_flash_attn_version), operation, version)
|
| 876 |
+
|
| 877 |
+
|
| 878 |
+
def get_objects_from_module(module):
|
| 879 |
+
"""
|
| 880 |
+
Returns a dict of object names and values in a module, while skipping private/internal objects
|
| 881 |
+
|
| 882 |
+
Args:
|
| 883 |
+
module (ModuleType):
|
| 884 |
+
Module to extract the objects from.
|
| 885 |
+
|
| 886 |
+
Returns:
|
| 887 |
+
dict: Dictionary of object names and corresponding values
|
| 888 |
+
"""
|
| 889 |
+
|
| 890 |
+
objects = {}
|
| 891 |
+
for name in dir(module):
|
| 892 |
+
if name.startswith("_"):
|
| 893 |
+
continue
|
| 894 |
+
objects[name] = getattr(module, name)
|
| 895 |
+
|
| 896 |
+
return objects
|
| 897 |
+
|
| 898 |
+
|
| 899 |
+
class OptionalDependencyNotAvailable(BaseException):
|
| 900 |
+
"""
|
| 901 |
+
An error indicating that an optional dependency of Diffusers was not found in the environment.
|
| 902 |
+
"""
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
class _LazyModule(ModuleType):
|
| 906 |
+
"""
|
| 907 |
+
Module class that surfaces all objects but only performs associated imports when the objects are requested.
|
| 908 |
+
"""
|
| 909 |
+
|
| 910 |
+
# Very heavily inspired by optuna.integration._IntegrationModule
|
| 911 |
+
# https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
|
| 912 |
+
def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
|
| 913 |
+
super().__init__(name)
|
| 914 |
+
self._modules = set(import_structure.keys())
|
| 915 |
+
self._class_to_module = {}
|
| 916 |
+
for key, values in import_structure.items():
|
| 917 |
+
for value in values:
|
| 918 |
+
self._class_to_module[value] = key
|
| 919 |
+
# Needed for autocompletion in an IDE
|
| 920 |
+
self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
|
| 921 |
+
self.__file__ = module_file
|
| 922 |
+
self.__spec__ = module_spec
|
| 923 |
+
self.__path__ = [os.path.dirname(module_file)]
|
| 924 |
+
self._objects = {} if extra_objects is None else extra_objects
|
| 925 |
+
self._name = name
|
| 926 |
+
self._import_structure = import_structure
|
| 927 |
+
|
| 928 |
+
# Needed for autocompletion in an IDE
|
| 929 |
+
def __dir__(self):
|
| 930 |
+
result = super().__dir__()
|
| 931 |
+
# The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
|
| 932 |
+
# they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
|
| 933 |
+
for attr in self.__all__:
|
| 934 |
+
if attr not in result:
|
| 935 |
+
result.append(attr)
|
| 936 |
+
return result
|
| 937 |
+
|
| 938 |
+
def __getattr__(self, name: str) -> Any:
|
| 939 |
+
if name in self._objects:
|
| 940 |
+
return self._objects[name]
|
| 941 |
+
if name in self._modules:
|
| 942 |
+
value = self._get_module(name)
|
| 943 |
+
elif name in self._class_to_module.keys():
|
| 944 |
+
module = self._get_module(self._class_to_module[name])
|
| 945 |
+
value = getattr(module, name)
|
| 946 |
+
else:
|
| 947 |
+
raise AttributeError(f"module {self.__name__} has no attribute {name}")
|
| 948 |
+
|
| 949 |
+
setattr(self, name, value)
|
| 950 |
+
return value
|
| 951 |
+
|
| 952 |
+
def _get_module(self, module_name: str):
|
| 953 |
+
try:
|
| 954 |
+
return importlib.import_module("." + module_name, self.__name__)
|
| 955 |
+
except Exception as e:
|
| 956 |
+
raise RuntimeError(
|
| 957 |
+
f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
|
| 958 |
+
f" traceback):\n{e}"
|
| 959 |
+
) from e
|
| 960 |
+
|
| 961 |
+
def __reduce__(self):
|
| 962 |
+
return (self.__class__, (self._name, self.__file__, self._import_structure))
|
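A minimal usage sketch for the version helpers above, assuming the package is installed and importable as `diffusers`; the version threshold and the `attention_backend` variable are illustrative, not part of this diff.

```python
from diffusers.utils.import_utils import is_transformers_version

# Pick an implementation based on the installed transformers version.
# The helper returns False when transformers is not installed at all.
if is_transformers_version(">=", "4.41.0"):  # illustrative threshold
    attention_backend = "sdpa"
else:
    attention_backend = "eager"
print(attention_backend)
```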
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/loading_utils.py
ADDED
@@ -0,0 +1,210 @@
import os
import tempfile
from typing import Any, Callable, List, Optional, Tuple, Union
from urllib.parse import unquote, urlparse

import librosa
import numpy
import PIL.Image
import PIL.ImageOps
import requests

from .constants import DIFFUSERS_REQUEST_TIMEOUT
from .import_utils import BACKENDS_MAPPING, is_imageio_available


def load_image(
    image: Union[str, PIL.Image.Image], convert_method: Optional[Callable[[PIL.Image.Image], PIL.Image.Image]] = None
) -> PIL.Image.Image:
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
        convert_method (Callable[[PIL.Image.Image], PIL.Image.Image], *optional*):
            A conversion method to apply to the image after loading it. When set to `None` the image will be converted
            to "RGB".

    Returns:
        `PIL.Image.Image`:
            A PIL Image.
    """
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw)
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            raise ValueError(
                f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {image} is not a valid path."
            )
    elif isinstance(image, PIL.Image.Image):
        image = image
    else:
        raise ValueError(
            "Incorrect format used for the image. Should be a URL linking to an image, a local path, or a PIL image."
        )

    image = PIL.ImageOps.exif_transpose(image)

    if convert_method is not None:
        image = convert_method(image)
    else:
        image = image.convert("RGB")

    return image


def load_video(
    video: str,
    convert_method: Optional[Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]]] = None,
) -> List[PIL.Image.Image]:
    """
    Loads `video` to a list of PIL Image.

    Args:
        video (`str`):
            A URL or Path to a video to convert to a list of PIL Image format.
        convert_method (Callable[[List[PIL.Image.Image]], List[PIL.Image.Image]], *optional*):
            A conversion method to apply to the video after loading it. When set to `None` the images will be
            converted to "RGB".

    Returns:
        `List[PIL.Image.Image]`:
            The video as a list of PIL images.
    """
    is_url = video.startswith("http://") or video.startswith("https://")
    is_file = os.path.isfile(video)
    was_tempfile_created = False

    if not (is_url or is_file):
        raise ValueError(
            f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {video} is not a valid path."
        )

    if is_url:
        response = requests.get(video, stream=True)
        if response.status_code != 200:
            raise ValueError(f"Failed to download video. Status code: {response.status_code}")

        parsed_url = urlparse(video)
        file_name = os.path.basename(unquote(parsed_url.path))

        suffix = os.path.splitext(file_name)[1] or ".mp4"
        video_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name

        was_tempfile_created = True

        video_data = response.iter_content(chunk_size=8192)
        with open(video_path, "wb") as f:
            for chunk in video_data:
                f.write(chunk)

        video = video_path

    pil_images = []
    if video.endswith(".gif"):
        gif = PIL.Image.open(video)
        try:
            while True:
                pil_images.append(gif.copy())
                gif.seek(gif.tell() + 1)
        except EOFError:
            pass

    else:
        if is_imageio_available():
            import imageio
        else:
            raise ImportError(BACKENDS_MAPPING["imageio"][1].format("load_video"))

        try:
            imageio.plugins.ffmpeg.get_exe()
        except AttributeError:
            raise AttributeError(
                "Unable to find an ffmpeg installation on your machine. Please install via `pip install imageio-ffmpeg`"
            )

        with imageio.get_reader(video) as reader:
            # Read all frames
            for frame in reader:
                pil_images.append(PIL.Image.fromarray(frame))

    if was_tempfile_created:
        os.remove(video_path)

    if convert_method is not None:
        pil_images = convert_method(pil_images)

    return pil_images


def load_audio(
    audio: Union[str, numpy.ndarray], convert_method: Optional[Callable[[numpy.ndarray], numpy.ndarray]] = None
) -> numpy.ndarray:
    """
    Loads `audio` to a numpy array.

    Args:
        audio (`str` or `numpy.ndarray`):
            The audio to convert to the numpy array format.
        convert_method (Callable[[numpy.ndarray], numpy.ndarray], *optional*):
            A conversion method to apply to the audio after loading it. When set to `None` the audio is returned as
            loaded.

    Returns:
        `numpy.ndarray`:
            A librosa audio array.
        `int`:
            The sample rate of the audio.
    """
    sample_rate = 16000
    if isinstance(audio, str):
        if audio.startswith("http://") or audio.startswith("https://"):
            # Download the file to a temporary location before decoding it with librosa.
            response = requests.get(audio, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT)
            suffix = os.path.splitext(os.path.basename(unquote(urlparse(audio).path)))[1] or ".wav"
            audio_path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False).name
            with open(audio_path, "wb") as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)
            audio, sample_rate = librosa.load(audio_path, sr=16000)
            os.remove(audio_path)
        elif os.path.isfile(audio):
            audio, sample_rate = librosa.load(audio, sr=16000)
        else:
            raise ValueError(
                f"Incorrect path or URL. URLs must start with `http://` or `https://`, and {audio} is not a valid path."
            )
    elif isinstance(audio, numpy.ndarray):
        audio = audio
    else:
        raise ValueError(
            "Incorrect format used for the audio. Should be a URL linking to an audio file, a local path, or a numpy array."
        )

    if convert_method is not None:
        audio = convert_method(audio)

    return audio, sample_rate


# Taken from `transformers`.
def get_module_from_name(module, tensor_name: str) -> Tuple[Any, str]:
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]
    return module, tensor_name


def get_submodule_by_name(root_module, module_path: str):
    current = root_module
    parts = module_path.split(".")
    for part in parts:
        if part.isdigit():
            idx = int(part)
            current = current[idx]  # e.g., for nn.ModuleList or nn.Sequential
        else:
            current = getattr(current, part)
    return current
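A short sketch of how these loaders are typically called; the URLs are placeholders, and the audio call follows the `(array, sample_rate)` return shown above.

```python
from diffusers.utils.loading_utils import load_image, load_video

# Placeholder URLs; any http(s) URL or local file path is accepted.
image = load_image("https://example.com/frame.png")   # -> PIL.Image.Image in RGB
frames = load_video("https://example.com/clip.mp4")   # -> List[PIL.Image.Image]
print(image.size, len(frames))
```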
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/logging.py
ADDED
@@ -0,0 +1,340 @@
# coding=utf-8
# Copyright 2025 Optuna, Hugging Face
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging utilities."""

import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Dict, Optional

from tqdm import auto as tqdm_lib


_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level() -> int:
    """
    If DIFFUSERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is
    not - fall back to `_default_log_level`
    """
    env_level_str = os.getenv("DIFFUSERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DIFFUSERS_VERBOSITY={env_level_str}, has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.

        if sys.stderr:  # only if sys.stderr exists, e.g. when not using pythonw in windows
            _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict() -> Dict[str, int]:
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """
    Return a logger with the specified name.

    This function is not supposed to be directly accessed unless you are writing a custom diffusers module.
    """

    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """
    Return the current level for the 🤗 Diffusers' root logger as an `int`.

    Returns:
        `int`:
            Logging level integers which can be one of:

            - `50`: `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL`
            - `40`: `diffusers.logging.ERROR`
            - `30`: `diffusers.logging.WARNING` or `diffusers.logging.WARN`
            - `20`: `diffusers.logging.INFO`
            - `10`: `diffusers.logging.DEBUG`

    """

    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """
    Set the verbosity level for the 🤗 Diffusers' root logger.

    Args:
        verbosity (`int`):
            Logging level which can be one of:

            - `diffusers.logging.CRITICAL` or `diffusers.logging.FATAL`
            - `diffusers.logging.ERROR`
            - `diffusers.logging.WARNING` or `diffusers.logging.WARN`
            - `diffusers.logging.INFO`
            - `diffusers.logging.DEBUG`
    """

    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info() -> None:
    """Set the verbosity to the `INFO` level."""
    return set_verbosity(INFO)


def set_verbosity_warning() -> None:
    """Set the verbosity to the `WARNING` level."""
    return set_verbosity(WARNING)


def set_verbosity_debug() -> None:
    """Set the verbosity to the `DEBUG` level."""
    return set_verbosity(DEBUG)


def set_verbosity_error() -> None:
    """Set the verbosity to the `ERROR` level."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Disable the default handler of the 🤗 Diffusers' root logger."""

    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Enable the default handler of the 🤗 Diffusers' root logger."""

    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """adds a handler to the HuggingFace Diffusers' root logger."""

    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """removes given handler from the HuggingFace Diffusers' root logger."""

    _configure_library_root_logger()

    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """
    Disable propagation of the library log outputs. Note that log propagation is disabled by default.
    """

    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """
    Enable propagation of the library log outputs. Please disable the HuggingFace Diffusers' default handler to
    prevent double logging if the root logger has been configured.
    """

    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """
    Enable explicit formatting for every 🤗 Diffusers' logger. The explicit formatter is as follows:
    ```
    [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE
    ```
    All handlers currently bound to the root logger are affected by this method.
    """
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """
    Resets the formatting for 🤗 Diffusers' loggers.

    All handlers currently bound to the root logger are affected by this method.
    """
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs) -> None:
    """
    This method is identical to `logger.warning()`, but if env var DIFFUSERS_NO_ADVISORY_WARNINGS=1 is set, this
    warning will not be printed
    """
    no_advisory_warnings = os.getenv("DIFFUSERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return a boolean indicating whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar() -> None:
    """Enable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar() -> None:
    """Disable tqdm progress bar."""
    global _tqdm_active
    _tqdm_active = False
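A small sketch of the intended control flow for these helpers, assuming the package is importable as `diffusers`.

```python
from diffusers.utils import logging

logging.set_verbosity_info()           # raise the library root logger to INFO
logger = logging.get_logger(__name__)  # child logger under the "diffusers" namespace
logger.info("Verbose diagnostics enabled.")
logging.disable_progress_bar()         # swap tqdm for the EmptyTqdm stub defined above
```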
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/model_card_template.md
ADDED
@@ -0,0 +1,24 @@
---
{{ card_data }}
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->

{{ model_description }}

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```

#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

[TODO: describe the data used to train the model]
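For reference, a hedged sketch of how the Jinja placeholders above (`card_data`, `model_description`) might be filled with `huggingface_hub`; the metadata values and description text are illustrative, not part of this diff.

```python
from huggingface_hub import ModelCard, ModelCardData

card = ModelCard.from_template(
    card_data=ModelCardData(license="apache-2.0", library_name="diffusers"),  # illustrative metadata
    template_path="model_card_template.md",
    model_description="Example description of a fine-tuned diffusion pipeline.",
)
print(card.content[:200])
```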
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/outputs.py
ADDED
@@ -0,0 +1,138 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic utilities
"""

from collections import OrderedDict
from dataclasses import fields, is_dataclass
from typing import Any, Tuple

import numpy as np

from .import_utils import is_torch_available, is_torch_version


def is_tensor(x) -> bool:
    """
    Tests if `x` is a `torch.Tensor` or `np.ndarray`.
    """
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True

    return isinstance(x, np.ndarray)


class BaseOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
    Python dictionary.

    <Tip warning={true}>

    You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple
    first.

    </Tip>
    """

    def __init_subclass__(cls) -> None:
        """Register subclasses as pytree nodes.

        This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with
        `static_graph=True` with modules that output `ModelOutput` subclasses.
        """
        if is_torch_available():
            import torch.utils._pytree

            if is_torch_version("<", "2.2"):
                torch.utils._pytree._register_pytree_node(
                    cls,
                    torch.utils._pytree._dict_flatten,
                    lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
                )
            else:
                torch.utils._pytree.register_pytree_node(
                    cls,
                    torch.utils._pytree._dict_flatten,
                    lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),
                    serialized_type_name=f"{cls.__module__}.{cls.__name__}",
                )

    def __post_init__(self) -> None:
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and isinstance(first_field, dict):
            for key, value in first_field.items():
                self[key] = value
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k: Any) -> Any:
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name: Any, value: Any) -> None:
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def __reduce__(self):
        if not is_dataclass(self):
            return super().__reduce__()
        callable, _args, *remaining = super().__reduce__()
        args = tuple(getattr(self, field.name) for field in fields(self))
        return callable, args, *remaining

    def to_tuple(self) -> Tuple[Any, ...]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
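A minimal sketch of defining and consuming a `BaseOutput` subclass; the class and field names are illustrative, not part of this diff.

```python
from dataclasses import dataclass
from typing import Optional

import numpy as np

from diffusers.utils.outputs import BaseOutput


@dataclass
class ToyPipelineOutput(BaseOutput):
    frames: np.ndarray
    metadata: Optional[dict] = None


out = ToyPipelineOutput(frames=np.zeros((1, 8, 8, 3)))
print(out.frames.shape)      # attribute access
print(out["frames"].shape)   # dict-style access
print(len(out.to_tuple()))   # `None` fields are dropped -> 1
```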
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/peft_utils.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""
|
| 15 |
+
PEFT utilities: Utilities related to peft library
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
import collections
|
| 19 |
+
import importlib
|
| 20 |
+
from typing import Optional
|
| 21 |
+
|
| 22 |
+
from packaging import version
|
| 23 |
+
|
| 24 |
+
from . import logging
|
| 25 |
+
from .import_utils import is_peft_available, is_peft_version, is_torch_available
|
| 26 |
+
from .torch_utils import empty_device_cache
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
logger = logging.get_logger(__name__)
|
| 30 |
+
|
| 31 |
+
if is_torch_available():
|
| 32 |
+
import torch
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def recurse_remove_peft_layers(model):
|
| 36 |
+
r"""
|
| 37 |
+
Recursively replace all instances of `LoraLayer` with corresponding new layers in `model`.
|
| 38 |
+
"""
|
| 39 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 40 |
+
|
| 41 |
+
has_base_layer_pattern = False
|
| 42 |
+
for module in model.modules():
|
| 43 |
+
if isinstance(module, BaseTunerLayer):
|
| 44 |
+
has_base_layer_pattern = hasattr(module, "base_layer")
|
| 45 |
+
break
|
| 46 |
+
|
| 47 |
+
if has_base_layer_pattern:
|
| 48 |
+
from peft.utils import _get_submodules
|
| 49 |
+
|
| 50 |
+
key_list = [key for key, _ in model.named_modules() if "lora" not in key]
|
| 51 |
+
for key in key_list:
|
| 52 |
+
try:
|
| 53 |
+
parent, target, target_name = _get_submodules(model, key)
|
| 54 |
+
except AttributeError:
|
| 55 |
+
continue
|
| 56 |
+
if hasattr(target, "base_layer"):
|
| 57 |
+
setattr(parent, target_name, target.get_base_layer())
|
| 58 |
+
else:
|
| 59 |
+
# This is for backwards compatibility with PEFT <= 0.6.2.
|
| 60 |
+
# TODO can be removed once that PEFT version is no longer supported.
|
| 61 |
+
from peft.tuners.lora import LoraLayer
|
| 62 |
+
|
| 63 |
+
for name, module in model.named_children():
|
| 64 |
+
if len(list(module.children())) > 0:
|
| 65 |
+
## compound module, go inside it
|
| 66 |
+
recurse_remove_peft_layers(module)
|
| 67 |
+
|
| 68 |
+
module_replaced = False
|
| 69 |
+
|
| 70 |
+
if isinstance(module, LoraLayer) and isinstance(module, torch.nn.Linear):
|
| 71 |
+
new_module = torch.nn.Linear(
|
| 72 |
+
module.in_features,
|
| 73 |
+
module.out_features,
|
| 74 |
+
bias=module.bias is not None,
|
| 75 |
+
).to(module.weight.device)
|
| 76 |
+
new_module.weight = module.weight
|
| 77 |
+
if module.bias is not None:
|
| 78 |
+
new_module.bias = module.bias
|
| 79 |
+
|
| 80 |
+
module_replaced = True
|
| 81 |
+
elif isinstance(module, LoraLayer) and isinstance(module, torch.nn.Conv2d):
|
| 82 |
+
new_module = torch.nn.Conv2d(
|
| 83 |
+
module.in_channels,
|
| 84 |
+
module.out_channels,
|
| 85 |
+
module.kernel_size,
|
| 86 |
+
module.stride,
|
| 87 |
+
module.padding,
|
| 88 |
+
module.dilation,
|
| 89 |
+
module.groups,
|
| 90 |
+
).to(module.weight.device)
|
| 91 |
+
|
| 92 |
+
new_module.weight = module.weight
|
| 93 |
+
if module.bias is not None:
|
| 94 |
+
new_module.bias = module.bias
|
| 95 |
+
|
| 96 |
+
module_replaced = True
|
| 97 |
+
|
| 98 |
+
if module_replaced:
|
| 99 |
+
setattr(model, name, new_module)
|
| 100 |
+
del module
|
| 101 |
+
|
| 102 |
+
empty_device_cache()
|
| 103 |
+
return model
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def scale_lora_layers(model, weight):
|
| 107 |
+
"""
|
| 108 |
+
Adjust the weightage given to the LoRA layers of the model.
|
| 109 |
+
|
| 110 |
+
Args:
|
| 111 |
+
model (`torch.nn.Module`):
|
| 112 |
+
The model to scale.
|
| 113 |
+
weight (`float`):
|
| 114 |
+
The weight to be given to the LoRA layers.
|
| 115 |
+
"""
|
| 116 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 117 |
+
|
| 118 |
+
if weight == 1.0:
|
| 119 |
+
return
|
| 120 |
+
|
| 121 |
+
for module in model.modules():
|
| 122 |
+
if isinstance(module, BaseTunerLayer):
|
| 123 |
+
module.scale_layer(weight)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def unscale_lora_layers(model, weight: Optional[float] = None):
|
| 127 |
+
"""
|
| 128 |
+
Removes the previously passed weight given to the LoRA layers of the model.
|
| 129 |
+
|
| 130 |
+
Args:
|
| 131 |
+
model (`torch.nn.Module`):
|
| 132 |
+
The model to scale.
|
| 133 |
+
weight (`float`, *optional*):
|
| 134 |
+
The weight to be given to the LoRA layers. If no scale is passed the scale of the lora layer will be
|
| 135 |
+
re-initialized to the correct value. If 0.0 is passed, we will re-initialize the scale with the correct
|
| 136 |
+
value.
|
| 137 |
+
"""
|
| 138 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 139 |
+
|
| 140 |
+
if weight is None or weight == 1.0:
|
| 141 |
+
return
|
| 142 |
+
|
| 143 |
+
for module in model.modules():
|
| 144 |
+
if isinstance(module, BaseTunerLayer):
|
| 145 |
+
if weight != 0:
|
| 146 |
+
module.unscale_layer(weight)
|
| 147 |
+
else:
|
| 148 |
+
for adapter_name in module.active_adapters:
|
| 149 |
+
# if weight == 0 unscale should re-set the scale to the original value.
|
| 150 |
+
module.set_scale(adapter_name, 1.0)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
def get_peft_kwargs(
|
| 154 |
+
rank_dict, network_alpha_dict, peft_state_dict, is_unet=True, model_state_dict=None, adapter_name=None
|
| 155 |
+
):
|
| 156 |
+
rank_pattern = {}
|
| 157 |
+
alpha_pattern = {}
|
| 158 |
+
r = lora_alpha = list(rank_dict.values())[0]
|
| 159 |
+
|
| 160 |
+
if len(set(rank_dict.values())) > 1:
|
| 161 |
+
# get the rank occurring the most number of times
|
| 162 |
+
r = collections.Counter(rank_dict.values()).most_common()[0][0]
|
| 163 |
+
|
| 164 |
+
# for modules with rank different from the most occurring rank, add it to the `rank_pattern`
|
| 165 |
+
rank_pattern = dict(filter(lambda x: x[1] != r, rank_dict.items()))
|
| 166 |
+
rank_pattern = {k.split(".lora_B.")[0]: v for k, v in rank_pattern.items()}
|
| 167 |
+
|
| 168 |
+
if network_alpha_dict is not None and len(network_alpha_dict) > 0:
|
| 169 |
+
if len(set(network_alpha_dict.values())) > 1:
|
| 170 |
+
# get the alpha occurring the most number of times
|
| 171 |
+
lora_alpha = collections.Counter(network_alpha_dict.values()).most_common()[0][0]
|
| 172 |
+
|
| 173 |
+
# for modules with alpha different from the most occurring alpha, add it to the `alpha_pattern`
|
| 174 |
+
alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, network_alpha_dict.items()))
|
| 175 |
+
if is_unet:
|
| 176 |
+
alpha_pattern = {
|
| 177 |
+
".".join(k.split(".lora_A.")[0].split(".")).replace(".alpha", ""): v
|
| 178 |
+
for k, v in alpha_pattern.items()
|
| 179 |
+
}
|
| 180 |
+
else:
|
| 181 |
+
alpha_pattern = {".".join(k.split(".down.")[0].split(".")[:-1]): v for k, v in alpha_pattern.items()}
|
| 182 |
+
else:
|
| 183 |
+
lora_alpha = set(network_alpha_dict.values()).pop()
|
| 184 |
+
|
| 185 |
+
target_modules = list({name.split(".lora")[0] for name in peft_state_dict.keys()})
|
| 186 |
+
use_dora = any("lora_magnitude_vector" in k for k in peft_state_dict)
|
| 187 |
+
# for now we know that the "bias" keys are only associated with `lora_B`.
|
| 188 |
+
lora_bias = any("lora_B" in k and k.endswith(".bias") for k in peft_state_dict)
|
| 189 |
+
|
| 190 |
+
lora_config_kwargs = {
|
| 191 |
+
"r": r,
|
| 192 |
+
"lora_alpha": lora_alpha,
|
| 193 |
+
"rank_pattern": rank_pattern,
|
| 194 |
+
"alpha_pattern": alpha_pattern,
|
| 195 |
+
"target_modules": target_modules,
|
| 196 |
+
"use_dora": use_dora,
|
| 197 |
+
"lora_bias": lora_bias,
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
return lora_config_kwargs
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def get_adapter_name(model):
|
| 204 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 205 |
+
|
| 206 |
+
for module in model.modules():
|
| 207 |
+
if isinstance(module, BaseTunerLayer):
|
| 208 |
+
return f"default_{len(module.r)}"
|
| 209 |
+
return "default_0"
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def set_adapter_layers(model, enabled=True):
|
| 213 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 214 |
+
|
| 215 |
+
for module in model.modules():
|
| 216 |
+
if isinstance(module, BaseTunerLayer):
|
| 217 |
+
# The recent version of PEFT needs to call `enable_adapters` instead
|
| 218 |
+
if hasattr(module, "enable_adapters"):
|
| 219 |
+
module.enable_adapters(enabled=enabled)
|
| 220 |
+
else:
|
| 221 |
+
module.disable_adapters = not enabled
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def delete_adapter_layers(model, adapter_name):
|
| 225 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 226 |
+
|
| 227 |
+
for module in model.modules():
|
| 228 |
+
if isinstance(module, BaseTunerLayer):
|
| 229 |
+
if hasattr(module, "delete_adapter"):
|
| 230 |
+
module.delete_adapter(adapter_name)
|
| 231 |
+
else:
|
| 232 |
+
raise ValueError(
|
| 233 |
+
"The version of PEFT you are using is not compatible, please use a version that is greater than 0.6.1"
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
# For transformers integration - we need to pop the adapter from the config
|
| 237 |
+
if getattr(model, "_hf_peft_config_loaded", False) and hasattr(model, "peft_config"):
|
| 238 |
+
model.peft_config.pop(adapter_name, None)
|
| 239 |
+
# In case all adapters are deleted, we need to delete the config
|
| 240 |
+
# and make sure to set the flag to False
|
| 241 |
+
if len(model.peft_config) == 0:
|
| 242 |
+
del model.peft_config
|
| 243 |
+
model._hf_peft_config_loaded = None
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def set_weights_and_activate_adapters(model, adapter_names, weights):
|
| 247 |
+
from peft.tuners.tuners_utils import BaseTunerLayer
|
| 248 |
+
|
| 249 |
+
def get_module_weight(weight_for_adapter, module_name):
|
| 250 |
+
if not isinstance(weight_for_adapter, dict):
|
| 251 |
+
# If weight_for_adapter is a single number, always return it.
|
| 252 |
+
return weight_for_adapter
|
| 253 |
+
|
| 254 |
+
for layer_name, weight_ in weight_for_adapter.items():
|
| 255 |
+
if layer_name in module_name:
|
| 256 |
+
return weight_
|
| 257 |
+
|
| 258 |
+
parts = module_name.split(".")
|
| 259 |
+
# e.g. key = "down_blocks.1.attentions.0"
|
| 260 |
+
key = f"{parts[0]}.{parts[1]}.attentions.{parts[3]}"
|
| 261 |
+
block_weight = weight_for_adapter.get(key, 1.0)
|
| 262 |
+
|
| 263 |
+
return block_weight
|
| 264 |
+
|
| 265 |
+
for module_name, module in model.named_modules():
|
| 266 |
+
if isinstance(module, BaseTunerLayer):
|
| 267 |
+
# For backward compatibility with previous PEFT versions, set multiple active adapters
|
| 268 |
+
if hasattr(module, "set_adapter"):
|
| 269 |
+
module.set_adapter(adapter_names)
|
| 270 |
+
else:
|
| 271 |
+
module.active_adapter = adapter_names
|
| 272 |
+
|
| 273 |
+
# Set the scaling weight for each adapter for this module
|
| 274 |
+
for adapter_name, weight in zip(adapter_names, weights):
|
| 275 |
+
module.set_scale(adapter_name, get_module_weight(weight, module_name))
|
| 276 |
+
|
| 277 |
+
|
| 278 |
+
def check_peft_version(min_version: str) -> None:
|
| 279 |
+
r"""
|
| 280 |
+
Checks if the version of PEFT is compatible.
|
| 281 |
+
|
| 282 |
+
Args:
|
| 283 |
+
version (`str`):
|
| 284 |
+
The version of PEFT to check against.
|
| 285 |
+
"""
|
| 286 |
+
if not is_peft_available():
|
| 287 |
+
raise ValueError("PEFT is not installed. Please install it with `pip install peft`")
|
| 288 |
+
|
| 289 |
+
is_peft_version_compatible = version.parse(importlib.metadata.version("peft")) > version.parse(min_version)
|
| 290 |
+
|
| 291 |
+
if not is_peft_version_compatible:
|
| 292 |
+
raise ValueError(
|
| 293 |
+
f"The version of PEFT you are using is not compatible, please use a version that is greater"
|
| 294 |
+
f" than {min_version}"
|
| 295 |
+
)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
def _create_lora_config(
|
| 299 |
+
state_dict, network_alphas, metadata, rank_pattern_dict, is_unet=True, model_state_dict=None, adapter_name=None
|
| 300 |
+
):
|
| 301 |
+
from peft import LoraConfig
|
| 302 |
+
|
| 303 |
+
if metadata is not None:
|
| 304 |
+
lora_config_kwargs = metadata
|
| 305 |
+
else:
|
| 306 |
+
lora_config_kwargs = get_peft_kwargs(
|
| 307 |
+
rank_pattern_dict,
|
| 308 |
+
network_alpha_dict=network_alphas,
|
| 309 |
+
peft_state_dict=state_dict,
|
| 310 |
+
is_unet=is_unet,
|
| 311 |
+
model_state_dict=model_state_dict,
|
| 312 |
+
adapter_name=adapter_name,
|
| 313 |
+
)
|
| 314 |
+
|
| 315 |
+
_maybe_raise_error_for_ambiguous_keys(lora_config_kwargs)
|
| 316 |
+
|
| 317 |
+
# Version checks for DoRA and lora_bias
|
| 318 |
+
if "use_dora" in lora_config_kwargs and lora_config_kwargs["use_dora"]:
|
| 319 |
+
if is_peft_version("<", "0.9.0"):
|
| 320 |
+
raise ValueError("DoRA requires PEFT >= 0.9.0. Please upgrade.")
|
| 321 |
+
|
| 322 |
+
if "lora_bias" in lora_config_kwargs and lora_config_kwargs["lora_bias"]:
|
| 323 |
+
if is_peft_version("<=", "0.13.2"):
|
| 324 |
+
raise ValueError("lora_bias requires PEFT >= 0.14.0. Please upgrade.")
|
| 325 |
+
|
| 326 |
+
try:
|
| 327 |
+
return LoraConfig(**lora_config_kwargs)
|
| 328 |
+
except TypeError as e:
|
| 329 |
+
raise TypeError("`LoraConfig` class could not be instantiated.") from e
|
| 330 |
+
|
| 331 |
+
|
| 332 |
+
def _maybe_raise_error_for_ambiguous_keys(config):
|
| 333 |
+
rank_pattern = config["rank_pattern"].copy()
|
| 334 |
+
target_modules = config["target_modules"]
|
| 335 |
+
|
| 336 |
+
for key in list(rank_pattern.keys()):
|
| 337 |
+
# try to detect ambiguity
|
| 338 |
+
# `target_modules` can also be a str, in which case this loop would loop
|
| 339 |
+
# over the chars of the str. The technically correct way to match LoRA keys
|
| 340 |
+
# in PEFT is to use LoraModel._check_target_module_exists (lora_config, key).
|
| 341 |
+
# But this cuts it for now.
|
| 342 |
+
exact_matches = [mod for mod in target_modules if mod == key]
|
| 343 |
+
substring_matches = [mod for mod in target_modules if key in mod and mod != key]
|
| 344 |
+
|
| 345 |
+
if exact_matches and substring_matches:
|
| 346 |
+
if is_peft_version("<", "0.14.1"):
|
| 347 |
+
raise ValueError(
|
| 348 |
+
"There are ambiguous keys present in this LoRA. To load it, please update your `peft` installation - `pip install -U peft`."
|
| 349 |
+
)
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
def _maybe_warn_for_unhandled_keys(incompatible_keys, adapter_name):
|
| 353 |
+
warn_msg = ""
|
| 354 |
+
if incompatible_keys is not None:
|
| 355 |
+
# Check only for unexpected keys.
|
| 356 |
+
unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
|
| 357 |
+
if unexpected_keys:
|
| 358 |
+
lora_unexpected_keys = [k for k in unexpected_keys if "lora_" in k and adapter_name in k]
|
| 359 |
+
if lora_unexpected_keys:
|
| 360 |
+
warn_msg = (
|
| 361 |
+
f"Loading adapter weights from state_dict led to unexpected keys found in the model:"
|
| 362 |
+
f" {', '.join(lora_unexpected_keys)}. "
|
| 363 |
+
)
|
| 364 |
+
|
| 365 |
+
# Filter missing keys specific to the current adapter.
|
| 366 |
+
missing_keys = getattr(incompatible_keys, "missing_keys", None)
|
| 367 |
+
if missing_keys:
|
| 368 |
+
lora_missing_keys = [k for k in missing_keys if "lora_" in k and adapter_name in k]
|
| 369 |
+
if lora_missing_keys:
|
| 370 |
+
warn_msg += (
|
| 371 |
+
f"Loading adapter weights from state_dict led to missing keys in the model:"
|
| 372 |
+
f" {', '.join(lora_missing_keys)}."
|
| 373 |
+
)
|
| 374 |
+
|
| 375 |
+
if warn_msg:
|
| 376 |
+
logger.warning(warn_msg)
|
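For reference, the ambiguity that `_maybe_raise_error_for_ambiguous_keys` guards against is a rank-pattern key that is both an exact target-module name and a substring of another target module, which older PEFT versions resolve inconsistently. Below is a minimal, self-contained sketch of that same matching check; the toy `config` dict is illustrative only, not taken from a real checkpoint.

```python
# Illustrative only: a toy LoraConfig-kwargs dict with an ambiguous rank_pattern key.
config = {
    "rank_pattern": {"proj": 8},
    "target_modules": ["proj", "proj_out", "to_q"],
}

for key in list(config["rank_pattern"].keys()):
    exact_matches = [m for m in config["target_modules"] if m == key]
    substring_matches = [m for m in config["target_modules"] if key in m and m != key]
    if exact_matches and substring_matches:
        # With peft < 0.14.1 this situation is rejected; newer versions disambiguate it.
        print(f"ambiguous key: {key} (exact={exact_matches}, substrings={substring_matches})")
```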
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/pil_utils.py
ADDED
@@ -0,0 +1,67 @@
from typing import List

import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image


if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """
    Convert a torch image to a PIL image.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """
    Convert a numpy image or a batch of images to a PIL image.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images


def make_image_grid(images: List[PIL.Image.Image], rows: int, cols: int, resize: int = None) -> PIL.Image.Image:
    """
    Prepares a single grid of images. Useful for visualization purposes.
    """
    assert len(images) == rows * cols

    if resize is not None:
        images = [img.resize((resize, resize)) for img in images]

    w, h = images[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(images):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
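A minimal usage sketch of the helpers above; the random array stands in for decoded pipeline output and the output filename is arbitrary.

```python
import numpy as np

# Four fake 64x64 RGB "images" in [0, 1], shaped (batch, height, width, channels)
# as numpy_to_pil expects; a real pipeline would produce these from decoded latents.
images = np.random.rand(4, 64, 64, 3)

pil_images = numpy_to_pil(images)                    # list of 4 PIL.Image.Image
grid = make_image_grid(pil_images, rows=2, cols=2)   # 2x2 contact sheet
grid.save("grid.png")
```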
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/remote_utils.py
ADDED
@@ -0,0 +1,425 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import json
from typing import List, Literal, Optional, Union, cast

import requests

from .deprecation_utils import deprecate
from .import_utils import is_safetensors_available, is_torch_available


if is_torch_available():
    import torch

    from ..image_processor import VaeImageProcessor
    from ..video_processor import VideoProcessor

    if is_safetensors_available():
        import safetensors.torch

    DTYPE_MAP = {
        "float16": torch.float16,
        "float32": torch.float32,
        "bfloat16": torch.bfloat16,
        "uint8": torch.uint8,
    }


from PIL import Image


def detect_image_type(data: bytes) -> str:
    if data.startswith(b"\xff\xd8"):
        return "jpeg"
    elif data.startswith(b"\x89PNG\r\n\x1a\n"):
        return "png"
    elif data.startswith(b"GIF87a") or data.startswith(b"GIF89a"):
        return "gif"
    elif data.startswith(b"BM"):
        return "bmp"
    return "unknown"


def check_inputs_decode(
    endpoint: str,
    tensor: "torch.Tensor",
    processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None,
    do_scaling: bool = True,
    scaling_factor: Optional[float] = None,
    shift_factor: Optional[float] = None,
    output_type: Literal["mp4", "pil", "pt"] = "pil",
    return_type: Literal["mp4", "pil", "pt"] = "pil",
    image_format: Literal["png", "jpg"] = "jpg",
    partial_postprocess: bool = False,
    input_tensor_type: Literal["binary"] = "binary",
    output_tensor_type: Literal["binary"] = "binary",
    height: Optional[int] = None,
    width: Optional[int] = None,
):
    if tensor.ndim == 3 and height is None and width is None:
        raise ValueError("`height` and `width` required for packed latents.")
    if (
        output_type == "pt"
        and return_type == "pil"
        and not partial_postprocess
        and not isinstance(processor, (VaeImageProcessor, VideoProcessor))
    ):
        raise ValueError("`processor` is required.")
    if do_scaling and scaling_factor is None:
        deprecate(
            "do_scaling",
            "1.0.0",
            "`do_scaling` is deprecated, pass `scaling_factor` and `shift_factor` if required.",
            standard_warn=False,
        )


def postprocess_decode(
    response: requests.Response,
    processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None,
    output_type: Literal["mp4", "pil", "pt"] = "pil",
    return_type: Literal["mp4", "pil", "pt"] = "pil",
    partial_postprocess: bool = False,
):
    if output_type == "pt" or (output_type == "pil" and processor is not None):
        output_tensor = response.content
        parameters = response.headers
        shape = json.loads(parameters["shape"])
        dtype = parameters["dtype"]
        torch_dtype = DTYPE_MAP[dtype]
        output_tensor = torch.frombuffer(bytearray(output_tensor), dtype=torch_dtype).reshape(shape)
    if output_type == "pt":
        if partial_postprocess:
            if return_type == "pil":
                output = [Image.fromarray(image.numpy()) for image in output_tensor]
                if len(output) == 1:
                    output = output[0]
            elif return_type == "pt":
                output = output_tensor
        else:
            if processor is None or return_type == "pt":
                output = output_tensor
            else:
                if isinstance(processor, VideoProcessor):
                    output = cast(
                        List[Image.Image],
                        processor.postprocess_video(output_tensor, output_type="pil")[0],
                    )
                else:
                    output = cast(
                        Image.Image,
                        processor.postprocess(output_tensor, output_type="pil")[0],
                    )
    elif output_type == "pil" and return_type == "pil" and processor is None:
        output = Image.open(io.BytesIO(response.content)).convert("RGB")
        detected_format = detect_image_type(response.content)
        output.format = detected_format
    elif output_type == "pil" and processor is not None:
        if return_type == "pil":
            output = [
                Image.fromarray(image)
                for image in (output_tensor.permute(0, 2, 3, 1).float().numpy() * 255).round().astype("uint8")
            ]
        elif return_type == "pt":
            output = output_tensor
    elif output_type == "mp4" and return_type == "mp4":
        output = response.content
    return output


def prepare_decode(
    tensor: "torch.Tensor",
    processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None,
    do_scaling: bool = True,
    scaling_factor: Optional[float] = None,
    shift_factor: Optional[float] = None,
    output_type: Literal["mp4", "pil", "pt"] = "pil",
    image_format: Literal["png", "jpg"] = "jpg",
    partial_postprocess: bool = False,
    height: Optional[int] = None,
    width: Optional[int] = None,
):
    headers = {}
    parameters = {
        "image_format": image_format,
        "output_type": output_type,
        "partial_postprocess": partial_postprocess,
        "shape": list(tensor.shape),
        "dtype": str(tensor.dtype).split(".")[-1],
    }
    if do_scaling and scaling_factor is not None:
        parameters["scaling_factor"] = scaling_factor
    if do_scaling and shift_factor is not None:
        parameters["shift_factor"] = shift_factor
    if do_scaling and scaling_factor is None:
        parameters["do_scaling"] = do_scaling
    elif do_scaling and scaling_factor is None and shift_factor is None:
        parameters["do_scaling"] = do_scaling
    if height is not None and width is not None:
        parameters["height"] = height
        parameters["width"] = width
    headers["Content-Type"] = "tensor/binary"
    headers["Accept"] = "tensor/binary"
    if output_type == "pil" and image_format == "jpg" and processor is None:
        headers["Accept"] = "image/jpeg"
    elif output_type == "pil" and image_format == "png" and processor is None:
        headers["Accept"] = "image/png"
    elif output_type == "mp4":
        headers["Accept"] = "text/plain"
    tensor_data = safetensors.torch._tobytes(tensor, "tensor")
    return {"data": tensor_data, "params": parameters, "headers": headers}


def remote_decode(
    endpoint: str,
    tensor: "torch.Tensor",
    processor: Optional[Union["VaeImageProcessor", "VideoProcessor"]] = None,
    do_scaling: bool = True,
    scaling_factor: Optional[float] = None,
    shift_factor: Optional[float] = None,
    output_type: Literal["mp4", "pil", "pt"] = "pil",
    return_type: Literal["mp4", "pil", "pt"] = "pil",
    image_format: Literal["png", "jpg"] = "jpg",
    partial_postprocess: bool = False,
    input_tensor_type: Literal["binary"] = "binary",
    output_tensor_type: Literal["binary"] = "binary",
    height: Optional[int] = None,
    width: Optional[int] = None,
) -> Union[Image.Image, List[Image.Image], bytes, "torch.Tensor"]:
    """
    Hugging Face Hybrid Inference that allows running the VAE decode remotely.

    Args:
        endpoint (`str`):
            Endpoint for Remote Decode.
        tensor (`torch.Tensor`):
            Tensor to be decoded.
        processor (`VaeImageProcessor` or `VideoProcessor`, *optional*):
            Used with `return_type="pt"`, and `return_type="pil"` for Video models.
        do_scaling (`bool`, default `True`, *optional*):
            **DEPRECATED**, pass `scaling_factor`/`shift_factor` instead; still set `do_scaling=None` or
            `do_scaling=False` for no scaling until the option is removed. When `True`, scaling (e.g. `latents /
            self.vae.config.scaling_factor`) is applied remotely. If `False`, the input must be passed with
            scaling already applied.
        scaling_factor (`float`, *optional*):
            Scaling is applied when passed, e.g. [`latents /
            self.vae.config.scaling_factor`](https://github.com/huggingface/diffusers/blob/7007febae5cff000d4df9059d9cf35133e8b2ca9/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L1083C37-L1083C77).
            - SD v1: 0.18215
            - SD XL: 0.13025
            - Flux: 0.3611
            If `None`, the input must be passed with scaling already applied.
        shift_factor (`float`, *optional*):
            Shift is applied when passed, e.g. `latents + self.vae.config.shift_factor`.
            - Flux: 0.1159
            If `None`, the input must be passed with scaling already applied.
        output_type (`"mp4"` or `"pil"` or `"pt"`, default `"pil"`):
            **Endpoint** output type. Subject to change. Report feedback on preferred type.

            `"mp4"`: Supported by video models. Endpoint returns `bytes` of video. `"pil"`: Supported by image and
            video models.
            Image models: Endpoint returns `bytes` of an image in `image_format`. Video models: Endpoint returns a
            `torch.Tensor` with partial postprocessing applied.
            Requires `processor` as a flag (any `None` value will work).
            `"pt"`: Supported by image and video models. Endpoint returns a `torch.Tensor`.
            With `partial_postprocess=True` the tensor is a postprocessed `uint8` image tensor.

            Recommendations:
            `"pt"` with `partial_postprocess=True` is the smallest transfer for full quality. `"pt"` with
            `partial_postprocess=False` is the most compatible with third party code. `"pil"` with
            `image_format="jpg"` is the smallest transfer overall.

        return_type (`"mp4"` or `"pil"` or `"pt"`, default `"pil"`):
            **Function** return type.

            `"mp4"`: Function returns `bytes` of video. `"pil"`: Function returns `PIL.Image.Image`.
            With `output_type="pil"` no further processing is applied. With `output_type="pt"` a `PIL.Image.Image`
            is created: with `partial_postprocess=False` a `processor` is required, with `partial_postprocess=True`
            a `processor` is **not** required.
            `"pt"`: Function returns a `torch.Tensor`.
            A `processor` is **not** required. With `partial_postprocess=False` the tensor is `float16` or
            `bfloat16`, without denormalization. With `partial_postprocess=True` the tensor is `uint8`,
            denormalized.

        image_format (`"png"` or `"jpg"`, default `"jpg"`):
            Used with `output_type="pil"`. Endpoint returns `jpg` or `png`.

        partial_postprocess (`bool`, default `False`):
            Used with `output_type="pt"`. With `partial_postprocess=False` the tensor is `float16` or `bfloat16`,
            without denormalization. With `partial_postprocess=True` the tensor is `uint8`, denormalized.

        input_tensor_type (`"binary"`, default `"binary"`):
            Tensor transfer type.

        output_tensor_type (`"binary"`, default `"binary"`):
            Tensor transfer type.

        height (`int`, *optional*):
            Required for `"packed"` latents.

        width (`int`, *optional*):
            Required for `"packed"` latents.

    Returns:
        output (`Image.Image` or `List[Image.Image]` or `bytes` or `torch.Tensor`).
    """
    if input_tensor_type == "base64":
        deprecate(
            "input_tensor_type='base64'",
            "1.0.0",
            "input_tensor_type='base64' is deprecated. Using `binary`.",
            standard_warn=False,
        )
        input_tensor_type = "binary"
    if output_tensor_type == "base64":
        deprecate(
            "output_tensor_type='base64'",
            "1.0.0",
            "output_tensor_type='base64' is deprecated. Using `binary`.",
            standard_warn=False,
        )
        output_tensor_type = "binary"
    check_inputs_decode(
        endpoint,
        tensor,
        processor,
        do_scaling,
        scaling_factor,
        shift_factor,
        output_type,
        return_type,
        image_format,
        partial_postprocess,
        input_tensor_type,
        output_tensor_type,
        height,
        width,
    )
    kwargs = prepare_decode(
        tensor=tensor,
        processor=processor,
        do_scaling=do_scaling,
        scaling_factor=scaling_factor,
        shift_factor=shift_factor,
        output_type=output_type,
        image_format=image_format,
        partial_postprocess=partial_postprocess,
        height=height,
        width=width,
    )
    response = requests.post(endpoint, **kwargs)
    if not response.ok:
        raise RuntimeError(response.json())
    output = postprocess_decode(
        response=response,
        processor=processor,
        output_type=output_type,
        return_type=return_type,
        partial_postprocess=partial_postprocess,
    )
    return output


def check_inputs_encode(
    endpoint: str,
    image: Union["torch.Tensor", Image.Image],
    scaling_factor: Optional[float] = None,
    shift_factor: Optional[float] = None,
):
    pass


def postprocess_encode(
    response: requests.Response,
):
    output_tensor = response.content
    parameters = response.headers
    shape = json.loads(parameters["shape"])
    dtype = parameters["dtype"]
    torch_dtype = DTYPE_MAP[dtype]
    output_tensor = torch.frombuffer(bytearray(output_tensor), dtype=torch_dtype).reshape(shape)
    return output_tensor


def prepare_encode(
    image: Union["torch.Tensor", Image.Image],
    scaling_factor: Optional[float] = None,
    shift_factor: Optional[float] = None,
):
    headers = {}
    parameters = {}
    if scaling_factor is not None:
        parameters["scaling_factor"] = scaling_factor
    if shift_factor is not None:
        parameters["shift_factor"] = shift_factor
    if isinstance(image, torch.Tensor):
        data = safetensors.torch._tobytes(image.contiguous(), "tensor")
        parameters["shape"] = list(image.shape)
        parameters["dtype"] = str(image.dtype).split(".")[-1]
    else:
        buffer = io.BytesIO()
        image.save(buffer, format="PNG")
        data = buffer.getvalue()
    return {"data": data, "params": parameters, "headers": headers}


def remote_encode(
    endpoint: str,
    image: Union["torch.Tensor", Image.Image],
    scaling_factor: Optional[float] = None,
    shift_factor: Optional[float] = None,
) -> "torch.Tensor":
    """
    Hugging Face Hybrid Inference that allows running the VAE encode remotely.

    Args:
        endpoint (`str`):
            Endpoint for Remote Encode.
        image (`torch.Tensor` or `PIL.Image.Image`):
            Image to be encoded.
        scaling_factor (`float`, *optional*):
            Scaling is applied when passed, e.g. `latents * self.vae.config.scaling_factor`.
            - SD v1: 0.18215
            - SD XL: 0.13025
            - Flux: 0.3611
            If `None`, the input must be passed with scaling already applied.
        shift_factor (`float`, *optional*):
            Shift is applied when passed, e.g. `latents - self.vae.config.shift_factor`.
            - Flux: 0.1159
            If `None`, the input must be passed with scaling already applied.

    Returns:
        output (`torch.Tensor`).
    """
    check_inputs_encode(
        endpoint,
        image,
        scaling_factor,
        shift_factor,
    )
    kwargs = prepare_encode(
        image=image,
        scaling_factor=scaling_factor,
        shift_factor=shift_factor,
    )
    response = requests.post(endpoint, **kwargs)
    if not response.ok:
        raise RuntimeError(response.json())
    output = postprocess_encode(
        response=response,
    )
    return output
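A minimal usage sketch of `remote_decode` following the docstring above. The endpoint URL is a hypothetical placeholder and the random latents stand in for real pipeline output; substitute the actual remote-VAE endpoint and the `vae.config.scaling_factor` of the model that produced the latents.

```python
import torch
from diffusers.utils.remote_utils import remote_decode

ENDPOINT = "https://example.com/remote-vae-decode"  # hypothetical URL, replace with a real endpoint
latents = torch.randn(1, 4, 64, 64, dtype=torch.float16)  # stand-in for SD v1 latents

image = remote_decode(
    endpoint=ENDPOINT,
    tensor=latents,
    scaling_factor=0.18215,  # SD v1 value listed in the docstring
    output_type="pil",
    return_type="pil",
    image_format="jpg",
)
image.save("decoded.jpg")
```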
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/source_code_parsing_utils.py
ADDED
@@ -0,0 +1,52 @@
import ast
import importlib
import inspect
import textwrap


class ReturnNameVisitor(ast.NodeVisitor):
    """Thanks to ChatGPT for pairing."""

    def __init__(self):
        self.return_names = []

    def visit_Return(self, node):
        # Check if the return value is a tuple.
        if isinstance(node.value, ast.Tuple):
            for elt in node.value.elts:
                if isinstance(elt, ast.Name):
                    self.return_names.append(elt.id)
                else:
                    try:
                        self.return_names.append(ast.unparse(elt))
                    except Exception:
                        self.return_names.append(str(elt))
        else:
            if isinstance(node.value, ast.Name):
                self.return_names.append(node.value.id)
            else:
                try:
                    self.return_names.append(ast.unparse(node.value))
                except Exception:
                    self.return_names.append(str(node.value))
        self.generic_visit(node)

    def _determine_parent_module(self, cls):
        from diffusers import DiffusionPipeline
        from diffusers.models.modeling_utils import ModelMixin

        if issubclass(cls, DiffusionPipeline):
            return "pipelines"
        elif issubclass(cls, ModelMixin):
            return "models"
        else:
            raise NotImplementedError

    def get_ast_tree(self, cls, attribute_name="encode_prompt"):
        parent_module_name = self._determine_parent_module(cls)
        main_module = importlib.import_module(f"diffusers.{parent_module_name}")
        current_cls_module = getattr(main_module, cls.__name__)
        source_code = inspect.getsource(getattr(current_cls_module, attribute_name))
        source_code = textwrap.dedent(source_code)
        tree = ast.parse(source_code)
        return tree
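A small sketch of how the visitor collects return-value names from parsed source. The toy function below is illustrative (it stands in for a pipeline's `encode_prompt`); `get_ast_tree` would normally build the tree from a real diffusers pipeline or model class instead.

```python
import ast
import inspect


def toy_encode_prompt(prompt):  # stand-in for a pipeline's encode_prompt
    prompt_embeds = prompt.upper()
    negative_prompt_embeds = prompt.lower()
    return prompt_embeds, negative_prompt_embeds


visitor = ReturnNameVisitor()
tree = ast.parse(inspect.getsource(toy_encode_prompt))
visitor.visit(tree)
print(visitor.return_names)  # ['prompt_embeds', 'negative_prompt_embeds']
```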
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/state_dict_utils.py
ADDED
@@ -0,0 +1,366 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
State dict utilities: utility methods for converting state dicts easily
"""

import enum
import json

from .import_utils import is_torch_available
from .logging import get_logger


if is_torch_available():
    import torch


logger = get_logger(__name__)


class StateDictType(enum.Enum):
    """
    The mode to use when converting state dicts.
    """

    DIFFUSERS_OLD = "diffusers_old"
    KOHYA_SS = "kohya_ss"
    PEFT = "peft"
    DIFFUSERS = "diffusers"


# We need to define a proper mapping for Unet since it uses different output keys than text encoder
# e.g. to_q_lora -> q_proj / to_q
UNET_TO_DIFFUSERS = {
    ".to_out_lora.up": ".to_out.0.lora_B",
    ".to_out_lora.down": ".to_out.0.lora_A",
    ".to_q_lora.down": ".to_q.lora_A",
    ".to_q_lora.up": ".to_q.lora_B",
    ".to_k_lora.down": ".to_k.lora_A",
    ".to_k_lora.up": ".to_k.lora_B",
    ".to_v_lora.down": ".to_v.lora_A",
    ".to_v_lora.up": ".to_v.lora_B",
    ".lora.up": ".lora_B",
    ".lora.down": ".lora_A",
    ".to_out.lora_magnitude_vector": ".to_out.0.lora_magnitude_vector",
}


DIFFUSERS_TO_PEFT = {
    ".q_proj.lora_linear_layer.up": ".q_proj.lora_B",
    ".q_proj.lora_linear_layer.down": ".q_proj.lora_A",
    ".k_proj.lora_linear_layer.up": ".k_proj.lora_B",
    ".k_proj.lora_linear_layer.down": ".k_proj.lora_A",
    ".v_proj.lora_linear_layer.up": ".v_proj.lora_B",
    ".v_proj.lora_linear_layer.down": ".v_proj.lora_A",
    ".out_proj.lora_linear_layer.up": ".out_proj.lora_B",
    ".out_proj.lora_linear_layer.down": ".out_proj.lora_A",
    ".lora_linear_layer.up": ".lora_B",
    ".lora_linear_layer.down": ".lora_A",
    "text_projection.lora.down.weight": "text_projection.lora_A.weight",
    "text_projection.lora.up.weight": "text_projection.lora_B.weight",
}

DIFFUSERS_OLD_TO_PEFT = {
    ".to_q_lora.up": ".q_proj.lora_B",
    ".to_q_lora.down": ".q_proj.lora_A",
    ".to_k_lora.up": ".k_proj.lora_B",
    ".to_k_lora.down": ".k_proj.lora_A",
    ".to_v_lora.up": ".v_proj.lora_B",
    ".to_v_lora.down": ".v_proj.lora_A",
    ".to_out_lora.up": ".out_proj.lora_B",
    ".to_out_lora.down": ".out_proj.lora_A",
    ".lora_linear_layer.up": ".lora_B",
    ".lora_linear_layer.down": ".lora_A",
}

PEFT_TO_DIFFUSERS = {
    ".q_proj.lora_B": ".q_proj.lora_linear_layer.up",
    ".q_proj.lora_A": ".q_proj.lora_linear_layer.down",
    ".k_proj.lora_B": ".k_proj.lora_linear_layer.up",
    ".k_proj.lora_A": ".k_proj.lora_linear_layer.down",
    ".v_proj.lora_B": ".v_proj.lora_linear_layer.up",
    ".v_proj.lora_A": ".v_proj.lora_linear_layer.down",
    ".out_proj.lora_B": ".out_proj.lora_linear_layer.up",
    ".out_proj.lora_A": ".out_proj.lora_linear_layer.down",
    "to_k.lora_A": "to_k.lora.down",
    "to_k.lora_B": "to_k.lora.up",
    "to_q.lora_A": "to_q.lora.down",
    "to_q.lora_B": "to_q.lora.up",
    "to_v.lora_A": "to_v.lora.down",
    "to_v.lora_B": "to_v.lora.up",
    "to_out.0.lora_A": "to_out.0.lora.down",
    "to_out.0.lora_B": "to_out.0.lora.up",
}

DIFFUSERS_OLD_TO_DIFFUSERS = {
    ".to_q_lora.up": ".q_proj.lora_linear_layer.up",
    ".to_q_lora.down": ".q_proj.lora_linear_layer.down",
    ".to_k_lora.up": ".k_proj.lora_linear_layer.up",
    ".to_k_lora.down": ".k_proj.lora_linear_layer.down",
    ".to_v_lora.up": ".v_proj.lora_linear_layer.up",
    ".to_v_lora.down": ".v_proj.lora_linear_layer.down",
    ".to_out_lora.up": ".out_proj.lora_linear_layer.up",
    ".to_out_lora.down": ".out_proj.lora_linear_layer.down",
    ".to_k.lora_magnitude_vector": ".k_proj.lora_magnitude_vector",
    ".to_v.lora_magnitude_vector": ".v_proj.lora_magnitude_vector",
    ".to_q.lora_magnitude_vector": ".q_proj.lora_magnitude_vector",
    ".to_out.lora_magnitude_vector": ".out_proj.lora_magnitude_vector",
}

PEFT_TO_KOHYA_SS = {
    "lora_A": "lora_down",
    "lora_B": "lora_up",
    # This is not a comprehensive dict as kohya format requires replacing `.` with `_` in keys,
    # adding prefixes and adding alpha values
    # Check `convert_state_dict_to_kohya` for more
}

PEFT_STATE_DICT_MAPPINGS = {
    StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_PEFT,
    StateDictType.DIFFUSERS: DIFFUSERS_TO_PEFT,
}

DIFFUSERS_STATE_DICT_MAPPINGS = {
    StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS,
    StateDictType.PEFT: PEFT_TO_DIFFUSERS,
}

KOHYA_STATE_DICT_MAPPINGS = {StateDictType.PEFT: PEFT_TO_KOHYA_SS}

KEYS_TO_ALWAYS_REPLACE = {
    ".processor.": ".",
}


def convert_state_dict(state_dict, mapping):
    r"""
    Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values.

    Args:
        state_dict (`dict[str, torch.Tensor]`):
            The state dict to convert.
        mapping (`dict[str, str]`):
            The mapping to use for conversion, the mapping should be a dictionary with the following structure:
                - key: the pattern to replace
                - value: the pattern to replace with

    Returns:
        converted_state_dict (`dict`)
            The converted state dict.
    """
    converted_state_dict = {}
    for k, v in state_dict.items():
        # First, filter out the keys that we always want to replace
        for pattern in KEYS_TO_ALWAYS_REPLACE.keys():
            if pattern in k:
                new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern]
                k = k.replace(pattern, new_pattern)

        for pattern in mapping.keys():
            if pattern in k:
                new_pattern = mapping[pattern]
                k = k.replace(pattern, new_pattern)
                break
        converted_state_dict[k] = v
    return converted_state_dict


def convert_state_dict_to_peft(state_dict, original_type=None, **kwargs):
    r"""
    Converts a state dict to the PEFT format. The state dict can be from the previous diffusers format
    (`DIFFUSERS_OLD`) or the new diffusers format (`DIFFUSERS`). The method only supports the conversion from
    diffusers old/new to PEFT for now.

    Args:
        state_dict (`dict[str, torch.Tensor]`):
            The state dict to convert.
        original_type (`StateDictType`, *optional*):
            The original type of the state dict; if not provided, the method will try to infer it automatically.
    """
    if original_type is None:
        # Old diffusers to PEFT
        if any("to_out_lora" in k for k in state_dict.keys()):
            original_type = StateDictType.DIFFUSERS_OLD
        elif any("lora_linear_layer" in k for k in state_dict.keys()):
            original_type = StateDictType.DIFFUSERS
        else:
            raise ValueError("Could not automatically infer state dict type")

    if original_type not in PEFT_STATE_DICT_MAPPINGS.keys():
        raise ValueError(f"Original type {original_type} is not supported")

    mapping = PEFT_STATE_DICT_MAPPINGS[original_type]
    return convert_state_dict(state_dict, mapping)


def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs):
    r"""
    Converts a state dict to the new diffusers format. The state dict can be from the previous diffusers format
    (`DIFFUSERS_OLD`), the PEFT format (`PEFT`) or the new diffusers format (`DIFFUSERS`). In the last case the
    method will return the state dict as is.

    The method only supports the conversion from diffusers old and PEFT to diffusers new for now.

    Args:
        state_dict (`dict[str, torch.Tensor]`):
            The state dict to convert.
        original_type (`StateDictType`, *optional*):
            The original type of the state dict; if not provided, the method will try to infer it automatically.
        kwargs (`dict`, *args*):
            Additional arguments to pass to the method.

            - **adapter_name**: For example, in case of PEFT, some keys will be prepended
                with the adapter name, therefore needing special handling. By default PEFT also takes care of that
                in the `get_peft_model_state_dict` method:
                https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92
                but we add it here in case we don't want to rely on that method.
    """
    peft_adapter_name = kwargs.pop("adapter_name", None)
    if peft_adapter_name is not None:
        peft_adapter_name = "." + peft_adapter_name
    else:
        peft_adapter_name = ""

    if original_type is None:
        # Old diffusers to PEFT
        if any("to_out_lora" in k for k in state_dict.keys()):
            original_type = StateDictType.DIFFUSERS_OLD
        elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()):
            original_type = StateDictType.PEFT
        elif any("lora_linear_layer" in k for k in state_dict.keys()):
            # nothing to do
            return state_dict
        else:
            raise ValueError("Could not automatically infer state dict type")

    if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys():
        raise ValueError(f"Original type {original_type} is not supported")

    mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type]
    return convert_state_dict(state_dict, mapping)


def convert_unet_state_dict_to_peft(state_dict):
    r"""
    Converts a state dict from UNet format to diffusers format - i.e. by removing some keys
    """
    mapping = UNET_TO_DIFFUSERS
    return convert_state_dict(state_dict, mapping)


def convert_all_state_dict_to_peft(state_dict):
    r"""
    Attempts `convert_state_dict_to_peft` first, and if it doesn't detect `lora_linear_layer` for a valid
    `DIFFUSERS` LoRA, for example, attempts to exclusively convert the Unet via `convert_unet_state_dict_to_peft`.
    """
    try:
        peft_dict = convert_state_dict_to_peft(state_dict)
    except Exception as e:
        if str(e) == "Could not automatically infer state dict type":
            peft_dict = convert_unet_state_dict_to_peft(state_dict)
        else:
            raise

    if not any("lora_A" in key or "lora_B" in key for key in peft_dict.keys()):
        raise ValueError("Your LoRA was not converted to PEFT")

    return peft_dict


def convert_state_dict_to_kohya(state_dict, original_type=None, **kwargs):
    r"""
    Converts a `PEFT` state dict to `Kohya` format that can be used in AUTOMATIC1111, ComfyUI, SD.Next, InvokeAI,
    etc. The method only supports the conversion from PEFT to Kohya for now.

    Args:
        state_dict (`dict[str, torch.Tensor]`):
            The state dict to convert.
        original_type (`StateDictType`, *optional*):
            The original type of the state dict; if not provided, the method will try to infer it automatically.
        kwargs (`dict`, *args*):
            Additional arguments to pass to the method.

            - **adapter_name**: For example, in case of PEFT, some keys will be prepended
                with the adapter name, therefore needing special handling. By default PEFT also takes care of that
                in the `get_peft_model_state_dict` method:
                https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92
                but we add it here in case we don't want to rely on that method.
    """
    try:
        import torch
    except ImportError:
        logger.error("Converting PEFT state dicts to Kohya requires torch to be installed.")
        raise

    peft_adapter_name = kwargs.pop("adapter_name", None)
    if peft_adapter_name is not None:
        peft_adapter_name = "." + peft_adapter_name
    else:
        peft_adapter_name = ""

    if original_type is None:
        if any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()):
            original_type = StateDictType.PEFT

    if original_type not in KOHYA_STATE_DICT_MAPPINGS.keys():
        raise ValueError(f"Original type {original_type} is not supported")

    # Use the convert_state_dict function with the appropriate mapping
    kohya_ss_partial_state_dict = convert_state_dict(state_dict, KOHYA_STATE_DICT_MAPPINGS[StateDictType.PEFT])
    kohya_ss_state_dict = {}

    # Additional logic for replacing header, alpha parameters `.` with `_` in all keys
    for kohya_key, weight in kohya_ss_partial_state_dict.items():
        if "text_encoder_2." in kohya_key:
            kohya_key = kohya_key.replace("text_encoder_2.", "lora_te2.")
        elif "text_encoder." in kohya_key:
            kohya_key = kohya_key.replace("text_encoder.", "lora_te1.")
        elif "unet" in kohya_key:
            kohya_key = kohya_key.replace("unet", "lora_unet")
        elif "lora_magnitude_vector" in kohya_key:
            kohya_key = kohya_key.replace("lora_magnitude_vector", "dora_scale")

        kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2)
        kohya_key = kohya_key.replace(peft_adapter_name, "")  # Kohya doesn't take names
        kohya_ss_state_dict[kohya_key] = weight
        if "lora_down" in kohya_key:
            alpha_key = f"{kohya_key.split('.')[0]}.alpha"
            kohya_ss_state_dict[alpha_key] = torch.tensor(len(weight))

    return kohya_ss_state_dict


def state_dict_all_zero(state_dict, filter_str=None):
    if filter_str is not None:
        if isinstance(filter_str, str):
            filter_str = [filter_str]
        state_dict = {k: v for k, v in state_dict.items() if any(f in k for f in filter_str)}

    return all(torch.all(param == 0).item() for param in state_dict.values())


def _load_sft_state_dict_metadata(model_file: str):
    import safetensors.torch

    from ..loaders.lora_base import LORA_ADAPTER_METADATA_KEY

    with safetensors.torch.safe_open(model_file, framework="pt", device="cpu") as f:
        metadata = f.metadata() or {}

    metadata.pop("format", None)
    if metadata:
        raw = metadata.get(LORA_ADAPTER_METADATA_KEY)
        return json.loads(raw) if raw else None
    else:
        return None
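A minimal sketch of the conversion helpers above in action. The tiny zero tensors below are placeholders; only the key names matter, since the converters rewrite keys according to the mappings and leave the values untouched.

```python
import torch

# Two fake LoRA weights in PEFT naming; shapes are arbitrary placeholders.
peft_sd = {
    "to_q.lora_A.weight": torch.zeros(4, 8),
    "to_q.lora_B.weight": torch.zeros(8, 4),
}

diffusers_sd = convert_state_dict_to_diffusers(peft_sd)
print(sorted(diffusers_sd))
# ['to_q.lora.down.weight', 'to_q.lora.up.weight']

print(state_dict_all_zero(peft_sd))  # True: every placeholder tensor is all zeros
```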
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/testing_utils.py
ADDED
@@ -0,0 +1,1601 @@
+import functools
+import glob
+import importlib
+import importlib.metadata
+import inspect
+import io
+import logging
+import multiprocessing
+import os
+import random
+import re
+import struct
+import sys
+import tempfile
+import time
+import unittest
+import urllib.parse
+from collections import UserDict
+from contextlib import contextmanager
+from io import BytesIO, StringIO
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Union
+
+import numpy as np
+import PIL.Image
+import PIL.ImageOps
+import requests
+from numpy.linalg import norm
+from packaging import version
+
+from .constants import DIFFUSERS_REQUEST_TIMEOUT
+from .import_utils import (
+    BACKENDS_MAPPING,
+    is_accelerate_available,
+    is_bitsandbytes_available,
+    is_compel_available,
+    is_flax_available,
+    is_gguf_available,
+    is_kernels_available,
+    is_note_seq_available,
+    is_onnx_available,
+    is_opencv_available,
+    is_optimum_quanto_available,
+    is_peft_available,
+    is_timm_available,
+    is_torch_available,
+    is_torch_version,
+    is_torchao_available,
+    is_torchsde_available,
+    is_transformers_available,
+)
+from .logging import get_logger
+
+
+if is_torch_available():
+    import torch
+
+    IS_ROCM_SYSTEM = torch.version.hip is not None
+    IS_CUDA_SYSTEM = torch.version.cuda is not None
+    IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None
+else:
+    IS_ROCM_SYSTEM = False
+    IS_CUDA_SYSTEM = False
+    IS_XPU_SYSTEM = False
+
+global_rng = random.Random()
+
+logger = get_logger(__name__)
+logger.warning(
+    "`diffusers.utils.testing_utils` is deprecated and will be removed in a future version. "
+    "Determinism and device backend utilities have been moved to `diffusers.utils.torch_utils`. "
+)
+_required_peft_version = is_peft_available() and version.parse(
+    version.parse(importlib.metadata.version("peft")).base_version
+) > version.parse("0.5")
+_required_transformers_version = is_transformers_available() and version.parse(
+    version.parse(importlib.metadata.version("transformers")).base_version
+) > version.parse("4.33")
+
+USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
+BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40))
+
+if is_torch_available():
+    import torch
+
+    # Set a backend environment variable for any extra module import required for a custom accelerator
+    if "DIFFUSERS_TEST_BACKEND" in os.environ:
+        backend = os.environ["DIFFUSERS_TEST_BACKEND"]
+        try:
+            _ = importlib.import_module(backend)
+        except ModuleNotFoundError as e:
+            raise ModuleNotFoundError(
+                f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed "
+                f"module that enables the specified backend:\n{e}"
+            ) from e
+
+    if "DIFFUSERS_TEST_DEVICE" in os.environ:
+        torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
+        try:
+            # try creating the device to see if the provided device is valid
+            _ = torch.device(torch_device)
+        except RuntimeError as e:
+            raise RuntimeError(
+                f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
+            ) from e
+        logger.info(f"torch_device overridden to {torch_device}")
+    else:
+        if torch.cuda.is_available():
+            torch_device = "cuda"
+        elif torch.xpu.is_available():
+            torch_device = "xpu"
+        else:
+            torch_device = "cpu"
+    is_torch_higher_equal_than_1_12 = version.parse(
+        version.parse(torch.__version__).base_version
+    ) >= version.parse("1.12")
+
+    if is_torch_higher_equal_than_1_12:
+        # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
+        mps_backend_registered = hasattr(torch.backends, "mps")
+        torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device
+
+    from .torch_utils import get_torch_cuda_device_capability
+
+
| 126 |
+
def torch_all_close(a, b, *args, **kwargs):
|
| 127 |
+
if not is_torch_available():
|
| 128 |
+
raise ValueError("PyTorch needs to be installed to use this function.")
|
| 129 |
+
if not torch.allclose(a, b, *args, **kwargs):
|
| 130 |
+
assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
|
| 131 |
+
return True
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def numpy_cosine_similarity_distance(a, b):
|
| 135 |
+
similarity = np.dot(a, b) / (norm(a) * norm(b))
|
| 136 |
+
distance = 1.0 - similarity.mean()
|
| 137 |
+
|
| 138 |
+
return distance
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def check_if_dicts_are_equal(dict1, dict2):
|
| 142 |
+
dict1, dict2 = dict1.copy(), dict2.copy()
|
| 143 |
+
|
| 144 |
+
for key, value in dict1.items():
|
| 145 |
+
if isinstance(value, set):
|
| 146 |
+
dict1[key] = sorted(value)
|
| 147 |
+
for key, value in dict2.items():
|
| 148 |
+
if isinstance(value, set):
|
| 149 |
+
dict2[key] = sorted(value)
|
| 150 |
+
|
| 151 |
+
for key in dict1:
|
| 152 |
+
if key not in dict2:
|
| 153 |
+
return False
|
| 154 |
+
if dict1[key] != dict2[key]:
|
| 155 |
+
return False
|
| 156 |
+
|
| 157 |
+
for key in dict2:
|
| 158 |
+
if key not in dict1:
|
| 159 |
+
return False
|
| 160 |
+
|
| 161 |
+
return True
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
def print_tensor_test(
|
| 165 |
+
tensor,
|
| 166 |
+
limit_to_slices=None,
|
| 167 |
+
max_torch_print=None,
|
| 168 |
+
filename="test_corrections.txt",
|
| 169 |
+
expected_tensor_name="expected_slice",
|
| 170 |
+
):
|
| 171 |
+
if max_torch_print:
|
| 172 |
+
torch.set_printoptions(threshold=10_000)
|
| 173 |
+
|
| 174 |
+
test_name = os.environ.get("PYTEST_CURRENT_TEST")
|
| 175 |
+
if not torch.is_tensor(tensor):
|
| 176 |
+
tensor = torch.from_numpy(tensor)
|
| 177 |
+
if limit_to_slices:
|
| 178 |
+
tensor = tensor[0, -3:, -3:, -1]
|
| 179 |
+
|
| 180 |
+
tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
|
| 181 |
+
# format is usually:
|
| 182 |
+
# expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
|
| 183 |
+
output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
|
| 184 |
+
test_file, test_class, test_fn = test_name.split("::")
|
| 185 |
+
test_fn = test_fn.split()[0]
|
| 186 |
+
with open(filename, "a") as f:
|
| 187 |
+
print("::".join([test_file, test_class, test_fn, output_str]), file=f)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def get_tests_dir(append_path=None):
|
| 191 |
+
"""
|
| 192 |
+
Args:
|
| 193 |
+
append_path: optional path to append to the tests dir path
|
| 194 |
+
Return:
|
| 195 |
+
The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
|
| 196 |
+
joined after the `tests` dir if the former is provided.
|
| 197 |
+
"""
|
| 198 |
+
# this function caller's __file__
|
| 199 |
+
caller__file__ = inspect.stack()[1][1]
|
| 200 |
+
tests_dir = os.path.abspath(os.path.dirname(caller__file__))
|
| 201 |
+
|
| 202 |
+
while not tests_dir.endswith("tests"):
|
| 203 |
+
tests_dir = os.path.dirname(tests_dir)
|
| 204 |
+
|
| 205 |
+
if append_path:
|
| 206 |
+
return Path(tests_dir, append_path).as_posix()
|
| 207 |
+
else:
|
| 208 |
+
return tests_dir
|
| 209 |
+
|
| 210 |
+
|
| 211 |
+
# Taken from the following PR:
|
| 212 |
+
# https://github.com/huggingface/accelerate/pull/1964
|
| 213 |
+
def str_to_bool(value) -> int:
|
| 214 |
+
"""
|
| 215 |
+
Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`,
|
| 216 |
+
`on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
|
| 217 |
+
"""
|
| 218 |
+
value = value.lower()
|
| 219 |
+
if value in ("y", "yes", "t", "true", "on", "1"):
|
| 220 |
+
return 1
|
| 221 |
+
elif value in ("n", "no", "f", "false", "off", "0"):
|
| 222 |
+
return 0
|
| 223 |
+
else:
|
| 224 |
+
raise ValueError(f"invalid truth value {value}")
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
def parse_flag_from_env(key, default=False):
|
| 228 |
+
try:
|
| 229 |
+
value = os.environ[key]
|
| 230 |
+
except KeyError:
|
| 231 |
+
# KEY isn't set, default to `default`.
|
| 232 |
+
_value = default
|
| 233 |
+
else:
|
| 234 |
+
# KEY is set, convert it to True or False.
|
| 235 |
+
try:
|
| 236 |
+
_value = str_to_bool(value)
|
| 237 |
+
except ValueError:
|
| 238 |
+
# More values are supported, but let's keep the message simple.
|
| 239 |
+
raise ValueError(f"If set, {key} must be yes or no.")
|
| 240 |
+
return _value
|
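# Usage sketch (editor's note, not part of the original file): how the boolean
# environment flags defined below are typically read. `MY_CUSTOM_FLAG` is a
# hypothetical variable name used only for illustration.
#
#     _run_my_checks = parse_flag_from_env("MY_CUSTOM_FLAG", default=False)
#     # str_to_bool("yes") -> 1, str_to_bool("off") -> 0, anything else raises ValueError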
| 241 |
+
|
| 242 |
+
|
| 243 |
+
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
|
| 244 |
+
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
|
| 245 |
+
_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False)
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def floats_tensor(shape, scale=1.0, rng=None, name=None):
|
| 249 |
+
"""Creates a random float32 tensor"""
|
| 250 |
+
if rng is None:
|
| 251 |
+
rng = global_rng
|
| 252 |
+
|
| 253 |
+
total_dims = 1
|
| 254 |
+
for dim in shape:
|
| 255 |
+
total_dims *= dim
|
| 256 |
+
|
| 257 |
+
values = []
|
| 258 |
+
for _ in range(total_dims):
|
| 259 |
+
values.append(rng.random() * scale)
|
| 260 |
+
|
| 261 |
+
return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
|
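# Usage sketch (editor's note): building a small deterministic dummy tensor for a
# pipeline test; the shape here is arbitrary and chosen only for illustration.
#
#     sample = floats_tensor((1, 4, 8, 8), scale=1.0, rng=random.Random(0))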
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def slow(test_case):
|
| 265 |
+
"""
|
| 266 |
+
Decorator marking a test as slow.
|
| 267 |
+
|
| 268 |
+
Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
|
| 269 |
+
|
| 270 |
+
"""
|
| 271 |
+
return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def nightly(test_case):
|
| 275 |
+
"""
|
| 276 |
+
Decorator marking a test that runs nightly in the diffusers CI.
|
| 277 |
+
|
| 278 |
+
Nightly tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.
|
| 279 |
+
|
| 280 |
+
"""
|
| 281 |
+
return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def is_torch_compile(test_case):
|
| 285 |
+
"""
|
| 286 |
+
Decorator marking a test that runs compile tests in the diffusers CI.
|
| 287 |
+
|
| 288 |
+
Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them.
|
| 289 |
+
|
| 290 |
+
"""
|
| 291 |
+
return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
def require_torch(test_case):
|
| 295 |
+
"""
|
| 296 |
+
Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
|
| 297 |
+
"""
|
| 298 |
+
return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
def require_torch_2(test_case):
|
| 302 |
+
"""
|
| 303 |
+
Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
|
| 304 |
+
"""
|
| 305 |
+
return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
|
| 306 |
+
test_case
|
| 307 |
+
)
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def require_torch_version_greater_equal(torch_version):
|
| 311 |
+
"""Decorator marking a test that requires torch with a specific version or greater."""
|
| 312 |
+
|
| 313 |
+
def decorator(test_case):
|
| 314 |
+
correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version)
|
| 315 |
+
return unittest.skipUnless(
|
| 316 |
+
correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}"
|
| 317 |
+
)(test_case)
|
| 318 |
+
|
| 319 |
+
return decorator
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
def require_torch_version_greater(torch_version):
|
| 323 |
+
"""Decorator marking a test that requires torch with a specific version greater."""
|
| 324 |
+
|
| 325 |
+
def decorator(test_case):
|
| 326 |
+
correct_torch_version = is_torch_available() and is_torch_version(">", torch_version)
|
| 327 |
+
return unittest.skipUnless(
|
| 328 |
+
correct_torch_version, f"test requires torch with the version greater than {torch_version}"
|
| 329 |
+
)(test_case)
|
| 330 |
+
|
| 331 |
+
return decorator
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def require_torch_gpu(test_case):
|
| 335 |
+
"""Decorator marking a test that requires CUDA and PyTorch."""
|
| 336 |
+
return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
|
| 337 |
+
test_case
|
| 338 |
+
)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def require_torch_cuda_compatibility(expected_compute_capability):
|
| 342 |
+
def decorator(test_case):
|
| 343 |
+
if not torch.cuda.is_available():
return test_case
|
| 344 |
+
current_compute_capability = get_torch_cuda_device_capability()
|
| 345 |
+
return unittest.skipUnless(
|
| 346 |
+
float(current_compute_capability) == float(expected_compute_capability),
|
| 347 |
+
"Test not supported for this compute capability.",
|
| 348 |
+
)(test_case)
|
| 349 |
+
|
| 350 |
+
return decorator
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
# These decorators are for accelerator-specific behaviours that are not GPU-specific
|
| 354 |
+
def require_torch_accelerator(test_case):
|
| 355 |
+
"""Decorator marking a test that requires an accelerator backend and PyTorch."""
|
| 356 |
+
return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")(
|
| 357 |
+
test_case
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def require_torch_multi_gpu(test_case):
|
| 362 |
+
"""
|
| 363 |
+
Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
|
| 364 |
+
multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests
|
| 365 |
+
-k "multi_gpu"
|
| 366 |
+
"""
|
| 367 |
+
if not is_torch_available():
|
| 368 |
+
return unittest.skip("test requires PyTorch")(test_case)
|
| 369 |
+
|
| 370 |
+
import torch
|
| 371 |
+
|
| 372 |
+
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def require_torch_multi_accelerator(test_case):
|
| 376 |
+
"""
|
| 377 |
+
Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine
|
| 378 |
+
without multiple hardware accelerators.
|
| 379 |
+
"""
|
| 380 |
+
if not is_torch_available():
|
| 381 |
+
return unittest.skip("test requires PyTorch")(test_case)
|
| 382 |
+
|
| 383 |
+
import torch
|
| 384 |
+
|
| 385 |
+
return unittest.skipUnless(
|
| 386 |
+
torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators"
|
| 387 |
+
)(test_case)
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def require_torch_accelerator_with_fp16(test_case):
|
| 391 |
+
"""Decorator marking a test that requires an accelerator with support for the FP16 data type."""
|
| 392 |
+
return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")(
|
| 393 |
+
test_case
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def require_torch_accelerator_with_fp64(test_case):
|
| 398 |
+
"""Decorator marking a test that requires an accelerator with support for the FP64 data type."""
|
| 399 |
+
return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")(
|
| 400 |
+
test_case
|
| 401 |
+
)
|
| 402 |
+
|
| 403 |
+
|
| 404 |
+
def require_big_gpu_with_torch_cuda(test_case):
|
| 405 |
+
"""
|
| 406 |
+
Decorator marking a test that requires a bigger GPU (at least `BIG_GPU_MEMORY` GB, 40 by default) for execution. Some example pipelines: Flux, SD3, Cog,
|
| 407 |
+
etc.
|
| 408 |
+
"""
|
| 409 |
+
if not is_torch_available():
|
| 410 |
+
return unittest.skip("test requires PyTorch")(test_case)
|
| 411 |
+
|
| 412 |
+
import torch
|
| 413 |
+
|
| 414 |
+
if not torch.cuda.is_available():
|
| 415 |
+
return unittest.skip("test requires PyTorch CUDA")(test_case)
|
| 416 |
+
|
| 417 |
+
device_properties = torch.cuda.get_device_properties(0)
|
| 418 |
+
total_memory = device_properties.total_memory / (1024**3)
|
| 419 |
+
return unittest.skipUnless(
|
| 420 |
+
total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory"
|
| 421 |
+
)(test_case)
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def require_big_accelerator(test_case):
|
| 425 |
+
"""
|
| 426 |
+
Decorator marking a test that requires a bigger hardware accelerator (at least `BIG_GPU_MEMORY` GB, 40 by default) for execution. Some example pipelines:
|
| 427 |
+
Flux, SD3, Cog, etc.
|
| 428 |
+
"""
|
| 429 |
+
import pytest
|
| 430 |
+
|
| 431 |
+
test_case = pytest.mark.big_accelerator(test_case)
|
| 432 |
+
|
| 433 |
+
if not is_torch_available():
|
| 434 |
+
return unittest.skip("test requires PyTorch")(test_case)
|
| 435 |
+
|
| 436 |
+
import torch
|
| 437 |
+
|
| 438 |
+
if not (torch.cuda.is_available() or torch.xpu.is_available()):
|
| 439 |
+
return unittest.skip("test requires PyTorch CUDA")(test_case)
|
| 440 |
+
|
| 441 |
+
if torch.xpu.is_available():
|
| 442 |
+
device_properties = torch.xpu.get_device_properties(0)
|
| 443 |
+
else:
|
| 444 |
+
device_properties = torch.cuda.get_device_properties(0)
|
| 445 |
+
|
| 446 |
+
total_memory = device_properties.total_memory / (1024**3)
|
| 447 |
+
return unittest.skipUnless(
|
| 448 |
+
total_memory >= BIG_GPU_MEMORY,
|
| 449 |
+
f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory",
|
| 450 |
+
)(test_case)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def require_torch_accelerator_with_training(test_case):
|
| 454 |
+
"""Decorator marking a test that requires an accelerator with support for training."""
|
| 455 |
+
return unittest.skipUnless(
|
| 456 |
+
is_torch_available() and backend_supports_training(torch_device),
|
| 457 |
+
"test requires accelerator with training support",
|
| 458 |
+
)(test_case)
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def skip_mps(test_case):
|
| 462 |
+
"""Decorator marking a test to skip if torch_device is 'mps'"""
|
| 463 |
+
return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def require_flax(test_case):
|
| 467 |
+
"""
|
| 468 |
+
Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
|
| 469 |
+
"""
|
| 470 |
+
return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)
|
| 471 |
+
|
| 472 |
+
|
| 473 |
+
def require_compel(test_case):
|
| 474 |
+
"""
|
| 475 |
+
Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when
|
| 476 |
+
the library is not installed.
|
| 477 |
+
"""
|
| 478 |
+
return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case)
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
def require_onnxruntime(test_case):
|
| 482 |
+
"""
|
| 483 |
+
Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed.
|
| 484 |
+
"""
|
| 485 |
+
return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case)
|
| 486 |
+
|
| 487 |
+
|
| 488 |
+
def require_note_seq(test_case):
|
| 489 |
+
"""
|
| 490 |
+
Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed.
|
| 491 |
+
"""
|
| 492 |
+
return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def require_accelerator(test_case):
|
| 496 |
+
"""
|
| 497 |
+
Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when there are no
|
| 498 |
+
hardware accelerators available.
|
| 499 |
+
"""
|
| 500 |
+
return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)
|
| 501 |
+
|
| 502 |
+
|
| 503 |
+
def require_torchsde(test_case):
|
| 504 |
+
"""
|
| 505 |
+
Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
|
| 506 |
+
"""
|
| 507 |
+
return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case)
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def require_peft_backend(test_case):
|
| 511 |
+
"""
|
| 512 |
+
Decorator marking a test that requires the PEFT backend; this would require some specific versions of PEFT and
|
| 513 |
+
transformers.
|
| 514 |
+
"""
|
| 515 |
+
return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case)
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
def require_timm(test_case):
|
| 519 |
+
"""
|
| 520 |
+
Decorator marking a test that requires timm. These tests are skipped when timm isn't installed.
|
| 521 |
+
"""
|
| 522 |
+
return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case)
|
| 523 |
+
|
| 524 |
+
|
| 525 |
+
def require_bitsandbytes(test_case):
|
| 526 |
+
"""
|
| 527 |
+
Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed.
|
| 528 |
+
"""
|
| 529 |
+
return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case)
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def require_quanto(test_case):
|
| 533 |
+
"""
|
| 534 |
+
Decorator marking a test that requires quanto. These tests are skipped when quanto isn't installed.
|
| 535 |
+
"""
|
| 536 |
+
return unittest.skipUnless(is_optimum_quanto_available(), "test requires quanto")(test_case)
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def require_accelerate(test_case):
|
| 540 |
+
"""
|
| 541 |
+
Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
|
| 542 |
+
"""
|
| 543 |
+
return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def require_peft_version_greater(peft_version):
|
| 547 |
+
"""
|
| 548 |
+
Decorator marking a test that requires the PEFT backend with a specific version; this would require some specific
|
| 549 |
+
versions of PEFT and transformers.
|
| 550 |
+
"""
|
| 551 |
+
|
| 552 |
+
def decorator(test_case):
|
| 553 |
+
correct_peft_version = is_peft_available() and version.parse(
|
| 554 |
+
version.parse(importlib.metadata.version("peft")).base_version
|
| 555 |
+
) > version.parse(peft_version)
|
| 556 |
+
return unittest.skipUnless(
|
| 557 |
+
correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}"
|
| 558 |
+
)(test_case)
|
| 559 |
+
|
| 560 |
+
return decorator
|
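# Usage sketch (editor's note): the version-gated helpers above are decorator
# factories, so they are called with a version string before decorating. The
# test name and version below are hypothetical.
#
#     @require_peft_version_greater("0.9.0")
#     def test_lora_hotswap(self):
#         ...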
| 561 |
+
|
| 562 |
+
|
| 563 |
+
def require_transformers_version_greater(transformers_version):
|
| 564 |
+
"""
|
| 565 |
+
Decorator marking a test that requires transformers with a specific version; this would require a specific
|
| 566 |
+
version of transformers.
|
| 567 |
+
"""
|
| 568 |
+
|
| 569 |
+
def decorator(test_case):
|
| 570 |
+
correct_transformers_version = is_transformers_available() and version.parse(
|
| 571 |
+
version.parse(importlib.metadata.version("transformers")).base_version
|
| 572 |
+
) > version.parse(transformers_version)
|
| 573 |
+
return unittest.skipUnless(
|
| 574 |
+
correct_transformers_version,
|
| 575 |
+
f"test requires transformers with the version greater than {transformers_version}",
|
| 576 |
+
)(test_case)
|
| 577 |
+
|
| 578 |
+
return decorator
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
def require_accelerate_version_greater(accelerate_version):
|
| 582 |
+
def decorator(test_case):
|
| 583 |
+
correct_accelerate_version = is_accelerate_available() and version.parse(
|
| 584 |
+
version.parse(importlib.metadata.version("accelerate")).base_version
|
| 585 |
+
) > version.parse(accelerate_version)
|
| 586 |
+
return unittest.skipUnless(
|
| 587 |
+
correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}."
|
| 588 |
+
)(test_case)
|
| 589 |
+
|
| 590 |
+
return decorator
|
| 591 |
+
|
| 592 |
+
|
| 593 |
+
def require_bitsandbytes_version_greater(bnb_version):
|
| 594 |
+
def decorator(test_case):
|
| 595 |
+
correct_bnb_version = is_bitsandbytes_available() and version.parse(
|
| 596 |
+
version.parse(importlib.metadata.version("bitsandbytes")).base_version
|
| 597 |
+
) > version.parse(bnb_version)
|
| 598 |
+
return unittest.skipUnless(
|
| 599 |
+
correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}."
|
| 600 |
+
)(test_case)
|
| 601 |
+
|
| 602 |
+
return decorator
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
def require_hf_hub_version_greater(hf_hub_version):
|
| 606 |
+
def decorator(test_case):
|
| 607 |
+
correct_hf_hub_version = version.parse(
|
| 608 |
+
version.parse(importlib.metadata.version("huggingface_hub")).base_version
|
| 609 |
+
) > version.parse(hf_hub_version)
|
| 610 |
+
return unittest.skipUnless(
|
| 611 |
+
correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}."
|
| 612 |
+
)(test_case)
|
| 613 |
+
|
| 614 |
+
return decorator
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
def require_gguf_version_greater_or_equal(gguf_version):
|
| 618 |
+
def decorator(test_case):
|
| 619 |
+
correct_gguf_version = is_gguf_available() and version.parse(
|
| 620 |
+
version.parse(importlib.metadata.version("gguf")).base_version
|
| 621 |
+
) >= version.parse(gguf_version)
|
| 622 |
+
return unittest.skipUnless(
|
| 623 |
+
correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}."
|
| 624 |
+
)(test_case)
|
| 625 |
+
|
| 626 |
+
return decorator
|
| 627 |
+
|
| 628 |
+
|
| 629 |
+
def require_torchao_version_greater_or_equal(torchao_version):
|
| 630 |
+
def decorator(test_case):
|
| 631 |
+
correct_torchao_version = is_torchao_available() and version.parse(
|
| 632 |
+
version.parse(importlib.metadata.version("torchao")).base_version
|
| 633 |
+
) >= version.parse(torchao_version)
|
| 634 |
+
return unittest.skipUnless(
|
| 635 |
+
correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}."
|
| 636 |
+
)(test_case)
|
| 637 |
+
|
| 638 |
+
return decorator
|
| 639 |
+
|
| 640 |
+
|
| 641 |
+
def require_kernels_version_greater_or_equal(kernels_version):
|
| 642 |
+
def decorator(test_case):
|
| 643 |
+
correct_kernels_version = is_kernels_available() and version.parse(
|
| 644 |
+
version.parse(importlib.metadata.version("kernels")).base_version
|
| 645 |
+
) >= version.parse(kernels_version)
|
| 646 |
+
return unittest.skipUnless(
|
| 647 |
+
correct_kernels_version, f"Test requires kernels with version greater than {kernels_version}."
|
| 648 |
+
)(test_case)
|
| 649 |
+
|
| 650 |
+
return decorator
|
| 651 |
+
|
| 652 |
+
|
| 653 |
+
def deprecate_after_peft_backend(test_case):
|
| 654 |
+
"""
|
| 655 |
+
Decorator marking a test that is skipped once the PEFT backend is in use
|
| 656 |
+
"""
|
| 657 |
+
return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case)
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
def get_python_version():
|
| 661 |
+
sys_info = sys.version_info
|
| 662 |
+
major, minor = sys_info.major, sys_info.minor
|
| 663 |
+
return major, minor
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
|
| 667 |
+
if isinstance(arry, str):
|
| 668 |
+
if local_path is not None:
|
| 669 |
+
# local_path can be passed to correct images of tests
|
| 670 |
+
return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix()
|
| 671 |
+
elif arry.startswith("http://") or arry.startswith("https://"):
|
| 672 |
+
response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT)
|
| 673 |
+
response.raise_for_status()
|
| 674 |
+
arry = np.load(BytesIO(response.content))
|
| 675 |
+
elif os.path.isfile(arry):
|
| 676 |
+
arry = np.load(arry)
|
| 677 |
+
else:
|
| 678 |
+
raise ValueError(
|
| 679 |
+
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path"
|
| 680 |
+
)
|
| 681 |
+
elif isinstance(arry, np.ndarray):
|
| 682 |
+
pass
|
| 683 |
+
else:
|
| 684 |
+
raise ValueError(
|
| 685 |
+
"Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a"
|
| 686 |
+
" ndarray."
|
| 687 |
+
)
|
| 688 |
+
|
| 689 |
+
return arry
|
| 690 |
+
|
| 691 |
+
|
| 692 |
+
def load_pt(url: str, map_location: Optional[str] = None, weights_only: Optional[bool] = True):
|
| 693 |
+
response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT)
|
| 694 |
+
response.raise_for_status()
|
| 695 |
+
arry = torch.load(BytesIO(response.content), map_location=map_location, weights_only=weights_only)
|
| 696 |
+
return arry
|
| 697 |
+
|
| 698 |
+
|
| 699 |
+
def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
|
| 700 |
+
"""
|
| 701 |
+
Loads `image` to a PIL Image.
|
| 702 |
+
|
| 703 |
+
Args:
|
| 704 |
+
image (`str` or `PIL.Image.Image`):
|
| 705 |
+
The image to convert to the PIL Image format.
|
| 706 |
+
Returns:
|
| 707 |
+
`PIL.Image.Image`:
|
| 708 |
+
A PIL Image.
|
| 709 |
+
"""
|
| 710 |
+
if isinstance(image, str):
|
| 711 |
+
if image.startswith("http://") or image.startswith("https://"):
|
| 712 |
+
image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw)
|
| 713 |
+
elif os.path.isfile(image):
|
| 714 |
+
image = PIL.Image.open(image)
|
| 715 |
+
else:
|
| 716 |
+
raise ValueError(
|
| 717 |
+
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
|
| 718 |
+
)
|
| 719 |
+
elif isinstance(image, PIL.Image.Image):
|
| 720 |
+
image = image
|
| 721 |
+
else:
|
| 722 |
+
raise ValueError(
|
| 723 |
+
"Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
|
| 724 |
+
)
|
| 725 |
+
image = PIL.ImageOps.exif_transpose(image)
|
| 726 |
+
image = image.convert("RGB")
|
| 727 |
+
return image
|
| 728 |
+
|
| 729 |
+
|
| 730 |
+
def preprocess_image(image: PIL.Image, batch_size: int):
|
| 731 |
+
w, h = image.size
|
| 732 |
+
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
|
| 733 |
+
image = image.resize((w, h), resample=PIL.Image.LANCZOS)
|
| 734 |
+
image = np.array(image).astype(np.float32) / 255.0
|
| 735 |
+
image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
|
| 736 |
+
image = torch.from_numpy(image)
|
| 737 |
+
return 2.0 * image - 1.0
|
| 738 |
+
|
| 739 |
+
|
| 740 |
+
def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str:
|
| 741 |
+
if output_gif_path is None:
|
| 742 |
+
output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name
|
| 743 |
+
|
| 744 |
+
image[0].save(
|
| 745 |
+
output_gif_path,
|
| 746 |
+
save_all=True,
|
| 747 |
+
append_images=image[1:],
|
| 748 |
+
optimize=False,
|
| 749 |
+
duration=100,
|
| 750 |
+
loop=0,
|
| 751 |
+
)
|
| 752 |
+
return output_gif_path
|
| 753 |
+
|
| 754 |
+
|
| 755 |
+
@contextmanager
|
| 756 |
+
def buffered_writer(raw_f):
|
| 757 |
+
f = io.BufferedWriter(raw_f)
|
| 758 |
+
yield f
|
| 759 |
+
f.flush()
|
| 760 |
+
|
| 761 |
+
|
| 762 |
+
def export_to_ply(mesh, output_ply_path: str = None):
|
| 763 |
+
"""
|
| 764 |
+
Write a PLY file for a mesh.
|
| 765 |
+
"""
|
| 766 |
+
if output_ply_path is None:
|
| 767 |
+
output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name
|
| 768 |
+
|
| 769 |
+
coords = mesh.verts.detach().cpu().numpy()
|
| 770 |
+
faces = mesh.faces.cpu().numpy()
|
| 771 |
+
rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
|
| 772 |
+
|
| 773 |
+
with buffered_writer(open(output_ply_path, "wb")) as f:
|
| 774 |
+
f.write(b"ply\n")
|
| 775 |
+
f.write(b"format binary_little_endian 1.0\n")
|
| 776 |
+
f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
|
| 777 |
+
f.write(b"property float x\n")
|
| 778 |
+
f.write(b"property float y\n")
|
| 779 |
+
f.write(b"property float z\n")
|
| 780 |
+
if rgb is not None:
|
| 781 |
+
f.write(b"property uchar red\n")
|
| 782 |
+
f.write(b"property uchar green\n")
|
| 783 |
+
f.write(b"property uchar blue\n")
|
| 784 |
+
if faces is not None:
|
| 785 |
+
f.write(bytes(f"element face {len(faces)}\n", "ascii"))
|
| 786 |
+
f.write(b"property list uchar int vertex_index\n")
|
| 787 |
+
f.write(b"end_header\n")
|
| 788 |
+
|
| 789 |
+
if rgb is not None:
|
| 790 |
+
rgb = (rgb * 255.499).round().astype(int)
|
| 791 |
+
vertices = [
|
| 792 |
+
(*coord, *rgb)
|
| 793 |
+
for coord, rgb in zip(
|
| 794 |
+
coords.tolist(),
|
| 795 |
+
rgb.tolist(),
|
| 796 |
+
)
|
| 797 |
+
]
|
| 798 |
+
format = struct.Struct("<3f3B")
|
| 799 |
+
for item in vertices:
|
| 800 |
+
f.write(format.pack(*item))
|
| 801 |
+
else:
|
| 802 |
+
format = struct.Struct("<3f")
|
| 803 |
+
for vertex in coords.tolist():
|
| 804 |
+
f.write(format.pack(*vertex))
|
| 805 |
+
|
| 806 |
+
if faces is not None:
|
| 807 |
+
for tri in faces.tolist():
|
| 808 |
+
f.write(format.pack(len(tri), *tri))
|
| 809 |
+
format = struct.Struct("<B3I")
|
| 810 |
+
return output_ply_path
|
| 811 |
+
|
| 812 |
+
|
| 813 |
+
def export_to_obj(mesh, output_obj_path: str = None):
|
| 814 |
+
if output_obj_path is None:
|
| 815 |
+
output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name
|
| 816 |
+
|
| 817 |
+
verts = mesh.verts.detach().cpu().numpy()
|
| 818 |
+
faces = mesh.faces.cpu().numpy()
|
| 819 |
+
|
| 820 |
+
vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
|
| 821 |
+
vertices = [
|
| 822 |
+
"{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
|
| 823 |
+
]
|
| 824 |
+
|
| 825 |
+
faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()]
|
| 826 |
+
|
| 827 |
+
combined_data = ["v " + vertex for vertex in vertices] + faces
|
| 828 |
+
|
| 829 |
+
with open(output_obj_path, "w") as f:
|
| 830 |
+
f.writelines("\n".join(combined_data))
|
| 831 |
+
|
| 832 |
+
|
| 833 |
+
def export_to_video(video_frames: List[np.ndarray], output_video_path: str = None) -> str:
|
| 834 |
+
if is_opencv_available():
|
| 835 |
+
import cv2
|
| 836 |
+
else:
|
| 837 |
+
raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
|
| 838 |
+
if output_video_path is None:
|
| 839 |
+
output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
|
| 840 |
+
|
| 841 |
+
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
|
| 842 |
+
h, w, c = video_frames[0].shape
|
| 843 |
+
video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h))
|
| 844 |
+
for i in range(len(video_frames)):
|
| 845 |
+
img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
|
| 846 |
+
video_writer.write(img)
|
| 847 |
+
return output_video_path
|
| 848 |
+
|
| 849 |
+
|
| 850 |
+
def load_hf_numpy(path) -> np.ndarray:
|
| 851 |
+
base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main"
|
| 852 |
+
|
| 853 |
+
if not path.startswith("http://") and not path.startswith("https://"):
|
| 854 |
+
path = os.path.join(base_url, urllib.parse.quote(path))
|
| 855 |
+
|
| 856 |
+
return load_numpy(path)
|
| 857 |
+
|
| 858 |
+
|
| 859 |
+
# --- pytest conf functions --- #
|
| 860 |
+
|
| 861 |
+
# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
|
| 862 |
+
pytest_opt_registered = {}
|
| 863 |
+
|
| 864 |
+
|
| 865 |
+
def pytest_addoption_shared(parser):
|
| 866 |
+
"""
|
| 867 |
+
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.
|
| 868 |
+
|
| 869 |
+
It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
|
| 870 |
+
option.
|
| 871 |
+
|
| 872 |
+
"""
|
| 873 |
+
option = "--make-reports"
|
| 874 |
+
if option not in pytest_opt_registered:
|
| 875 |
+
parser.addoption(
|
| 876 |
+
option,
|
| 877 |
+
action="store",
|
| 878 |
+
default=False,
|
| 879 |
+
help="generate report files. The value of this option is used as a prefix to report names",
|
| 880 |
+
)
|
| 881 |
+
pytest_opt_registered[option] = 1
|
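# Minimal conftest.py wrapper sketch (editor's note), matching the docstring
# above: each conftest defines `pytest_addoption` and delegates to this helper.
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)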
| 882 |
+
|
| 883 |
+
|
| 884 |
+
def pytest_terminal_summary_main(tr, id):
|
| 885 |
+
"""
|
| 886 |
+
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
|
| 887 |
+
directory. The report files are prefixed with the test suite name.
|
| 888 |
+
|
| 889 |
+
This function emulates --duration and -rA pytest arguments.
|
| 890 |
+
|
| 891 |
+
This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
|
| 892 |
+
there.
|
| 893 |
+
|
| 894 |
+
Args:
|
| 895 |
+
- tr: `terminalreporter` passed from `conftest.py`
|
| 896 |
+
- id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
|
| 897 |
+
needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
|
| 898 |
+
|
| 899 |
+
NB: this function taps into a private _pytest API and, while unlikely, it could break should
|
| 900 |
+
pytest do internal changes - also it calls default internal methods of terminalreporter which
|
| 901 |
+
can be hijacked by various `pytest-` plugins and interfere.
|
| 902 |
+
|
| 903 |
+
"""
|
| 904 |
+
from _pytest.config import create_terminal_writer
|
| 905 |
+
|
| 906 |
+
if not len(id):
|
| 907 |
+
id = "tests"
|
| 908 |
+
|
| 909 |
+
config = tr.config
|
| 910 |
+
orig_writer = config.get_terminal_writer()
|
| 911 |
+
orig_tbstyle = config.option.tbstyle
|
| 912 |
+
orig_reportchars = tr.reportchars
|
| 913 |
+
|
| 914 |
+
dir = "reports"
|
| 915 |
+
Path(dir).mkdir(parents=True, exist_ok=True)
|
| 916 |
+
report_files = {
|
| 917 |
+
k: f"{dir}/{id}_{k}.txt"
|
| 918 |
+
for k in [
|
| 919 |
+
"durations",
|
| 920 |
+
"errors",
|
| 921 |
+
"failures_long",
|
| 922 |
+
"failures_short",
|
| 923 |
+
"failures_line",
|
| 924 |
+
"passes",
|
| 925 |
+
"stats",
|
| 926 |
+
"summary_short",
|
| 927 |
+
"warnings",
|
| 928 |
+
]
|
| 929 |
+
}
|
| 930 |
+
|
| 931 |
+
# custom durations report
|
| 932 |
+
# note: there is no need to call pytest --durations=XX to get this separate report
|
| 933 |
+
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
|
| 934 |
+
dlist = []
|
| 935 |
+
for replist in tr.stats.values():
|
| 936 |
+
for rep in replist:
|
| 937 |
+
if hasattr(rep, "duration"):
|
| 938 |
+
dlist.append(rep)
|
| 939 |
+
if dlist:
|
| 940 |
+
dlist.sort(key=lambda x: x.duration, reverse=True)
|
| 941 |
+
with open(report_files["durations"], "w") as f:
|
| 942 |
+
durations_min = 0.05 # sec
|
| 943 |
+
f.write("slowest durations\n")
|
| 944 |
+
for i, rep in enumerate(dlist):
|
| 945 |
+
if rep.duration < durations_min:
|
| 946 |
+
f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted")
|
| 947 |
+
break
|
| 948 |
+
f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")
|
| 949 |
+
|
| 950 |
+
def summary_failures_short(tr):
|
| 951 |
+
# expecting that the reports were --tb=long (default) so we chop them off here to the last frame
|
| 952 |
+
reports = tr.getreports("failed")
|
| 953 |
+
if not reports:
|
| 954 |
+
return
|
| 955 |
+
tr.write_sep("=", "FAILURES SHORT STACK")
|
| 956 |
+
for rep in reports:
|
| 957 |
+
msg = tr._getfailureheadline(rep)
|
| 958 |
+
tr.write_sep("_", msg, red=True, bold=True)
|
| 959 |
+
# chop off the optional leading extra frames, leaving only the last one
|
| 960 |
+
longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
|
| 961 |
+
tr._tw.line(longrepr)
|
| 962 |
+
# note: not printing out any rep.sections to keep the report short
|
| 963 |
+
|
| 964 |
+
# use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
|
| 965 |
+
# adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
|
| 966 |
+
# note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
|
| 967 |
+
# pytest-instafail does that)
|
| 968 |
+
|
| 969 |
+
# report failures with line/short/long styles
|
| 970 |
+
config.option.tbstyle = "auto" # full tb
|
| 971 |
+
with open(report_files["failures_long"], "w") as f:
|
| 972 |
+
tr._tw = create_terminal_writer(config, f)
|
| 973 |
+
tr.summary_failures()
|
| 974 |
+
|
| 975 |
+
# config.option.tbstyle = "short" # short tb
|
| 976 |
+
with open(report_files["failures_short"], "w") as f:
|
| 977 |
+
tr._tw = create_terminal_writer(config, f)
|
| 978 |
+
summary_failures_short(tr)
|
| 979 |
+
|
| 980 |
+
config.option.tbstyle = "line" # one line per error
|
| 981 |
+
with open(report_files["failures_line"], "w") as f:
|
| 982 |
+
tr._tw = create_terminal_writer(config, f)
|
| 983 |
+
tr.summary_failures()
|
| 984 |
+
|
| 985 |
+
with open(report_files["errors"], "w") as f:
|
| 986 |
+
tr._tw = create_terminal_writer(config, f)
|
| 987 |
+
tr.summary_errors()
|
| 988 |
+
|
| 989 |
+
with open(report_files["warnings"], "w") as f:
|
| 990 |
+
tr._tw = create_terminal_writer(config, f)
|
| 991 |
+
tr.summary_warnings() # normal warnings
|
| 992 |
+
tr.summary_warnings() # final warnings
|
| 993 |
+
|
| 994 |
+
tr.reportchars = "wPpsxXEf" # emulate -rA (used in summary_passes() and short_test_summary())
|
| 995 |
+
with open(report_files["passes"], "w") as f:
|
| 996 |
+
tr._tw = create_terminal_writer(config, f)
|
| 997 |
+
tr.summary_passes()
|
| 998 |
+
|
| 999 |
+
with open(report_files["summary_short"], "w") as f:
|
| 1000 |
+
tr._tw = create_terminal_writer(config, f)
|
| 1001 |
+
tr.short_test_summary()
|
| 1002 |
+
|
| 1003 |
+
with open(report_files["stats"], "w") as f:
|
| 1004 |
+
tr._tw = create_terminal_writer(config, f)
|
| 1005 |
+
tr.summary_stats()
|
| 1006 |
+
|
| 1007 |
+
# restore:
|
| 1008 |
+
tr._tw = orig_writer
|
| 1009 |
+
tr.reportchars = orig_reportchars
|
| 1010 |
+
config.option.tbstyle = orig_tbstyle
|
| 1011 |
+
|
| 1012 |
+
|
| 1013 |
+
# Adapted from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905
|
| 1014 |
+
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
|
| 1015 |
+
"""
|
| 1016 |
+
To decorate flaky tests (methods or entire classes). They will be retried on failures.
|
| 1017 |
+
|
| 1018 |
+
Args:
|
| 1019 |
+
max_attempts (`int`, *optional*, defaults to 5):
|
| 1020 |
+
The maximum number of attempts to retry the flaky test.
|
| 1021 |
+
wait_before_retry (`float`, *optional*):
|
| 1022 |
+
If provided, will wait that number of seconds before retrying the test.
|
| 1023 |
+
description (`str`, *optional*):
|
| 1024 |
+
A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
|
| 1025 |
+
etc.)
|
| 1026 |
+
"""
|
| 1027 |
+
|
| 1028 |
+
def decorator(obj):
|
| 1029 |
+
# If decorating a class, wrap each test method on it
|
| 1030 |
+
if inspect.isclass(obj):
|
| 1031 |
+
for attr_name, attr_value in list(obj.__dict__.items()):
|
| 1032 |
+
if callable(attr_value) and attr_name.startswith("test"):
|
| 1033 |
+
# recursively decorate the method
|
| 1034 |
+
setattr(obj, attr_name, decorator(attr_value))
|
| 1035 |
+
return obj
|
| 1036 |
+
|
| 1037 |
+
# Otherwise we're decorating a single test function / method
|
| 1038 |
+
@functools.wraps(obj)
|
| 1039 |
+
def wrapper(*args, **kwargs):
|
| 1040 |
+
retry_count = 1
|
| 1041 |
+
while retry_count < max_attempts:
|
| 1042 |
+
try:
|
| 1043 |
+
return obj(*args, **kwargs)
|
| 1044 |
+
except Exception as err:
|
| 1045 |
+
msg = (
|
| 1046 |
+
f"[FLAKY] {description or obj.__name__!r} "
|
| 1047 |
+
f"failed on attempt {retry_count}/{max_attempts}: {err}"
|
| 1048 |
+
)
|
| 1049 |
+
print(msg, file=sys.stderr)
|
| 1050 |
+
if wait_before_retry is not None:
|
| 1051 |
+
time.sleep(wait_before_retry)
|
| 1052 |
+
retry_count += 1
|
| 1053 |
+
|
| 1054 |
+
return obj(*args, **kwargs)
|
| 1055 |
+
|
| 1056 |
+
return wrapper
|
| 1057 |
+
|
| 1058 |
+
return decorator
|
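# Usage sketch (editor's note): retry a test up to three times with a one second
# pause between attempts; the test name and description are hypothetical.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="hub downloads can be flaky")
#     def test_download_from_hub(self):
#         ...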
| 1059 |
+
|
| 1060 |
+
|
| 1061 |
+
# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787
|
| 1062 |
+
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
|
| 1063 |
+
"""
|
| 1064 |
+
To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.
|
| 1065 |
+
|
| 1066 |
+
Args:
|
| 1067 |
+
test_case (`unittest.TestCase`):
|
| 1068 |
+
The test that will run `target_func`.
|
| 1069 |
+
target_func (`Callable`):
|
| 1070 |
+
The function implementing the actual testing logic.
|
| 1071 |
+
inputs (`dict`, *optional*, defaults to `None`):
|
| 1072 |
+
The inputs that will be passed to `target_func` through an (input) queue.
|
| 1073 |
+
timeout (`int`, *optional*, defaults to `None`):
|
| 1074 |
+
The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
|
| 1075 |
+
variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
|
| 1076 |
+
"""
|
| 1077 |
+
if timeout is None:
|
| 1078 |
+
timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))
|
| 1079 |
+
|
| 1080 |
+
start_methohd = "spawn"
|
| 1081 |
+
ctx = multiprocessing.get_context(start_method)
|
| 1082 |
+
|
| 1083 |
+
input_queue = ctx.Queue(1)
|
| 1084 |
+
output_queue = ctx.JoinableQueue(1)
|
| 1085 |
+
|
| 1086 |
+
# We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
|
| 1087 |
+
input_queue.put(inputs, timeout=timeout)
|
| 1088 |
+
|
| 1089 |
+
process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
|
| 1090 |
+
process.start()
|
| 1091 |
+
# Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
|
| 1092 |
+
# the test from exiting properly.
|
| 1093 |
+
try:
|
| 1094 |
+
results = output_queue.get(timeout=timeout)
|
| 1095 |
+
output_queue.task_done()
|
| 1096 |
+
except Exception as e:
|
| 1097 |
+
process.terminate()
|
| 1098 |
+
test_case.fail(e)
|
| 1099 |
+
process.join(timeout=timeout)
|
| 1100 |
+
|
| 1101 |
+
if results["error"] is not None:
|
| 1102 |
+
test_case.fail(f"{results['error']}")
|
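# Usage sketch (editor's note): `target_func` receives the input queue, the
# output queue, and the timeout, and is expected to put a dict with an "error"
# key on the output queue. `_target` is a hypothetical helper defined only for
# this illustration.
#
#     def _target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             assert inputs is None or isinstance(inputs, dict)
#         except Exception as e:
#             error = str(e)
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()
#
#     run_test_in_subprocess(self, _target, inputs=None)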
| 1103 |
+
|
| 1104 |
+
|
| 1105 |
+
class CaptureLogger:
|
| 1106 |
+
"""
|
| 1107 |
+
Context manager to capture `logging` streams.
|
| 1108 |
+
Args:
|
| 1109 |
+
logger: `logging` logger object
|
| 1110 |
+
Returns:
|
| 1111 |
+
The captured output is available via `self.out`
|
| 1112 |
+
Example:
|
| 1113 |
+
```python
|
| 1114 |
+
>>> from diffusers import logging
|
| 1115 |
+
>>> from diffusers.testing_utils import CaptureLogger
|
| 1116 |
+
|
| 1117 |
+
>>> msg = "Testing 1, 2, 3"
|
| 1118 |
+
>>> logging.set_verbosity_info()
|
| 1119 |
+
>>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py")
|
| 1120 |
+
>>> with CaptureLogger(logger) as cl:
|
| 1121 |
+
... logger.info(msg)
|
| 1122 |
+
>>> assert cl.out, msg + "\n"
|
| 1123 |
+
```
|
| 1124 |
+
"""
|
| 1125 |
+
|
| 1126 |
+
def __init__(self, logger):
|
| 1127 |
+
self.logger = logger
|
| 1128 |
+
self.io = StringIO()
|
| 1129 |
+
self.sh = logging.StreamHandler(self.io)
|
| 1130 |
+
self.out = ""
|
| 1131 |
+
|
| 1132 |
+
def __enter__(self):
|
| 1133 |
+
self.logger.addHandler(self.sh)
|
| 1134 |
+
return self
|
| 1135 |
+
|
| 1136 |
+
def __exit__(self, *exc):
|
| 1137 |
+
self.logger.removeHandler(self.sh)
|
| 1138 |
+
self.out = self.io.getvalue()
|
| 1139 |
+
|
| 1140 |
+
def __repr__(self):
|
| 1141 |
+
return f"captured: {self.out}\n"
|
| 1142 |
+
|
| 1143 |
+
|
| 1144 |
+
def enable_full_determinism():
|
| 1145 |
+
"""
|
| 1146 |
+
Helper function for reproducible behavior during distributed training. See
|
| 1147 |
+
- https://pytorch.org/docs/stable/notes/randomness.html for pytorch
|
| 1148 |
+
"""
|
| 1149 |
+
from .torch_utils import enable_full_determinism as _enable_full_determinism
|
| 1150 |
+
|
| 1151 |
+
logger.warning(
|
| 1152 |
+
"enable_full_determinism has been moved to diffusers.utils.torch_utils. "
|
| 1153 |
+
"Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version."
|
| 1154 |
+
)
|
| 1155 |
+
return _enable_full_determinism()
|
| 1156 |
+
|
| 1157 |
+
|
| 1158 |
+
def disable_full_determinism():
|
| 1159 |
+
from .torch_utils import disable_full_determinism as _disable_full_determinism
|
| 1160 |
+
|
| 1161 |
+
logger.warning(
|
| 1162 |
+
"disable_full_determinism has been moved to diffusers.utils.torch_utils. "
|
| 1163 |
+
"Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version."
|
| 1164 |
+
)
|
| 1165 |
+
return _disable_full_determinism()
|
| 1166 |
+
|
| 1167 |
+
|
| 1168 |
+
# Utils for custom and alternative accelerator devices
|
| 1169 |
+
def _is_torch_fp16_available(device):
|
| 1170 |
+
if not is_torch_available():
|
| 1171 |
+
return False
|
| 1172 |
+
|
| 1173 |
+
import torch
|
| 1174 |
+
|
| 1175 |
+
device = torch.device(device)
|
| 1176 |
+
|
| 1177 |
+
try:
|
| 1178 |
+
x = torch.zeros((2, 2), dtype=torch.float16).to(device)
|
| 1179 |
+
_ = torch.mul(x, x)
|
| 1180 |
+
return True
|
| 1181 |
+
|
| 1182 |
+
except Exception as e:
|
| 1183 |
+
if device.type == "cuda":
|
| 1184 |
+
raise ValueError(
|
| 1185 |
+
f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}"
|
| 1186 |
+
)
|
| 1187 |
+
|
| 1188 |
+
return False
|
| 1189 |
+
|
| 1190 |
+
|
| 1191 |
+
def _is_torch_fp64_available(device):
|
| 1192 |
+
if not is_torch_available():
|
| 1193 |
+
return False
|
| 1194 |
+
|
| 1195 |
+
import torch
|
| 1196 |
+
|
| 1197 |
+
device = torch.device(device)
|
| 1198 |
+
|
| 1199 |
+
try:
|
| 1200 |
+
x = torch.zeros((2, 2), dtype=torch.float64).to(device)
|
| 1201 |
+
_ = torch.mul(x, x)
|
| 1202 |
+
return True
|
| 1203 |
+
|
| 1204 |
+
except Exception as e:
|
| 1205 |
+
if device.type == "cuda":
|
| 1206 |
+
raise ValueError(
|
| 1207 |
+
f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}"
|
| 1208 |
+
)
|
| 1209 |
+
|
| 1210 |
+
return False
|
| 1211 |
+
|
| 1212 |
+
|
| 1213 |
+
# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch
|
| 1214 |
+
if is_torch_available():
|
| 1215 |
+
# Behaviour flags
|
| 1216 |
+
BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True}
|
| 1217 |
+
|
| 1218 |
+
# Function definitions
|
| 1219 |
+
BACKEND_EMPTY_CACHE = {
|
| 1220 |
+
"cuda": torch.cuda.empty_cache,
|
| 1221 |
+
"xpu": torch.xpu.empty_cache,
|
| 1222 |
+
"cpu": None,
|
| 1223 |
+
"mps": torch.mps.empty_cache,
|
| 1224 |
+
"default": None,
|
| 1225 |
+
}
|
| 1226 |
+
BACKEND_DEVICE_COUNT = {
|
| 1227 |
+
"cuda": torch.cuda.device_count,
|
| 1228 |
+
"xpu": torch.xpu.device_count,
|
| 1229 |
+
"cpu": lambda: 0,
|
| 1230 |
+
"mps": lambda: 0,
|
| 1231 |
+
"default": 0,
|
| 1232 |
+
}
|
| 1233 |
+
BACKEND_MANUAL_SEED = {
|
| 1234 |
+
"cuda": torch.cuda.manual_seed,
|
| 1235 |
+
"xpu": torch.xpu.manual_seed,
|
| 1236 |
+
"cpu": torch.manual_seed,
|
| 1237 |
+
"mps": torch.mps.manual_seed,
|
| 1238 |
+
"default": torch.manual_seed,
|
| 1239 |
+
}
|
| 1240 |
+
BACKEND_RESET_PEAK_MEMORY_STATS = {
|
| 1241 |
+
"cuda": torch.cuda.reset_peak_memory_stats,
|
| 1242 |
+
"xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
|
| 1243 |
+
"cpu": None,
|
| 1244 |
+
"mps": None,
|
| 1245 |
+
"default": None,
|
| 1246 |
+
}
|
| 1247 |
+
BACKEND_RESET_MAX_MEMORY_ALLOCATED = {
|
| 1248 |
+
"cuda": torch.cuda.reset_max_memory_allocated,
|
| 1249 |
+
"xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
|
| 1250 |
+
"cpu": None,
|
| 1251 |
+
"mps": None,
|
| 1252 |
+
"default": None,
|
| 1253 |
+
}
|
| 1254 |
+
BACKEND_MAX_MEMORY_ALLOCATED = {
|
| 1255 |
+
"cuda": torch.cuda.max_memory_allocated,
|
| 1256 |
+
"xpu": getattr(torch.xpu, "max_memory_allocated", None),
|
| 1257 |
+
"cpu": 0,
|
| 1258 |
+
"mps": 0,
|
| 1259 |
+
"default": 0,
|
| 1260 |
+
}
|
| 1261 |
+
BACKEND_SYNCHRONIZE = {
|
| 1262 |
+
"cuda": torch.cuda.synchronize,
|
| 1263 |
+
"xpu": getattr(torch.xpu, "synchronize", None),
|
| 1264 |
+
"cpu": None,
|
| 1265 |
+
"mps": None,
|
| 1266 |
+
"default": None,
|
| 1267 |
+
}
|
| 1268 |
+
|
| 1269 |
+
|
| 1270 |
+
# This dispatches a defined function according to the accelerator from the function definitions.
|
| 1271 |
+
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
|
| 1272 |
+
if device not in dispatch_table:
|
| 1273 |
+
return dispatch_table["default"](*args, **kwargs)
|
| 1274 |
+
|
| 1275 |
+
fn = dispatch_table[device]
|
| 1276 |
+
|
| 1277 |
+
# Some device agnostic functions return values. Need to guard against 'None' instead at
|
| 1278 |
+
# user level
|
| 1279 |
+
if not callable(fn):
|
| 1280 |
+
return fn
|
| 1281 |
+
|
| 1282 |
+
return fn(*args, **kwargs)
|
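# Usage sketch (editor's note): dispatching one of the per-backend tables defined
# above for whichever device the test session selected, e.g. seeding the active
# accelerator.
#
#     _device_agnostic_dispatch(torch_device, BACKEND_MANUAL_SEED, 0)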
| 1283 |
+
|
| 1284 |
+
|
| 1285 |
+
# These are callables which automatically dispatch the function specific to the accelerator
|
| 1286 |
+
def backend_manual_seed(device: str, seed: int):
    from .torch_utils import backend_manual_seed as _backend_manual_seed

    logger.warning(
        "backend_manual_seed has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_manual_seed(device, seed)


def backend_synchronize(device: str):
    from .torch_utils import backend_synchronize as _backend_synchronize

    logger.warning(
        "backend_synchronize has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_synchronize(device)


def backend_empty_cache(device: str):
    from .torch_utils import backend_empty_cache as _backend_empty_cache

    logger.warning(
        "backend_empty_cache has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_empty_cache(device)


def backend_device_count(device: str):
    from .torch_utils import backend_device_count as _backend_device_count

    logger.warning(
        "backend_device_count has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_device_count(device)


def backend_reset_peak_memory_stats(device: str):
    from .torch_utils import backend_reset_peak_memory_stats as _backend_reset_peak_memory_stats

    logger.warning(
        "backend_reset_peak_memory_stats has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_reset_peak_memory_stats(device)


def backend_reset_max_memory_allocated(device: str):
    from .torch_utils import backend_reset_max_memory_allocated as _backend_reset_max_memory_allocated

    logger.warning(
        "backend_reset_max_memory_allocated has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_reset_max_memory_allocated(device)


def backend_max_memory_allocated(device: str):
    from .torch_utils import backend_max_memory_allocated as _backend_max_memory_allocated

    logger.warning(
        "backend_max_memory_allocated has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_max_memory_allocated(device)


# These are callables which return boolean behaviour flags and can be used to specify some
# device agnostic alternative where the feature is unsupported.
def backend_supports_training(device: str):
    from .torch_utils import backend_supports_training as _backend_supports_training

    logger.warning(
        "backend_supports_training has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_supports_training(device)


# Guard for when Torch is not available
if is_torch_available():
    # Update device function dict mapping
    def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
        try:
            # Try to import the function directly
            spec_fn = getattr(device_spec_module, attribute_name)
            device_fn_dict[torch_device] = spec_fn
        except AttributeError as e:
            # If the function doesn't exist, and there is no default, throw an error
            if "default" not in device_fn_dict:
                raise AttributeError(
                    f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
                ) from e

    if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ:
        device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"]
        if not Path(device_spec_path).is_file():
            raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}")

        try:
            import_name = device_spec_path[: device_spec_path.index(".py")]
        except ValueError as e:
            raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e

        device_spec_module = importlib.import_module(import_name)

        try:
            device_name = device_spec_module.DEVICE_NAME
        except AttributeError:
            raise AttributeError("Device spec file did not contain `DEVICE_NAME`")

        if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name:
            msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
            msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name."
            raise ValueError(msg)

        torch_device = device_name

        # Add one entry here for each `BACKEND_*` dictionary.
        update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
        update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
        update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
        update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING")
        update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN")
        update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN")
        update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN")


# Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers/testing_utils.py#L3090

# Type definition of key used in `Expectations` class.
DeviceProperties = Tuple[Union[str, None], Union[int, None]]


@functools.lru_cache
def get_device_properties() -> DeviceProperties:
    """
    Get environment device properties.
    """
    if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM:
        import torch

        major, _ = torch.cuda.get_device_capability()
        if IS_ROCM_SYSTEM:
            return ("rocm", major)
        else:
            return ("cuda", major)
    elif IS_XPU_SYSTEM:
        import torch

        # To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def
        arch = torch.xpu.get_device_capability()["architecture"]
        gen_mask = 0x000000FF00000000
        gen = (arch & gen_mask) >> 32
        return ("xpu", gen)
    else:
        return (torch_device, None)


if TYPE_CHECKING:
    DevicePropertiesUserDict = UserDict[DeviceProperties, Any]
else:
    DevicePropertiesUserDict = UserDict

if is_torch_available():
    from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
    from diffusers.hooks.group_offloading import (
        _GROUP_ID_LAZY_LEAF,
        _compute_group_hash,
        _find_parent_module_in_module_dict,
        _gather_buffers_with_no_group_offloading_parent,
        _gather_parameters_with_no_group_offloading_parent,
    )

    def _get_expected_safetensors_files(
        module: torch.nn.Module,
        offload_to_disk_path: str,
        offload_type: str,
        num_blocks_per_group: Optional[int] = None,
    ) -> Set[str]:
        expected_files = set()

        def get_hashed_filename(group_id: str) -> str:
            short_hash = _compute_group_hash(group_id)
            return os.path.join(offload_to_disk_path, f"group_{short_hash}.safetensors")

        if offload_type == "block_level":
            if num_blocks_per_group is None:
                raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.")

            # Handle groups of ModuleList and Sequential blocks
            unmatched_modules = []
            for name, submodule in module.named_children():
                if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
                    unmatched_modules.append(module)
                    continue

                for i in range(0, len(submodule), num_blocks_per_group):
                    current_modules = submodule[i : i + num_blocks_per_group]
                    if not current_modules:
                        continue
                    group_id = f"{name}_{i}_{i + len(current_modules) - 1}"
                    expected_files.add(get_hashed_filename(group_id))

            # Handle the group for unmatched top-level modules and parameters
            for module in unmatched_modules:
                expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group"))

        elif offload_type == "leaf_level":
            # Handle leaf-level module groups
            for name, submodule in module.named_modules():
                if isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
                    # These groups will always have parameters, so a file is expected
                    expected_files.add(get_hashed_filename(name))

            # Handle groups for non-leaf parameters/buffers
            modules_with_group_offloading = {
                name for name, sm in module.named_modules() if isinstance(sm, _GO_LC_SUPPORTED_PYTORCH_LAYERS)
            }
            parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
            buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)

            all_orphans = parameters + buffers
            if all_orphans:
                parent_to_tensors = {}
                module_dict = dict(module.named_modules())
                for tensor_name, _ in all_orphans:
                    parent_name = _find_parent_module_in_module_dict(tensor_name, module_dict)
                    if parent_name not in parent_to_tensors:
                        parent_to_tensors[parent_name] = []
                    parent_to_tensors[parent_name].append(tensor_name)

                for parent_name in parent_to_tensors:
                    # A file is expected for each parent that gathers orphaned tensors
                    expected_files.add(get_hashed_filename(parent_name))
            expected_files.add(get_hashed_filename(_GROUP_ID_LAZY_LEAF))

        else:
            raise ValueError(f"Unsupported offload_type: {offload_type}")

        return expected_files

    def _check_safetensors_serialization(
        module: torch.nn.Module,
        offload_to_disk_path: str,
        offload_type: str,
        num_blocks_per_group: Optional[int] = None,
    ) -> Tuple[bool, Optional[Set[str]], Optional[Set[str]]]:
        if not os.path.isdir(offload_to_disk_path):
            return False, None, None

        expected_files = _get_expected_safetensors_files(
            module, offload_to_disk_path, offload_type, num_blocks_per_group
        )
        actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors")))
        missing_files = expected_files - actual_files
        extra_files = actual_files - expected_files

        is_correct = not missing_files and not extra_files
        return is_correct, extra_files, missing_files


class Expectations(DevicePropertiesUserDict):
    def get_expectation(self) -> Any:
        """
        Find the best matching expectation based on the environment device properties.
        """
        return self.find_expectation(get_device_properties())

    @staticmethod
    def is_default(key: DeviceProperties) -> bool:
        return all(p is None for p in key)

    @staticmethod
    def score(key: DeviceProperties, other: DeviceProperties) -> int:
        """
        Returns a score indicating how similar two instances of the `Properties` tuple are. Points are calculated
        using bits, but documented as int. Rules are as follows:
            * Matching `type` gives 8 points.
            * Semi-matching `type`, for example cuda and rocm, gives 4 points.
            * Matching `major` (compute capability major version) gives 2 points.
            * Default expectation (if present) gives 1 point.
        """
        (device_type, major) = key
        (other_device_type, other_major) = other

        score = 0b0
        if device_type == other_device_type:
            score |= 0b1000
        elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]:
            score |= 0b100

        if major == other_major and other_major is not None:
            score |= 0b10

        if Expectations.is_default(other):
            score |= 0b1

        return int(score)

    def find_expectation(self, key: DeviceProperties = (None, None)) -> Any:
        """
        Find the best matching expectation based on the provided device properties.
        """
        (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0]))

        if Expectations.score(key, result_key) == 0:
            raise ValueError(f"No matching expectation found for {key}")

        return result

    def __repr__(self):
        return f"{self.data}"
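# --- Illustrative aside (not part of the diff above): a minimal usage sketch for `Expectations`. ---
# Keys are (device_type, major) tuples as produced by `get_device_properties()`; the values below are
# made-up placeholders, not real test expectations.
expected_slice = Expectations(
    {
        (None, None): [0.10, 0.20, 0.30],  # default fallback, matched last
        ("cuda", 8): [0.11, 0.19, 0.31],   # e.g. an Ampere (sm_80) GPU
        ("rocm", None): [0.12, 0.21, 0.29],
    }
)
# `get_expectation()` scores every key against the current environment and returns the best match,
# e.g. the ("cuda", 8) entry on an A100, otherwise the default entry.
value = expected_slice.get_expectation()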
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/torch_utils.py
ADDED
@@ -0,0 +1,334 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
PyTorch utilities: Utilities related to PyTorch
"""

import functools
import os
from typing import Callable, Dict, List, Optional, Tuple, Union

from . import logging
from .import_utils import is_torch_available, is_torch_npu_available, is_torch_version


if is_torch_available():
    import torch
    from torch.fft import fftn, fftshift, ifftn, ifftshift

    BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True}
    BACKEND_EMPTY_CACHE = {
        "cuda": torch.cuda.empty_cache,
        "xpu": torch.xpu.empty_cache,
        "cpu": None,
        "mps": torch.mps.empty_cache,
        "default": None,
    }
    BACKEND_DEVICE_COUNT = {
        "cuda": torch.cuda.device_count,
        "xpu": torch.xpu.device_count,
        "cpu": lambda: 0,
        "mps": lambda: 0,
        "default": 0,
    }
    BACKEND_MANUAL_SEED = {
        "cuda": torch.cuda.manual_seed,
        "xpu": torch.xpu.manual_seed,
        "cpu": torch.manual_seed,
        "mps": torch.mps.manual_seed,
        "default": torch.manual_seed,
    }
    BACKEND_RESET_PEAK_MEMORY_STATS = {
        "cuda": torch.cuda.reset_peak_memory_stats,
        "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
        "cpu": None,
        "mps": None,
        "default": None,
    }
    BACKEND_RESET_MAX_MEMORY_ALLOCATED = {
        "cuda": torch.cuda.reset_max_memory_allocated,
        "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
        "cpu": None,
        "mps": None,
        "default": None,
    }
    BACKEND_MAX_MEMORY_ALLOCATED = {
        "cuda": torch.cuda.max_memory_allocated,
        "xpu": getattr(torch.xpu, "max_memory_allocated", None),
        "cpu": 0,
        "mps": 0,
        "default": 0,
    }
    BACKEND_SYNCHRONIZE = {
        "cuda": torch.cuda.synchronize,
        "xpu": getattr(torch.xpu, "synchronize", None),
        "cpu": None,
        "mps": None,
        "default": None,
    }

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

try:
    from torch._dynamo import allow_in_graph as maybe_allow_in_graph
except (ImportError, ModuleNotFoundError):

    def maybe_allow_in_graph(cls):
        return cls


# This dispatches a defined function according to the accelerator from the function definitions.
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
    if device not in dispatch_table:
        return dispatch_table["default"](*args, **kwargs)

    fn = dispatch_table[device]

    # Some device agnostic functions return values. Need to guard against 'None' instead at
    # user level
    if not callable(fn):
        return fn

    return fn(*args, **kwargs)


# These are callables which automatically dispatch the function specific to the accelerator
def backend_manual_seed(device: str, seed: int):
    return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)


def backend_synchronize(device: str):
    return _device_agnostic_dispatch(device, BACKEND_SYNCHRONIZE)


def backend_empty_cache(device: str):
    return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)


def backend_device_count(device: str):
    return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)


def backend_reset_peak_memory_stats(device: str):
    return _device_agnostic_dispatch(device, BACKEND_RESET_PEAK_MEMORY_STATS)


def backend_reset_max_memory_allocated(device: str):
    return _device_agnostic_dispatch(device, BACKEND_RESET_MAX_MEMORY_ALLOCATED)


def backend_max_memory_allocated(device: str):
    return _device_agnostic_dispatch(device, BACKEND_MAX_MEMORY_ALLOCATED)


# These are callables which return boolean behaviour flags and can be used to specify some
# device agnostic alternative where the feature is unsupported.
def backend_supports_training(device: str):
    if not is_torch_available():
        return False

    if device not in BACKEND_SUPPORTS_TRAINING:
        device = "default"

    return BACKEND_SUPPORTS_TRAINING[device]


def randn_tensor(
    shape: Union[Tuple, List],
    generator: Optional[Union[List["torch.Generator"], "torch.Generator"]] = None,
    device: Optional[Union[str, "torch.device"]] = None,
    dtype: Optional["torch.dtype"] = None,
    layout: Optional["torch.layout"] = None,
):
    """A helper function to create random tensors on the desired `device` with the desired `dtype`. When
    passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor
    is always created on the CPU.
    """
    # device on which tensor is created defaults to device
    if isinstance(device, str):
        device = torch.device(device)
    rand_device = device
    batch_size = shape[0]

    layout = layout or torch.strided
    device = device or torch.device("cpu")

    if generator is not None:
        gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
        if gen_device_type != device.type and gen_device_type == "cpu":
            rand_device = "cpu"
            if device != "mps":
                logger.info(
                    f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
                    f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
                    f" slightly speed up this function by passing a generator that was created on the {device} device."
                )
        elif gen_device_type != device.type and gen_device_type == "cuda":
            raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")

    # make sure generator list of length 1 is treated like a non-list
    if isinstance(generator, list) and len(generator) == 1:
        generator = generator[0]

    if isinstance(generator, list):
        shape = (1,) + shape[1:]
        latents = [
            torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
            for i in range(batch_size)
        ]
        latents = torch.cat(latents, dim=0).to(device)
    else:
        latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)

    return latents


def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()"""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def unwrap_module(module):
    """Unwraps a module if it was compiled with torch.compile()"""
    return module._orig_mod if is_compiled_module(module) else module


def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: int) -> "torch.Tensor":
    """Fourier filter as introduced in FreeU (https://huggingface.co/papers/2309.11497).

    This version of the method comes from here:
    https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706
    """
    x = x_in
    B, C, H, W = x.shape

    # Non-power of 2 images must be float32
    if (W & (W - 1)) != 0 or (H & (H - 1)) != 0:
        x = x.to(dtype=torch.float32)
    # fftn does not support bfloat16
    elif x.dtype == torch.bfloat16:
        x = x.to(dtype=torch.float32)

    # FFT
    x_freq = fftn(x, dim=(-2, -1))
    x_freq = fftshift(x_freq, dim=(-2, -1))

    B, C, H, W = x_freq.shape
    mask = torch.ones((B, C, H, W), device=x.device)

    crow, ccol = H // 2, W // 2
    mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale
    x_freq = x_freq * mask

    # IFFT
    x_freq = ifftshift(x_freq, dim=(-2, -1))
    x_filtered = ifftn(x_freq, dim=(-2, -1)).real

    return x_filtered.to(dtype=x_in.dtype)


def apply_freeu(
    resolution_idx: int, hidden_states: "torch.Tensor", res_hidden_states: "torch.Tensor", **freeu_kwargs
) -> Tuple["torch.Tensor", "torch.Tensor"]:
    """Applies the FreeU mechanism as introduced in https://arxiv.org/abs/2309.11497.
    Adapted from the official code repository: https://github.com/ChenyangSi/FreeU.

    Args:
        resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied.
        hidden_states (`torch.Tensor`): Inputs to the underlying block.
        res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block.
        s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features.
        s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features.
        b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
        b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
    """
    if resolution_idx == 0:
        num_half_channels = hidden_states.shape[1] // 2
        hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"]
        res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"])
    if resolution_idx == 1:
        num_half_channels = hidden_states.shape[1] // 2
        hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"]
        res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"])

    return hidden_states, res_hidden_states


def get_torch_cuda_device_capability():
    if torch.cuda.is_available():
        device = torch.device("cuda")
        compute_capability = torch.cuda.get_device_capability(device)
        compute_capability = f"{compute_capability[0]}.{compute_capability[1]}"
        return float(compute_capability)
    else:
        return None


@functools.lru_cache
def get_device():
    if torch.cuda.is_available():
        return "cuda"
    elif is_torch_npu_available():
        return "npu"
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    elif torch.backends.mps.is_available():
        return "mps"
    else:
        return "cpu"


def empty_device_cache(device_type: Optional[str] = None):
    if device_type is None:
        device_type = get_device()
    if device_type in ["cpu"]:
        return
    device_mod = getattr(torch, device_type, torch.cuda)
    device_mod.empty_cache()


def device_synchronize(device_type: Optional[str] = None):
    if device_type is None:
        device_type = get_device()
    device_mod = getattr(torch, device_type, torch.cuda)
    device_mod.synchronize()


def enable_full_determinism():
    """
    Helper function for reproducible behavior during distributed training. See
    - https://pytorch.org/docs/stable/notes/randomness.html for pytorch
    """
    # Enable PyTorch deterministic mode. This potentially requires either the environment
    # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
    # depending on the CUDA version, so we set them both here
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
    torch.use_deterministic_algorithms(True)

    # Enable CUDNN deterministic mode
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cuda.matmul.allow_tf32 = False


def disable_full_determinism():
    os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ""
    torch.use_deterministic_algorithms(False)


if is_torch_available():
    torch_device = get_device()
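# --- Illustrative aside (not part of the diff above): how the dispatch tables are meant to be used. ---
# A minimal sketch; it assumes torch is installed and only exercises helpers defined in this module.
import torch

from diffusers.utils.torch_utils import backend_empty_cache, backend_manual_seed, get_device, randn_tensor

device = "cuda" if torch.cuda.is_available() else "cpu"
backend_manual_seed(device, 0)  # routes to torch.cuda.manual_seed on CUDA, torch.manual_seed on CPU
latents = randn_tensor((2, 4, 8, 8), generator=torch.Generator("cpu").manual_seed(0), device=torch.device(device))
backend_empty_cache(device)  # table entries mapped to None (e.g. "cpu") are returned as-is instead of being called
print(latents.shape, get_device())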
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/typing_utils.py
ADDED
@@ -0,0 +1,91 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Typing utilities: Utilities related to type checking and validation
"""

from typing import Any, Dict, List, Set, Tuple, Type, Union, get_args, get_origin


def _is_valid_type(obj: Any, class_or_tuple: Union[Type, Tuple[Type, ...]]) -> bool:
    """
    Checks if an object is an instance of any of the provided types. For collections, it checks if every element is of
    the correct type as well.
    """
    if not isinstance(class_or_tuple, tuple):
        class_or_tuple = (class_or_tuple,)

    # Unpack unions
    unpacked_class_or_tuple = []
    for t in class_or_tuple:
        if get_origin(t) is Union:
            unpacked_class_or_tuple.extend(get_args(t))
        else:
            unpacked_class_or_tuple.append(t)
    class_or_tuple = tuple(unpacked_class_or_tuple)

    if Any in class_or_tuple:
        return True

    obj_type = type(obj)
    # Classes with obj's type
    class_or_tuple = {t for t in class_or_tuple if isinstance(obj, get_origin(t) or t)}

    # Singular types (e.g. int, ControlNet, ...)
    # Untyped collections (e.g. List, but not List[int])
    elem_class_or_tuple = {get_args(t) for t in class_or_tuple}
    if () in elem_class_or_tuple:
        return True
    # Typed lists or sets
    elif obj_type in (list, set):
        return any(all(_is_valid_type(x, t) for x in obj) for t in elem_class_or_tuple)
    # Typed tuples
    elif obj_type is tuple:
        return any(
            # Tuples with any length and single type (e.g. Tuple[int, ...])
            (len(t) == 2 and t[-1] is Ellipsis and all(_is_valid_type(x, t[0]) for x in obj))
            or
            # Tuples with fixed length and any types (e.g. Tuple[int, str])
            (len(obj) == len(t) and all(_is_valid_type(x, tt) for x, tt in zip(obj, t)))
            for t in elem_class_or_tuple
        )
    # Typed dicts
    elif obj_type is dict:
        return any(
            all(_is_valid_type(k, kt) and _is_valid_type(v, vt) for k, v in obj.items())
            for kt, vt in elem_class_or_tuple
        )

    else:
        return False


def _get_detailed_type(obj: Any) -> Type:
    """
    Gets a detailed type for an object, including nested types for collections.
    """
    obj_type = type(obj)

    if obj_type in (list, set):
        obj_origin_type = List if obj_type is list else Set
        elems_type = Union[tuple({_get_detailed_type(x) for x in obj})]
        return obj_origin_type[elems_type]
    elif obj_type is tuple:
        return Tuple[tuple(_get_detailed_type(x) for x in obj)]
    elif obj_type is dict:
        keys_type = Union[tuple({_get_detailed_type(k) for k in obj.keys()})]
        values_type = Union[tuple({_get_detailed_type(k) for k in obj.values()})]
        return Dict[keys_type, values_type]
    else:
        return obj_type
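# --- Illustrative aside (not part of the diff above): what the two private helpers report. ---
# Shown only for clarity; both helpers are internal utilities of this module.
from typing import Dict, List

from diffusers.utils.typing_utils import _get_detailed_type, _is_valid_type

print(_is_valid_type([1, 2, 3], List[int]))                # True: every element matches the element type
print(_is_valid_type({"a": 1, "b": "x"}, Dict[str, int]))  # False: one value is a str, not an int
print(_get_detailed_type({"a": 1, "b": "x"}))              # a Dict[...] type with the value types unioned (int, str)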
exp_code/1_benchmark/diffusers-WanS2V/src/diffusers/utils/versions.py
ADDED
@@ -0,0 +1,117 @@
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for working with package versions
"""

import importlib.metadata
import operator
import re
import sys
from typing import Optional

from packaging import version


ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """
    Perform a runtime check of the dependency versions, using the exact same syntax used by pip.

    The installed module version comes from the *site-packages* dir via *importlib.metadata*.

    Args:
        requirement (`str`): pip style definition, e.g., "tokenizers==0.9.4", "tqdm>=4.27", "numpy"
        hint (`str`, *optional*): what suggestion to print in case of requirements not being met

    Example:

    ```python
    require_version("pandas>1.1.2")
    require_version("numpy>1.18.5", "this is important to have for whatever reason")
    ```"""

    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
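# --- Illustrative aside (not part of the diff above): the compound-range syntax handled above. ---
# Multiple comparators are split on "," and each one is checked; "python" is special-cased against
# sys.version_info. The second call assumes torch is installed; the hint string is a placeholder.
from diffusers.utils.versions import require_version

require_version("python>=3.8")
require_version("torch>=1.13,<3.0", "this benchmark needs a reasonably recent torch")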
exp_code/1_benchmark/diffusers-WanS2V/tests/__init__.py
ADDED
File without changes
exp_code/1_benchmark/diffusers-WanS2V/tests/conftest.py
ADDED
@@ -0,0 +1,48 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line("markers", "big_accelerator: marks tests as requiring big accelerator resources")


def pytest_addoption(parser):
    from .testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from .testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/pipeline.py
ADDED
@@ -0,0 +1,102 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# limitations under the License.


from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput, SchedulerMixin, UNet2DModel


class CustomLocalPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """

        # Sample gaussian noise to begin loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
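# --- Illustrative aside (not part of the fixture above): instantiating the custom pipeline with a tiny
# UNet/scheduler pair. The config values are placeholders in the spirit of the usual test fixtures.
import torch

from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
scheduler = DDPMScheduler(num_train_timesteps=10)
pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)

output, message = pipe(batch_size=1, num_inference_steps=2, generator=torch.manual_seed(0))
print(len(output.images), message)  # 1 "This is a local test"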
exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/custom_pipeline/what_ever.py
ADDED
@@ -0,0 +1,103 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# limitations under the License.


from typing import Optional, Tuple, Union

import torch

from diffusers import SchedulerMixin, UNet2DModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    r"""
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
            [`DDPMScheduler`], or [`DDIMScheduler`].
    """

    def __init__(self, unet: UNet2DModel, scheduler: SchedulerMixin):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        r"""
        Args:
            batch_size (`int`, *optional*, defaults to 1):
                The number of images to generate.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            eta (`float`, *optional*, defaults to 0.0):
                The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.

        Returns:
            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """

        # Sample gaussian noise to begin loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
exp_code/1_benchmark/diffusers-WanS2V/tests/fixtures/elise_format0.mid
ADDED
Binary file (14.2 kB)
exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/__init__.py
ADDED
File without changes
exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_group_offloading.py
ADDED
@@ -0,0 +1,364 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import contextlib
import gc
import unittest

import torch
from parameterized import parameterized

from diffusers.hooks import HookRegistry, ModelHook
from diffusers.models import ModelMixin
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.utils import get_logger
from diffusers.utils.import_utils import compare_versions

from ..testing_utils import (
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_peak_memory_stats,
    require_torch_accelerator,
    torch_device,
)


class DummyBlock(torch.nn.Module):
    def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
        super().__init__()

        self.proj_in = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.proj_out = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj_in(x)
        x = self.activation(x)
        x = self.proj_out(x)
        return x


class DummyModel(ModelMixin):
    def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        for block in self.blocks:
            x = block(x)
        x = self.linear_2(x)
        return x


# This model implementation contains one type of block (single_blocks) instantiated before another type of block (double_blocks).
# The invocation order of these blocks, however, is first the double_blocks and then the single_blocks.
# With group offloading implementation before https://github.com/huggingface/diffusers/pull/11375, such a modeling implementation
# would result in a device mismatch error because of the assumptions made by the code. The failure case occurs when using:
# offload_type="block_level", num_blocks_per_group=2, use_stream=True
# Post the linked PR, the implementation will work as expected.
class DummyModelWithMultipleBlocks(ModelMixin):
    def __init__(
        self, in_features: int, hidden_features: int, out_features: int, num_layers: int, num_single_layers: int
    ) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.single_blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_single_layers)]
        )
        self.double_blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        for block in self.double_blocks:
            x = block(x)
        for block in self.single_blocks:
            x = block(x)
        x = self.linear_2(x)
        return x
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# Test for https://github.com/huggingface/diffusers/pull/12077
|
| 106 |
+
class DummyModelWithLayerNorm(ModelMixin):
|
| 107 |
+
def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None:
|
| 108 |
+
super().__init__()
|
| 109 |
+
|
| 110 |
+
self.linear_1 = torch.nn.Linear(in_features, hidden_features)
|
| 111 |
+
self.activation = torch.nn.ReLU()
|
| 112 |
+
self.blocks = torch.nn.ModuleList(
|
| 113 |
+
[DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
|
| 114 |
+
)
|
| 115 |
+
self.layer_norm = torch.nn.LayerNorm(hidden_features, elementwise_affine=True)
|
| 116 |
+
self.linear_2 = torch.nn.Linear(hidden_features, out_features)
|
| 117 |
+
|
| 118 |
+
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
| 119 |
+
x = self.linear_1(x)
|
| 120 |
+
x = self.activation(x)
|
| 121 |
+
for block in self.blocks:
|
| 122 |
+
x = block(x)
|
| 123 |
+
x = self.layer_norm(x)
|
| 124 |
+
x = self.linear_2(x)
|
| 125 |
+
return x
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class DummyPipeline(DiffusionPipeline):
|
| 129 |
+
model_cpu_offload_seq = "model"
|
| 130 |
+
|
| 131 |
+
def __init__(self, model: torch.nn.Module) -> None:
|
| 132 |
+
super().__init__()
|
| 133 |
+
|
| 134 |
+
self.register_modules(model=model)
|
| 135 |
+
|
| 136 |
+
def __call__(self, x: torch.Tensor) -> torch.Tensor:
|
| 137 |
+
for _ in range(2):
|
| 138 |
+
x = x + 0.1 * self.model(x)
|
| 139 |
+
return x
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class LayerOutputTrackerHook(ModelHook):
|
| 143 |
+
def __init__(self):
|
| 144 |
+
super().__init__()
|
| 145 |
+
self.outputs = []
|
| 146 |
+
|
| 147 |
+
def post_forward(self, module, output):
|
| 148 |
+
self.outputs.append(output)
|
| 149 |
+
return output
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
@require_torch_accelerator
|
| 153 |
+
class GroupOffloadTests(unittest.TestCase):
|
| 154 |
+
in_features = 64
|
| 155 |
+
hidden_features = 256
|
| 156 |
+
out_features = 64
|
| 157 |
+
num_layers = 4
|
| 158 |
+
|
| 159 |
+
def setUp(self):
|
| 160 |
+
with torch.no_grad():
|
| 161 |
+
self.model = self.get_model()
|
| 162 |
+
self.input = torch.randn((4, self.in_features)).to(torch_device)
|
| 163 |
+
|
| 164 |
+
def tearDown(self):
|
| 165 |
+
super().tearDown()
|
| 166 |
+
|
| 167 |
+
del self.model
|
| 168 |
+
del self.input
|
| 169 |
+
gc.collect()
|
| 170 |
+
backend_empty_cache(torch_device)
|
| 171 |
+
backend_reset_peak_memory_stats(torch_device)
|
| 172 |
+
|
| 173 |
+
def get_model(self):
|
| 174 |
+
torch.manual_seed(0)
|
| 175 |
+
return DummyModel(
|
| 176 |
+
in_features=self.in_features,
|
| 177 |
+
hidden_features=self.hidden_features,
|
| 178 |
+
out_features=self.out_features,
|
| 179 |
+
num_layers=self.num_layers,
|
| 180 |
+
)
|
| 181 |
+
|
| 182 |
+
def test_offloading_forward_pass(self):
|
| 183 |
+
@torch.no_grad()
|
| 184 |
+
def run_forward(model):
|
| 185 |
+
gc.collect()
|
| 186 |
+
backend_empty_cache(torch_device)
|
| 187 |
+
backend_reset_peak_memory_stats(torch_device)
|
| 188 |
+
self.assertTrue(
|
| 189 |
+
all(
|
| 190 |
+
module._diffusers_hook.get_hook("group_offloading") is not None
|
| 191 |
+
for module in model.modules()
|
| 192 |
+
if hasattr(module, "_diffusers_hook")
|
| 193 |
+
)
|
| 194 |
+
)
|
| 195 |
+
model.eval()
|
| 196 |
+
output = model(self.input)[0].cpu()
|
| 197 |
+
max_memory_allocated = backend_max_memory_allocated(torch_device)
|
| 198 |
+
return output, max_memory_allocated
|
| 199 |
+
|
| 200 |
+
self.model.to(torch_device)
|
| 201 |
+
output_without_group_offloading, mem_baseline = run_forward(self.model)
|
| 202 |
+
self.model.to("cpu")
|
| 203 |
+
|
| 204 |
+
model = self.get_model()
|
| 205 |
+
model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 206 |
+
output_with_group_offloading1, mem1 = run_forward(model)
|
| 207 |
+
|
| 208 |
+
model = self.get_model()
|
| 209 |
+
model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1)
|
| 210 |
+
output_with_group_offloading2, mem2 = run_forward(model)
|
| 211 |
+
|
| 212 |
+
model = self.get_model()
|
| 213 |
+
model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
|
| 214 |
+
output_with_group_offloading3, mem3 = run_forward(model)
|
| 215 |
+
|
| 216 |
+
model = self.get_model()
|
| 217 |
+
model.enable_group_offload(torch_device, offload_type="leaf_level")
|
| 218 |
+
output_with_group_offloading4, mem4 = run_forward(model)
|
| 219 |
+
|
| 220 |
+
model = self.get_model()
|
| 221 |
+
model.enable_group_offload(torch_device, offload_type="leaf_level", use_stream=True)
|
| 222 |
+
output_with_group_offloading5, mem5 = run_forward(model)
|
| 223 |
+
|
| 224 |
+
# Precision assertions - offloading should not impact the output
|
| 225 |
+
self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5))
|
| 226 |
+
self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5))
|
| 227 |
+
self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5))
|
| 228 |
+
self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5))
|
| 229 |
+
self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading5, atol=1e-5))
|
| 230 |
+
|
| 231 |
+
# Memory assertions - offloading should reduce memory usage
|
| 232 |
+
self.assertTrue(mem4 <= mem5 < mem2 <= mem3 < mem1 < mem_baseline)
|
| 233 |
+
|
| 234 |
+
def test_warning_logged_if_group_offloaded_module_moved_to_accelerator(self):
|
| 235 |
+
if torch.device(torch_device).type not in ["cuda", "xpu"]:
|
| 236 |
+
return
|
| 237 |
+
self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 238 |
+
logger = get_logger("diffusers.models.modeling_utils")
|
| 239 |
+
logger.setLevel("INFO")
|
| 240 |
+
with self.assertLogs(logger, level="WARNING") as cm:
|
| 241 |
+
self.model.to(torch_device)
|
| 242 |
+
self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0])
|
| 243 |
+
|
| 244 |
+
def test_warning_logged_if_group_offloaded_pipe_moved_to_accelerator(self):
|
| 245 |
+
if torch.device(torch_device).type not in ["cuda", "xpu"]:
|
| 246 |
+
return
|
| 247 |
+
pipe = DummyPipeline(self.model)
|
| 248 |
+
self.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 249 |
+
logger = get_logger("diffusers.pipelines.pipeline_utils")
|
| 250 |
+
logger.setLevel("INFO")
|
| 251 |
+
with self.assertLogs(logger, level="WARNING") as cm:
|
| 252 |
+
pipe.to(torch_device)
|
| 253 |
+
self.assertIn(f"The module '{self.model.__class__.__name__}' is group offloaded", cm.output[0])
|
| 254 |
+
|
| 255 |
+
def test_error_raised_if_streams_used_and_no_accelerator_device(self):
|
| 256 |
+
torch_accelerator_module = getattr(torch, torch_device, torch.cuda)
|
| 257 |
+
original_is_available = torch_accelerator_module.is_available
|
| 258 |
+
torch_accelerator_module.is_available = lambda: False
|
| 259 |
+
with self.assertRaises(ValueError):
|
| 260 |
+
self.model.enable_group_offload(
|
| 261 |
+
onload_device=torch.device(torch_device), offload_type="leaf_level", use_stream=True
|
| 262 |
+
)
|
| 263 |
+
torch_accelerator_module.is_available = original_is_available
|
| 264 |
+
|
| 265 |
+
def test_error_raised_if_supports_group_offloading_false(self):
|
| 266 |
+
self.model._supports_group_offloading = False
|
| 267 |
+
with self.assertRaisesRegex(ValueError, "does not support group offloading"):
|
| 268 |
+
self.model.enable_group_offload(onload_device=torch.device(torch_device))
|
| 269 |
+
|
| 270 |
+
def test_error_raised_if_model_offloading_applied_on_group_offloaded_module(self):
|
| 271 |
+
pipe = DummyPipeline(self.model)
|
| 272 |
+
pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 273 |
+
with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"):
|
| 274 |
+
pipe.enable_model_cpu_offload()
|
| 275 |
+
|
| 276 |
+
def test_error_raised_if_sequential_offloading_applied_on_group_offloaded_module(self):
|
| 277 |
+
pipe = DummyPipeline(self.model)
|
| 278 |
+
pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 279 |
+
with self.assertRaisesRegex(ValueError, "You are trying to apply model/sequential CPU offloading"):
|
| 280 |
+
pipe.enable_sequential_cpu_offload()
|
| 281 |
+
|
| 282 |
+
def test_error_raised_if_group_offloading_applied_on_model_offloaded_module(self):
|
| 283 |
+
pipe = DummyPipeline(self.model)
|
| 284 |
+
pipe.enable_model_cpu_offload()
|
| 285 |
+
with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"):
|
| 286 |
+
pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 287 |
+
|
| 288 |
+
def test_error_raised_if_group_offloading_applied_on_sequential_offloaded_module(self):
|
| 289 |
+
pipe = DummyPipeline(self.model)
|
| 290 |
+
pipe.enable_sequential_cpu_offload()
|
| 291 |
+
with self.assertRaisesRegex(ValueError, "Cannot apply group offloading"):
|
| 292 |
+
pipe.model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=3)
|
| 293 |
+
|
| 294 |
+
def test_block_level_stream_with_invocation_order_different_from_initialization_order(self):
|
| 295 |
+
if torch.device(torch_device).type not in ["cuda", "xpu"]:
|
| 296 |
+
return
|
| 297 |
+
|
| 298 |
+
model = DummyModelWithMultipleBlocks(
|
| 299 |
+
in_features=self.in_features,
|
| 300 |
+
hidden_features=self.hidden_features,
|
| 301 |
+
out_features=self.out_features,
|
| 302 |
+
num_layers=self.num_layers,
|
| 303 |
+
num_single_layers=self.num_layers + 1,
|
| 304 |
+
)
|
| 305 |
+
model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, use_stream=True)
|
| 306 |
+
|
| 307 |
+
context = contextlib.nullcontext()
|
| 308 |
+
if compare_versions("diffusers", "<=", "0.33.0"):
|
| 309 |
+
# Will raise a device mismatch RuntimeError mentioning weights are on CPU but input is on device
|
| 310 |
+
context = self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device")
|
| 311 |
+
|
| 312 |
+
with context:
|
| 313 |
+
model(self.input)
|
| 314 |
+
|
| 315 |
+
@parameterized.expand([("block_level",), ("leaf_level",)])
|
| 316 |
+
def test_block_level_offloading_with_parameter_only_module_group(self, offload_type: str):
|
| 317 |
+
if torch.device(torch_device).type not in ["cuda", "xpu"]:
|
| 318 |
+
return
|
| 319 |
+
|
| 320 |
+
def apply_layer_output_tracker_hook(model: DummyModelWithLayerNorm):
|
| 321 |
+
for name, module in model.named_modules():
|
| 322 |
+
registry = HookRegistry.check_if_exists_or_initialize(module)
|
| 323 |
+
hook = LayerOutputTrackerHook()
|
| 324 |
+
registry.register_hook(hook, "layer_output_tracker")
|
| 325 |
+
|
| 326 |
+
model_ref = DummyModelWithLayerNorm(128, 256, 128, 2)
|
| 327 |
+
model = DummyModelWithLayerNorm(128, 256, 128, 2)
|
| 328 |
+
|
| 329 |
+
model.load_state_dict(model_ref.state_dict(), strict=True)
|
| 330 |
+
|
| 331 |
+
model_ref.to(torch_device)
|
| 332 |
+
model.enable_group_offload(torch_device, offload_type=offload_type, num_blocks_per_group=1, use_stream=True)
|
| 333 |
+
|
| 334 |
+
apply_layer_output_tracker_hook(model_ref)
|
| 335 |
+
apply_layer_output_tracker_hook(model)
|
| 336 |
+
|
| 337 |
+
x = torch.randn(2, 128).to(torch_device)
|
| 338 |
+
|
| 339 |
+
out_ref = model_ref(x)
|
| 340 |
+
out = model(x)
|
| 341 |
+
self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match.")
|
| 342 |
+
|
| 343 |
+
num_repeats = 4
|
| 344 |
+
for i in range(num_repeats):
|
| 345 |
+
out_ref = model_ref(x)
|
| 346 |
+
out = model(x)
|
| 347 |
+
|
| 348 |
+
self.assertTrue(torch.allclose(out_ref, out, atol=1e-5), "Outputs do not match after multiple invocations.")
|
| 349 |
+
|
| 350 |
+
for (ref_name, ref_module), (name, module) in zip(model_ref.named_modules(), model.named_modules()):
|
| 351 |
+
assert ref_name == name
|
| 352 |
+
ref_outputs = (
|
| 353 |
+
HookRegistry.check_if_exists_or_initialize(ref_module).get_hook("layer_output_tracker").outputs
|
| 354 |
+
)
|
| 355 |
+
outputs = HookRegistry.check_if_exists_or_initialize(module).get_hook("layer_output_tracker").outputs
|
| 356 |
+
cumulated_absmax = 0.0
|
| 357 |
+
for i in range(len(outputs)):
|
| 358 |
+
diff = ref_outputs[0] - outputs[i]
|
| 359 |
+
absdiff = diff.abs()
|
| 360 |
+
absmax = absdiff.max().item()
|
| 361 |
+
cumulated_absmax += absmax
|
| 362 |
+
self.assertLess(
|
| 363 |
+
cumulated_absmax, 1e-5, f"Output differences for {name} exceeded threshold: {cumulated_absmax:.5f}"
|
| 364 |
+
)
|
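A note on usage: the entry point exercised by the file above reduces to a single call pattern. The sketch below is distilled from test_offloading_forward_pass and reuses the DummyModel defined in that file; the "cuda" string merely stands in for the test suite's torch_device and the sketch is illustrative, not an additional test.

# Minimal sketch, assuming the DummyModel and enable_group_offload arguments shown in the tests above.
import torch

model = DummyModel(in_features=64, hidden_features=256, out_features=64, num_layers=4)
# Parameters stay on CPU and are onloaded group-by-group during forward; the tests above assert
# the result matches a fully on-device run within 1e-5.
model.enable_group_offload("cuda", offload_type="block_level", num_blocks_per_group=1, use_stream=True)

with torch.no_grad():
    x = torch.randn(4, 64).to("cuda")
    y = model(x)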
exp_code/1_benchmark/diffusers-WanS2V/tests/hooks/test_hooks.py
ADDED
|
@@ -0,0 +1,377 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import torch

from diffusers.hooks import HookRegistry, ModelHook
from diffusers.training_utils import free_memory
from diffusers.utils.logging import get_logger

from ..testing_utils import CaptureLogger, torch_device


logger = get_logger(__name__)  # pylint: disable=invalid-name


class DummyBlock(torch.nn.Module):
    def __init__(self, in_features: int, hidden_features: int, out_features: int) -> None:
        super().__init__()

        self.proj_in = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.proj_out = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.proj_in(x)
        x = self.activation(x)
        x = self.proj_out(x)
        return x


class DummyModel(torch.nn.Module):
    def __init__(self, in_features: int, hidden_features: int, out_features: int, num_layers: int) -> None:
        super().__init__()

        self.linear_1 = torch.nn.Linear(in_features, hidden_features)
        self.activation = torch.nn.ReLU()
        self.blocks = torch.nn.ModuleList(
            [DummyBlock(hidden_features, hidden_features, hidden_features) for _ in range(num_layers)]
        )
        self.linear_2 = torch.nn.Linear(hidden_features, out_features)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.linear_1(x)
        x = self.activation(x)
        for block in self.blocks:
            x = block(x)
        x = self.linear_2(x)
        return x


class AddHook(ModelHook):
    def __init__(self, value: int):
        super().__init__()
        self.value = value

    def pre_forward(self, module: torch.nn.Module, *args, **kwargs):
        logger.debug("AddHook pre_forward")
        args = ((x + self.value) if torch.is_tensor(x) else x for x in args)
        return args, kwargs

    def post_forward(self, module, output):
        logger.debug("AddHook post_forward")
        return output


class MultiplyHook(ModelHook):
    def __init__(self, value: int):
        super().__init__()
        self.value = value

    def pre_forward(self, module, *args, **kwargs):
        logger.debug("MultiplyHook pre_forward")
        args = ((x * self.value) if torch.is_tensor(x) else x for x in args)
        return args, kwargs

    def post_forward(self, module, output):
        logger.debug("MultiplyHook post_forward")
        return output

    def __repr__(self):
        return f"MultiplyHook(value={self.value})"


class StatefulAddHook(ModelHook):
    _is_stateful = True

    def __init__(self, value: int):
        super().__init__()
        self.value = value
        self.increment = 0

    def pre_forward(self, module, *args, **kwargs):
        logger.debug("StatefulAddHook pre_forward")
        add_value = self.value + self.increment
        self.increment += 1
        args = ((x + add_value) if torch.is_tensor(x) else x for x in args)
        return args, kwargs

    def reset_state(self, module):
        self.increment = 0


class SkipLayerHook(ModelHook):
    def __init__(self, skip_layer: bool):
        super().__init__()
        self.skip_layer = skip_layer

    def pre_forward(self, module, *args, **kwargs):
        logger.debug("SkipLayerHook pre_forward")
        return args, kwargs

    def new_forward(self, module, *args, **kwargs):
        logger.debug("SkipLayerHook new_forward")
        if self.skip_layer:
            return args[0]
        return self.fn_ref.original_forward(*args, **kwargs)

    def post_forward(self, module, output):
        logger.debug("SkipLayerHook post_forward")
        return output


class HookTests(unittest.TestCase):
    in_features = 4
    hidden_features = 8
    out_features = 4
    num_layers = 2

    def setUp(self):
        params = self.get_module_parameters()
        self.model = DummyModel(**params)
        self.model.to(torch_device)

    def tearDown(self):
        super().tearDown()

        del self.model
        gc.collect()
        free_memory()

    def get_module_parameters(self):
        return {
            "in_features": self.in_features,
            "hidden_features": self.hidden_features,
            "out_features": self.out_features,
            "num_layers": self.num_layers,
        }

    def get_generator(self):
        return torch.manual_seed(0)

    def test_hook_registry(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(1), "add_hook")
        registry.register_hook(MultiplyHook(2), "multiply_hook")

        registry_repr = repr(registry)
        expected_repr = "HookRegistry(\n (0) add_hook - AddHook\n (1) multiply_hook - MultiplyHook(value=2)\n)"

        self.assertEqual(len(registry.hooks), 2)
        self.assertEqual(registry._hook_order, ["add_hook", "multiply_hook"])
        self.assertEqual(registry_repr, expected_repr)

        registry.remove_hook("add_hook")

        self.assertEqual(len(registry.hooks), 1)
        self.assertEqual(registry._hook_order, ["multiply_hook"])

    def test_stateful_hook(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(StatefulAddHook(1), "stateful_add_hook")

        self.assertEqual(registry.hooks["stateful_add_hook"].increment, 0)

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
        num_repeats = 3

        for i in range(num_repeats):
            result = self.model(input)
            if i == 0:
                output1 = result

        self.assertEqual(registry.get_hook("stateful_add_hook").increment, num_repeats)

        registry.reset_stateful_hooks()
        output2 = self.model(input)

        self.assertEqual(registry.get_hook("stateful_add_hook").increment, 1)
        self.assertTrue(torch.allclose(output1, output2))

    def test_inference(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(1), "add_hook")
        registry.register_hook(MultiplyHook(2), "multiply_hook")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())
        output1 = self.model(input).mean().detach().cpu().item()

        registry.remove_hook("multiply_hook")
        new_input = input * 2
        output2 = self.model(new_input).mean().detach().cpu().item()

        registry.remove_hook("add_hook")
        new_input = input * 2 + 1
        output3 = self.model(new_input).mean().detach().cpu().item()

        self.assertAlmostEqual(output1, output2, places=5)
        self.assertAlmostEqual(output1, output3, places=5)
        self.assertAlmostEqual(output2, output3, places=5)

    def test_skip_layer_hook(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")

        input = torch.zeros(1, 4, device=torch_device)
        output = self.model(input).mean().detach().cpu().item()
        self.assertEqual(output, 0.0)

        registry.remove_hook("skip_layer_hook")
        registry.register_hook(SkipLayerHook(skip_layer=False), "skip_layer_hook")
        output = self.model(input).mean().detach().cpu().item()
        self.assertNotEqual(output, 0.0)

    def test_skip_layer_internal_block(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model.linear_1)
        input = torch.zeros(1, 4, device=torch_device)

        registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")
        with self.assertRaises(RuntimeError) as cm:
            self.model(input).mean().detach().cpu().item()
        self.assertIn("mat1 and mat2 shapes cannot be multiplied", str(cm.exception))

        registry.remove_hook("skip_layer_hook")
        output = self.model(input).mean().detach().cpu().item()
        self.assertNotEqual(output, 0.0)

        registry = HookRegistry.check_if_exists_or_initialize(self.model.blocks[1])
        registry.register_hook(SkipLayerHook(skip_layer=True), "skip_layer_hook")
        output = self.model(input).mean().detach().cpu().item()
        self.assertNotEqual(output, 0.0)

    def test_invocation_order_stateful_first(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(StatefulAddHook(1), "add_hook")
        registry.register_hook(AddHook(2), "add_hook_2")
        registry.register_hook(MultiplyHook(3), "multiply_hook")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())

        logger = get_logger(__name__)
        logger.setLevel("DEBUG")

        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            (
                "MultiplyHook pre_forward\n"
                "AddHook pre_forward\n"
                "StatefulAddHook pre_forward\n"
                "AddHook post_forward\n"
                "MultiplyHook post_forward\n"
            )
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("MultiplyHook pre_forward\nAddHook pre_forward\nAddHook post_forward\nMultiplyHook post_forward\n")
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

    def test_invocation_order_stateful_middle(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(2), "add_hook")
        registry.register_hook(StatefulAddHook(1), "add_hook_2")
        registry.register_hook(MultiplyHook(3), "multiply_hook")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())

        logger = get_logger(__name__)
        logger.setLevel("DEBUG")

        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            (
                "MultiplyHook pre_forward\n"
                "StatefulAddHook pre_forward\n"
                "AddHook pre_forward\n"
                "AddHook post_forward\n"
                "MultiplyHook post_forward\n"
            )
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("MultiplyHook pre_forward\nStatefulAddHook pre_forward\nMultiplyHook post_forward\n")
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook_2")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("MultiplyHook pre_forward\nMultiplyHook post_forward\n").replace(" ", "").replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

    def test_invocation_order_stateful_last(self):
        registry = HookRegistry.check_if_exists_or_initialize(self.model)
        registry.register_hook(AddHook(1), "add_hook")
        registry.register_hook(MultiplyHook(2), "multiply_hook")
        registry.register_hook(StatefulAddHook(3), "add_hook_2")

        input = torch.randn(1, 4, device=torch_device, generator=self.get_generator())

        logger = get_logger(__name__)
        logger.setLevel("DEBUG")

        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            (
                "StatefulAddHook pre_forward\n"
                "MultiplyHook pre_forward\n"
                "AddHook pre_forward\n"
                "AddHook post_forward\n"
                "MultiplyHook post_forward\n"
            )
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)

        registry.remove_hook("add_hook")
        with CaptureLogger(logger) as cap_logger:
            self.model(input)
        output = cap_logger.out.replace(" ", "").replace("\n", "")
        expected_invocation_order_log = (
            ("StatefulAddHook pre_forward\nMultiplyHook pre_forward\nMultiplyHook post_forward\n")
            .replace(" ", "")
            .replace("\n", "")
        )
        self.assertEqual(output, expected_invocation_order_log)
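For orientation, the registry flow that HookTests exercises reduces to a handful of calls. The sketch below mirrors test_hook_registry and uses only the DummyModel, AddHook, and MultiplyHook classes defined in the file above; it is illustrative rather than part of the test suite.

# Minimal sketch of the hook registry flow exercised above.
import torch

model = DummyModel(in_features=4, hidden_features=8, out_features=4, num_layers=2)
registry = HookRegistry.check_if_exists_or_initialize(model)
registry.register_hook(AddHook(1), "add_hook")            # adds 1 to tensor args in pre_forward
registry.register_hook(MultiplyHook(2), "multiply_hook")  # multiplies tensor args in pre_forward

# The most recently registered hook wraps the earlier ones, so MultiplyHook's pre_forward runs
# first and its post_forward runs last -- the invocation-order tests above assert exactly this.
out = model(torch.randn(1, 4))
registry.remove_hook("add_hook")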
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/__init__.py
ADDED
|
File without changes
|
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_auraflow.py
ADDED
|
@@ -0,0 +1,137 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

import torch
from transformers import AutoTokenizer, UMT5EncoderModel

from diffusers import (
    AuraFlowPipeline,
    AuraFlowTransformer2DModel,
    FlowMatchEulerDiscreteScheduler,
)

from ..testing_utils import (
    floats_tensor,
    is_peft_available,
    require_peft_backend,
)


if is_peft_available():
    pass

sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
class AuraFlowLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = AuraFlowPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    transformer_kwargs = {
        "sample_size": 64,
        "patch_size": 1,
        "in_channels": 4,
        "num_mmdit_layers": 1,
        "num_single_dit_layers": 1,
        "attention_head_dim": 16,
        "num_attention_heads": 2,
        "joint_attention_dim": 32,
        "caption_projection_dim": 32,
        "pos_embed_max_size": 64,
    }
    transformer_cls = AuraFlowTransformer2DModel
    vae_kwargs = {
        "sample_size": 32,
        "in_channels": 3,
        "out_channels": 3,
        "block_out_channels": (4,),
        "layers_per_block": 1,
        "latent_channels": 4,
        "norm_num_groups": 1,
        "use_quant_conv": False,
        "use_post_quant_conv": False,
        "shift_factor": 0.0609,
        "scaling_factor": 1.5035,
    }
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = UMT5EncoderModel, "hf-internal-testing/tiny-random-umt5"
    text_encoder_target_modules = ["q", "k", "v", "o"]
    denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0", "linear_1"]

    @property
    def output_shape(self):
        return (1, 8, 8, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 10
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 4,
            "guidance_scale": 0.0,
            "height": 8,
            "width": 8,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    @unittest.skip("Not supported in AuraFlow.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in AuraFlow.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in AuraFlow.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in AuraFlow.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogvideox.py
ADDED
|
@@ -0,0 +1,174 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import torch
from parameterized import parameterized
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKLCogVideoX,
    CogVideoXDDIMScheduler,
    CogVideoXDPMScheduler,
    CogVideoXPipeline,
    CogVideoXTransformer3DModel,
)

from ..testing_utils import (
    floats_tensor,
    require_peft_backend,
    require_torch_accelerator,
)


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
class CogVideoXLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = CogVideoXPipeline
    scheduler_cls = CogVideoXDPMScheduler
    scheduler_kwargs = {"timestep_spacing": "trailing"}
    scheduler_classes = [CogVideoXDDIMScheduler, CogVideoXDPMScheduler]

    transformer_kwargs = {
        "num_attention_heads": 4,
        "attention_head_dim": 8,
        "in_channels": 4,
        "out_channels": 4,
        "time_embed_dim": 2,
        "text_embed_dim": 32,
        "num_layers": 1,
        "sample_width": 16,
        "sample_height": 16,
        "sample_frames": 9,
        "patch_size": 2,
        "temporal_compression_ratio": 4,
        "max_text_seq_length": 16,
    }
    transformer_cls = CogVideoXTransformer3DModel
    vae_kwargs = {
        "in_channels": 3,
        "out_channels": 3,
        "down_block_types": (
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
            "CogVideoXDownBlock3D",
        ),
        "up_block_types": (
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
            "CogVideoXUpBlock3D",
        ),
        "block_out_channels": (8, 8, 8, 8),
        "latent_channels": 4,
        "layers_per_block": 1,
        "norm_num_groups": 2,
        "temporal_compression_ratio": 4,
    }
    vae_cls = AutoencoderKLCogVideoX
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    text_encoder_target_modules = ["q", "k", "v", "o"]

    @property
    def output_shape(self):
        return (1, 9, 16, 16, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        num_frames = 9
        num_latent_frames = 3  # (num_frames - 1) // temporal_compression_ratio + 1
        sizes = (2, 2)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "dance monkey",
            "num_frames": num_frames,
            "num_inference_steps": 4,
            "guidance_scale": 6.0,
            # Cannot reduce because convolution kernel becomes bigger than sample
            "height": 16,
            "width": 16,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    def test_lora_scale_kwargs_match_fusion(self):
        super().test_lora_scale_kwargs_match_fusion(expected_atol=9e-3, expected_rtol=9e-3)

    @parameterized.expand([("block_level", True), ("leaf_level", False)])
    @require_torch_accelerator
    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
        # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
        # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)

    @unittest.skip("Not supported in CogVideoX.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in CogVideoX.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in CogVideoX.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogVideoX.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass

    @unittest.skip("Not supported in CogVideoX.")
    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
        pass
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogview4.py
ADDED
|
@@ -0,0 +1,189 @@
| 1 |
+
# Copyright 2025 HuggingFace Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import sys
|
| 16 |
+
import tempfile
|
| 17 |
+
import unittest
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from parameterized import parameterized
|
| 22 |
+
from transformers import AutoTokenizer, GlmModel
|
| 23 |
+
|
| 24 |
+
from diffusers import AutoencoderKL, CogView4Pipeline, CogView4Transformer2DModel, FlowMatchEulerDiscreteScheduler
|
| 25 |
+
|
| 26 |
+
from ..testing_utils import (
|
| 27 |
+
floats_tensor,
|
| 28 |
+
require_peft_backend,
|
| 29 |
+
require_torch_accelerator,
|
| 30 |
+
skip_mps,
|
| 31 |
+
torch_device,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
sys.path.append(".")
|
| 36 |
+
|
| 37 |
+
from .utils import PeftLoraLoaderMixinTests # noqa: E402
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class TokenizerWrapper:
|
| 41 |
+
@staticmethod
|
| 42 |
+
def from_pretrained(*args, **kwargs):
|
| 43 |
+
return AutoTokenizer.from_pretrained(
|
| 44 |
+
"hf-internal-testing/tiny-random-cogview4", subfolder="tokenizer", trust_remote_code=True
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@require_peft_backend
|
| 49 |
+
@skip_mps
|
| 50 |
+
class CogView4LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
|
| 51 |
+
pipeline_class = CogView4Pipeline
|
| 52 |
+
scheduler_cls = FlowMatchEulerDiscreteScheduler
|
| 53 |
+
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
|
| 54 |
+
scheduler_kwargs = {}
|
| 55 |
+
|
| 56 |
+
transformer_kwargs = {
|
| 57 |
+
"patch_size": 2,
|
| 58 |
+
"in_channels": 4,
|
| 59 |
+
"num_layers": 2,
|
| 60 |
+
"attention_head_dim": 4,
|
| 61 |
+
"num_attention_heads": 4,
|
| 62 |
+
"out_channels": 4,
|
| 63 |
+
"text_embed_dim": 32,
|
| 64 |
+
"time_embed_dim": 8,
|
| 65 |
+
"condition_dim": 4,
|
| 66 |
+
}
|
| 67 |
+
transformer_cls = CogView4Transformer2DModel
|
| 68 |
+
vae_kwargs = {
|
| 69 |
+
"block_out_channels": [32, 64],
|
| 70 |
+
"in_channels": 3,
|
| 71 |
+
"out_channels": 3,
|
| 72 |
+
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
| 73 |
+
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
| 74 |
+
"latent_channels": 4,
|
| 75 |
+
"sample_size": 128,
|
| 76 |
+
}
|
| 77 |
+
vae_cls = AutoencoderKL
|
| 78 |
+
tokenizer_cls, tokenizer_id, tokenizer_subfolder = (
|
| 79 |
+
TokenizerWrapper,
|
| 80 |
+
"hf-internal-testing/tiny-random-cogview4",
|
| 81 |
+
"tokenizer",
|
| 82 |
+
)
    text_encoder_cls, text_encoder_id, text_encoder_subfolder = (
        GlmModel,
        "hf-internal-testing/tiny-random-cogview4",
        "text_encoder",
    )

    @property
    def output_shape(self):
        return (1, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        sizes = (4, 4)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "",
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    def test_simple_inference_save_pretrained(self):
        """
        Tests a simple use case where users could use saving utilities for LoRA through save_pretrained
        """
        for scheduler_cls in self.scheduler_classes:
            components, _, _ = self.get_dummy_components(scheduler_cls)
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(output_no_lora.shape == self.output_shape)

            images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

            with tempfile.TemporaryDirectory() as tmpdirname:
                pipe.save_pretrained(tmpdirname)

                pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
                pipe_from_pretrained.to(torch_device)

                images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]

            self.assertTrue(
                np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
                "Loading from saved checkpoints should give same results.",
            )

    @parameterized.expand([("block_level", True), ("leaf_level", False)])
    @require_torch_accelerator
    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
        # TODO: We don't run the (leaf_level, True) test here that is enabled for other models.
        # The reason for this can be found here: https://github.com/huggingface/diffusers/pull/11804#issuecomment-3013325338
        super()._test_group_offloading_inference_denoiser(offload_type, use_stream)

    @unittest.skip("Not supported in CogView4.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in CogView4.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in CogView4.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in CogView4.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass
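Usage note: the LoRA suites in this diff are plain unittest classes collected by pytest, so a single case can be exercised on its own. The snippet below is a minimal sketch, assuming pytest and a PEFT backend are installed and run from the repository root; the module path is an assumption about this tree's layout (the CogView4 tests are assumed to sit next to the Flux file named in the following entry), so adjust it to the actual location.

import pytest

# Minimal sketch: run a single LoRA test case in isolation.
# The module path below is an assumption; point it at the real test file in this tree.
exit_code = pytest.main(
    [
        "exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_cogview4.py",
        "-k",
        "test_simple_inference_save_pretrained",
        "-x",  # stop at the first failure
    ]
)
raise SystemExit(exit_code)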
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_flux.py
ADDED
@@ -0,0 +1,1054 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import os
import sys
import tempfile
import unittest

import numpy as np
import safetensors.torch
import torch
from parameterized import parameterized
from PIL import Image
from transformers import AutoTokenizer, CLIPTextModel, CLIPTokenizer, T5EncoderModel

from diffusers import FlowMatchEulerDiscreteScheduler, FluxControlPipeline, FluxPipeline, FluxTransformer2DModel
from diffusers.utils import load_image, logging

from ..testing_utils import (
    CaptureLogger,
    backend_empty_cache,
    floats_tensor,
    is_peft_available,
    nightly,
    numpy_cosine_similarity_distance,
    require_big_accelerator,
    require_peft_backend,
    require_torch_accelerator,
    slow,
    torch_device,
)


if is_peft_available():
    from peft.utils import get_peft_model_state_dict

sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


@require_peft_backend
class FluxLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = FluxPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler()
    scheduler_kwargs = {}
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    transformer_kwargs = {
        "patch_size": 1,
        "in_channels": 4,
        "num_layers": 1,
        "num_single_layers": 1,
        "attention_head_dim": 16,
        "num_attention_heads": 2,
        "joint_attention_dim": 32,
        "pooled_projection_dim": 32,
        "axes_dims_rope": [4, 4, 8],
    }
    transformer_cls = FluxTransformer2DModel
    vae_kwargs = {
        "sample_size": 32,
        "in_channels": 3,
        "out_channels": 3,
        "block_out_channels": (4,),
        "layers_per_block": 1,
        "latent_channels": 1,
        "norm_num_groups": 1,
        "use_quant_conv": False,
        "use_post_quant_conv": False,
        "shift_factor": 0.0609,
        "scaling_factor": 1.5035,
    }
    has_two_text_encoders = True
    tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
    tokenizer_2_cls, tokenizer_2_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2"
    text_encoder_2_cls, text_encoder_2_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    @property
    def output_shape(self):
        return (1, 8, 8, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 10
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 4,
            "guidance_scale": 0.0,
            "height": 8,
            "width": 8,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

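    # Descriptive note (illustrative, not executed by the suite): the tests below consume the
    # tuple returned by `get_dummy_inputs` as
    #     _, _, inputs = self.get_dummy_inputs(with_generator=False)
    #     images = pipe(**inputs, generator=torch.manual_seed(0)).images
    # i.e. only `pipeline_inputs` is unpacked; `noise` and `input_ids` are returned as well but
    # are discarded (`_, _`) by the tests in this file.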
+
def test_with_alpha_in_state_dict(self):
|
| 120 |
+
components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
|
| 121 |
+
pipe = self.pipeline_class(**components)
|
| 122 |
+
pipe = pipe.to(torch_device)
|
| 123 |
+
pipe.set_progress_bar_config(disable=None)
|
| 124 |
+
_, _, inputs = self.get_dummy_inputs(with_generator=False)
|
| 125 |
+
|
| 126 |
+
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 127 |
+
self.assertTrue(output_no_lora.shape == self.output_shape)
|
| 128 |
+
|
| 129 |
+
pipe.transformer.add_adapter(denoiser_lora_config)
|
| 130 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
|
| 131 |
+
|
| 132 |
+
images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 133 |
+
|
| 134 |
+
with tempfile.TemporaryDirectory() as tmpdirname:
|
| 135 |
+
denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
|
| 136 |
+
self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict)
|
| 137 |
+
|
| 138 |
+
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
|
| 139 |
+
pipe.unload_lora_weights()
|
| 140 |
+
pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
|
| 141 |
+
|
| 142 |
+
# modify the state dict to have alpha values following
|
| 143 |
+
# https://huggingface.co/TheLastBen/Jon_Snow_Flux_LoRA/blob/main/jon_snow.safetensors
|
| 144 |
+
state_dict_with_alpha = safetensors.torch.load_file(
|
| 145 |
+
os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")
|
| 146 |
+
)
|
| 147 |
+
alpha_dict = {}
|
| 148 |
+
for k, v in state_dict_with_alpha.items():
|
| 149 |
+
# only do for `transformer` and for the k projections -- should be enough to test.
|
| 150 |
+
if "transformer" in k and "to_k" in k and "lora_A" in k:
|
| 151 |
+
alpha_dict[f"{k}.alpha"] = float(torch.randint(10, 100, size=()))
|
| 152 |
+
state_dict_with_alpha.update(alpha_dict)
|
| 153 |
+
|
| 154 |
+
images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 155 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
|
| 156 |
+
|
| 157 |
+
pipe.unload_lora_weights()
|
| 158 |
+
pipe.load_lora_weights(state_dict_with_alpha)
|
| 159 |
+
images_lora_with_alpha = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 160 |
+
|
| 161 |
+
self.assertTrue(
|
| 162 |
+
np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
|
| 163 |
+
"Loading from saved checkpoints should give same results.",
|
| 164 |
+
)
|
| 165 |
+
self.assertFalse(np.allclose(images_lora_with_alpha, images_lora, atol=1e-3, rtol=1e-3))
|
| 166 |
+
|
| 167 |
+
def test_lora_expansion_works_for_absent_keys(self):
|
| 168 |
+
components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
|
| 169 |
+
pipe = self.pipeline_class(**components)
|
| 170 |
+
pipe = pipe.to(torch_device)
|
| 171 |
+
pipe.set_progress_bar_config(disable=None)
|
| 172 |
+
_, _, inputs = self.get_dummy_inputs(with_generator=False)
|
| 173 |
+
|
| 174 |
+
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 175 |
+
self.assertTrue(output_no_lora.shape == self.output_shape)
|
| 176 |
+
|
| 177 |
+
# Modify the config to have a layer which won't be present in the second LoRA we will load.
|
| 178 |
+
modified_denoiser_lora_config = copy.deepcopy(denoiser_lora_config)
|
| 179 |
+
modified_denoiser_lora_config.target_modules.add("x_embedder")
|
| 180 |
+
|
| 181 |
+
pipe.transformer.add_adapter(modified_denoiser_lora_config)
|
| 182 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
|
| 183 |
+
|
| 184 |
+
images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 185 |
+
self.assertFalse(
|
| 186 |
+
np.allclose(images_lora, output_no_lora, atol=1e-3, rtol=1e-3),
|
| 187 |
+
"LoRA should lead to different results.",
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
with tempfile.TemporaryDirectory() as tmpdirname:
|
| 191 |
+
denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
|
| 192 |
+
self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict)
|
| 193 |
+
|
| 194 |
+
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
|
| 195 |
+
pipe.unload_lora_weights()
|
| 196 |
+
pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"), adapter_name="one")
|
| 197 |
+
|
| 198 |
+
# Modify the state dict to exclude "x_embedder" related LoRA params.
|
| 199 |
+
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
|
| 200 |
+
lora_state_dict_without_xembedder = {k: v for k, v in lora_state_dict.items() if "x_embedder" not in k}
|
| 201 |
+
|
| 202 |
+
pipe.load_lora_weights(lora_state_dict_without_xembedder, adapter_name="two")
|
| 203 |
+
pipe.set_adapters(["one", "two"])
|
| 204 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
|
| 205 |
+
images_lora_with_absent_keys = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 206 |
+
|
| 207 |
+
self.assertFalse(
|
| 208 |
+
np.allclose(images_lora, images_lora_with_absent_keys, atol=1e-3, rtol=1e-3),
|
| 209 |
+
"Different LoRAs should lead to different results.",
|
| 210 |
+
)
|
| 211 |
+
self.assertFalse(
|
| 212 |
+
np.allclose(output_no_lora, images_lora_with_absent_keys, atol=1e-3, rtol=1e-3),
|
| 213 |
+
"LoRA should lead to different results.",
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
def test_lora_expansion_works_for_extra_keys(self):
|
| 217 |
+
components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
|
| 218 |
+
pipe = self.pipeline_class(**components)
|
| 219 |
+
pipe = pipe.to(torch_device)
|
| 220 |
+
pipe.set_progress_bar_config(disable=None)
|
| 221 |
+
_, _, inputs = self.get_dummy_inputs(with_generator=False)
|
| 222 |
+
|
| 223 |
+
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 224 |
+
self.assertTrue(output_no_lora.shape == self.output_shape)
|
| 225 |
+
|
| 226 |
+
# Modify the config to have a layer which won't be present in the first LoRA we will load.
|
| 227 |
+
modified_denoiser_lora_config = copy.deepcopy(denoiser_lora_config)
|
| 228 |
+
modified_denoiser_lora_config.target_modules.add("x_embedder")
|
| 229 |
+
|
| 230 |
+
pipe.transformer.add_adapter(modified_denoiser_lora_config)
|
| 231 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
|
| 232 |
+
|
| 233 |
+
images_lora = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 234 |
+
self.assertFalse(
|
| 235 |
+
np.allclose(images_lora, output_no_lora, atol=1e-3, rtol=1e-3),
|
| 236 |
+
"LoRA should lead to different results.",
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
with tempfile.TemporaryDirectory() as tmpdirname:
|
| 240 |
+
denoiser_state_dict = get_peft_model_state_dict(pipe.transformer)
|
| 241 |
+
self.pipeline_class.save_lora_weights(tmpdirname, transformer_lora_layers=denoiser_state_dict)
|
| 242 |
+
|
| 243 |
+
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
|
| 244 |
+
pipe.unload_lora_weights()
|
| 245 |
+
# Modify the state dict to exclude "x_embedder" related LoRA params.
|
| 246 |
+
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
|
| 247 |
+
lora_state_dict_without_xembedder = {k: v for k, v in lora_state_dict.items() if "x_embedder" not in k}
|
| 248 |
+
pipe.load_lora_weights(lora_state_dict_without_xembedder, adapter_name="one")
|
| 249 |
+
|
| 250 |
+
# Load state dict with `x_embedder`.
|
| 251 |
+
pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"), adapter_name="two")
|
| 252 |
+
|
| 253 |
+
pipe.set_adapters(["one", "two"])
|
| 254 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in transformer")
|
| 255 |
+
images_lora_with_extra_keys = pipe(**inputs, generator=torch.manual_seed(0)).images
|
| 256 |
+
|
| 257 |
+
self.assertFalse(
|
| 258 |
+
np.allclose(images_lora, images_lora_with_extra_keys, atol=1e-3, rtol=1e-3),
|
| 259 |
+
"Different LoRAs should lead to different results.",
|
| 260 |
+
)
|
| 261 |
+
self.assertFalse(
|
| 262 |
+
np.allclose(output_no_lora, images_lora_with_extra_keys, atol=1e-3, rtol=1e-3),
|
| 263 |
+
"LoRA should lead to different results.",
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
@unittest.skip("Not supported in Flux.")
|
| 267 |
+
def test_simple_inference_with_text_denoiser_block_scale(self):
|
| 268 |
+
pass
|
| 269 |
+
|
| 270 |
+
@unittest.skip("Not supported in Flux.")
|
| 271 |
+
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
|
| 272 |
+
pass
|
| 273 |
+
|
| 274 |
+
@unittest.skip("Not supported in Flux.")
|
| 275 |
+
def test_modify_padding_mode(self):
|
| 276 |
+
pass
|
| 277 |
+
|
| 278 |
+
@unittest.skip("Not supported in Flux.")
|
| 279 |
+
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
|
| 280 |
+
pass
|
| 281 |
+
|
| 282 |
+
|
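# Background for `test_with_alpha_in_state_dict` above (illustrative, not executed): in PEFT-style
# LoRA checkpoints, an optional `<module>.alpha` entry rescales the low-rank update, roughly
#     delta_W = (alpha / rank) * (lora_B.weight @ lora_A.weight)
# so injecting random alpha values is expected to change the generated images, which is exactly
# what the test asserts.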
class FluxControlLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = FluxControlPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler()
    scheduler_kwargs = {}
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    transformer_kwargs = {
        "patch_size": 1,
        "in_channels": 8,
        "out_channels": 4,
        "num_layers": 1,
        "num_single_layers": 1,
        "attention_head_dim": 16,
        "num_attention_heads": 2,
        "joint_attention_dim": 32,
        "pooled_projection_dim": 32,
        "axes_dims_rope": [4, 4, 8],
    }
    transformer_cls = FluxTransformer2DModel
    vae_kwargs = {
        "sample_size": 32,
        "in_channels": 3,
        "out_channels": 3,
        "block_out_channels": (4,),
        "layers_per_block": 1,
        "latent_channels": 1,
        "norm_num_groups": 1,
        "use_quant_conv": False,
        "use_post_quant_conv": False,
        "shift_factor": 0.0609,
        "scaling_factor": 1.5035,
    }
    has_two_text_encoders = True
    tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
    tokenizer_2_cls, tokenizer_2_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2"
    text_encoder_2_cls, text_encoder_2_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    @property
    def output_shape(self):
        return (1, 8, 8, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 10
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "control_image": Image.fromarray(np.random.randint(0, 255, size=(32, 32, 3), dtype="uint8")),
            "num_inference_steps": 4,
            "guidance_scale": 0.0,
            "height": 8,
            "width": 8,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_with_norm_in_state_dict(self):
        components, _, denoiser_lora_config = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.INFO)

        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        for norm_layer in ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]:
            norm_state_dict = {}
            for name, module in pipe.transformer.named_modules():
                if norm_layer not in name or not hasattr(module, "weight") or module.weight is None:
                    continue
                norm_state_dict[f"transformer.{name}.weight"] = torch.randn(
                    module.weight.shape, device=module.weight.device, dtype=module.weight.dtype
                )

            with CaptureLogger(logger) as cap_logger:
                pipe.load_lora_weights(norm_state_dict)
                lora_load_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

            self.assertTrue(
                "The provided state dict contains normalization layers in addition to LoRA layers"
                in cap_logger.out
            )
            self.assertTrue(len(pipe.transformer._transformer_norm_layers) > 0)

            pipe.unload_lora_weights()
            lora_unload_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

            self.assertTrue(pipe.transformer._transformer_norm_layers is None)
            self.assertTrue(np.allclose(original_output, lora_unload_output, atol=1e-5, rtol=1e-5))
            self.assertFalse(
                np.allclose(original_output, lora_load_output, atol=1e-6, rtol=1e-6), f"{norm_layer} is tested"
            )

        with CaptureLogger(logger) as cap_logger:
            for key in list(norm_state_dict.keys()):
                norm_state_dict[key.replace("norm", "norm_k_something_random")] = norm_state_dict.pop(key)
            pipe.load_lora_weights(norm_state_dict)

        self.assertTrue(
            "Unsupported keys found in state dict when trying to load normalization layers" in cap_logger.out
        )

    def test_lora_parameter_expanded_shapes(self):
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        original_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.DEBUG)

        # Change the transformer config to mimic a real use case.
        num_channels_without_control = 4
        transformer = FluxTransformer2DModel.from_config(
            components["transformer"].config, in_channels=num_channels_without_control
        ).to(torch_device)
        self.assertTrue(
            transformer.config.in_channels == num_channels_without_control,
            f"Expected {num_channels_without_control} channels in the modified transformer but has {transformer.config.in_channels=}",
        )

        original_transformer_state_dict = pipe.transformer.state_dict()
        x_embedder_weight = original_transformer_state_dict.pop("x_embedder.weight")
        incompatible_keys = transformer.load_state_dict(original_transformer_state_dict, strict=False)
        self.assertTrue(
            "x_embedder.weight" in incompatible_keys.missing_keys,
            "Could not find x_embedder.weight in the missing keys.",
        )
        transformer.x_embedder.weight.data.copy_(x_embedder_weight[..., :num_channels_without_control])
        pipe.transformer = transformer

        out_features, in_features = pipe.transformer.x_embedder.weight.shape
        rank = 4

        dummy_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
        dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight,
        }
        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(lora_state_dict, "adapter-1")

        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        lora_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4))
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
        self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))

        # Testing opposite direction where the LoRA params are zero-padded.
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        dummy_lora_A = torch.nn.Linear(1, rank, bias=False)
        dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight,
        }
        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(lora_state_dict, "adapter-1")

        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        lora_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4))
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
        self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out)

    def test_normal_lora_with_expanded_lora_raises_error(self):
        # Test the following situation. Load a regular LoRA (such as the ones trained on Flux.1-Dev). And then
        # load shape expanded LoRA (such as Control LoRA).
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

        # Change the transformer config to mimic a real use case.
        num_channels_without_control = 4
        transformer = FluxTransformer2DModel.from_config(
            components["transformer"].config, in_channels=num_channels_without_control
        ).to(torch_device)
        components["transformer"] = transformer

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.DEBUG)

        out_features, in_features = pipe.transformer.x_embedder.weight.shape
        rank = 4

        shape_expander_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
        shape_expander_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
        }
        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(lora_state_dict, "adapter-1")

        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
        self.assertTrue(pipe.get_active_adapters() == ["adapter-1"])
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
        self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
        normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
        }

        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(lora_state_dict, "adapter-2")

        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
        self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out)
        self.assertTrue(pipe.get_active_adapters() == ["adapter-2"])

        lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3))

        # Test the opposite case where the first lora has the correct input features and the second lora has expanded input features.
        # This should raise a runtime error on input shapes being incompatible.
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
        # Change the transformer config to mimic a real use case.
        num_channels_without_control = 4
        transformer = FluxTransformer2DModel.from_config(
            components["transformer"].config, in_channels=num_channels_without_control
        ).to(torch_device)
        components["transformer"] = transformer

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.DEBUG)

        out_features, in_features = pipe.transformer.x_embedder.weight.shape
        rank = 4

        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
        }
        pipe.load_lora_weights(lora_state_dict, "adapter-1")

        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features)
        self.assertTrue(pipe.transformer.config.in_channels == in_features)

        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
        }

        # We should check for input shapes being incompatible here. But because above mentioned issue is
        # not a supported use case, and because of the PEFT renaming, we will currently have a shape
        # mismatch error.
        self.assertRaisesRegex(
            RuntimeError,
            "size mismatch for x_embedder.lora_A.adapter-2.weight",
            pipe.load_lora_weights,
            lora_state_dict,
            "adapter-2",
        )

    def test_fuse_expanded_lora_with_regular_lora(self):
        # This test checks that it works when a LoRA with expanded shapes (like Control LoRAs) is loaded
        # together with another LoRA that has the correct shapes. The opposite direction isn't supported
        # and is covered by the test above.
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

        # Change the transformer config to mimic a real use case.
        num_channels_without_control = 4
        transformer = FluxTransformer2DModel.from_config(
            components["transformer"].config, in_channels=num_channels_without_control
        ).to(torch_device)
        components["transformer"] = transformer

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.DEBUG)

        out_features, in_features = pipe.transformer.x_embedder.weight.shape
        rank = 4

        shape_expander_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
        shape_expander_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": shape_expander_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": shape_expander_lora_B.weight,
        }
        pipe.load_lora_weights(lora_state_dict, "adapter-1")
        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
        normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
        }

        pipe.load_lora_weights(lora_state_dict, "adapter-2")
        self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        lora_output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters(["adapter-1", "adapter-2"], [1.0, 1.0])
        lora_output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(lora_output, lora_output_2, atol=1e-3, rtol=1e-3))
        self.assertFalse(np.allclose(lora_output, lora_output_3, atol=1e-3, rtol=1e-3))
        self.assertFalse(np.allclose(lora_output_2, lora_output_3, atol=1e-3, rtol=1e-3))

        pipe.fuse_lora(lora_scale=1.0, adapter_names=["adapter-1", "adapter-2"])
        lora_output_4 = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(np.allclose(lora_output_3, lora_output_4, atol=1e-3, rtol=1e-3))

    def test_load_regular_lora(self):
        # This test checks if a regular lora (think of one trained on Flux.1 Dev for example) can be loaded
        # into the transformer with more input channels than Flux.1 Dev, for example. Some examples of those
        # transformers include Flux Fill, Flux Control, etc.
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        out_features, in_features = pipe.transformer.x_embedder.weight.shape
        rank = 4
        in_features = in_features // 2  # to mimic the Flux.1-Dev LoRA.
        normal_lora_A = torch.nn.Linear(in_features, rank, bias=False)
        normal_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": normal_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": normal_lora_B.weight,
        }

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.INFO)
        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(lora_state_dict, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        lora_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue("The following LoRA modules were zero padded to match the state dict of" in cap_logger.out)
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2)
        self.assertFalse(np.allclose(original_output, lora_output, atol=1e-3, rtol=1e-3))

    def test_lora_unload_with_parameter_expanded_shapes(self):
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.DEBUG)

        # Change the transformer config to mimic a real use case.
        num_channels_without_control = 4
        transformer = FluxTransformer2DModel.from_config(
            components["transformer"].config, in_channels=num_channels_without_control
        ).to(torch_device)
        self.assertTrue(
            transformer.config.in_channels == num_channels_without_control,
            f"Expected {num_channels_without_control} channels in the modified transformer but has {transformer.config.in_channels=}",
        )

        # This should be initialized with a Flux pipeline variant that doesn't accept `control_image`.
        components["transformer"] = transformer
        pipe = FluxPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        control_image = inputs.pop("control_image")
        original_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

        control_pipe = self.pipeline_class(**components)
        out_features, in_features = control_pipe.transformer.x_embedder.weight.shape
        rank = 4

        dummy_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
        dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight,
        }
        with CaptureLogger(logger) as cap_logger:
            control_pipe.load_lora_weights(lora_state_dict, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        inputs["control_image"] = control_image
        lora_out = control_pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4))
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
        self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))

        control_pipe.unload_lora_weights(reset_to_overwritten_params=True)
        self.assertTrue(
            control_pipe.transformer.config.in_channels == num_channels_without_control,
            f"Expected {num_channels_without_control} channels in the modified transformer but has {control_pipe.transformer.config.in_channels=}",
        )
        loaded_pipe = FluxPipeline.from_pipe(control_pipe)
        self.assertTrue(
            loaded_pipe.transformer.config.in_channels == num_channels_without_control,
            f"Expected {num_channels_without_control} channels in the modified transformer but has {loaded_pipe.transformer.config.in_channels=}",
        )
        inputs.pop("control_image")
        unloaded_lora_out = loaded_pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(unloaded_lora_out, lora_out, rtol=1e-4, atol=1e-4))
        self.assertTrue(np.allclose(unloaded_lora_out, original_out, atol=1e-4, rtol=1e-4))
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features)
        self.assertTrue(pipe.transformer.config.in_channels == in_features)

    def test_lora_unload_with_parameter_expanded_shapes_and_no_reset(self):
        components, _, _ = self.get_dummy_components(FlowMatchEulerDiscreteScheduler)

        logger = logging.get_logger("diffusers.loaders.lora_pipeline")
        logger.setLevel(logging.DEBUG)

        # Change the transformer config to mimic a real use case.
        num_channels_without_control = 4
        transformer = FluxTransformer2DModel.from_config(
            components["transformer"].config, in_channels=num_channels_without_control
        ).to(torch_device)
        self.assertTrue(
            transformer.config.in_channels == num_channels_without_control,
            f"Expected {num_channels_without_control} channels in the modified transformer but has {transformer.config.in_channels=}",
        )

        # This should be initialized with a Flux pipeline variant that doesn't accept `control_image`.
        components["transformer"] = transformer
        pipe = FluxPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        control_image = inputs.pop("control_image")
        original_out = pipe(**inputs, generator=torch.manual_seed(0))[0]

        control_pipe = self.pipeline_class(**components)
        out_features, in_features = control_pipe.transformer.x_embedder.weight.shape
        rank = 4

        dummy_lora_A = torch.nn.Linear(2 * in_features, rank, bias=False)
        dummy_lora_B = torch.nn.Linear(rank, out_features, bias=False)
        lora_state_dict = {
            "transformer.x_embedder.lora_A.weight": dummy_lora_A.weight,
            "transformer.x_embedder.lora_B.weight": dummy_lora_B.weight,
        }
        with CaptureLogger(logger) as cap_logger:
            control_pipe.load_lora_weights(lora_state_dict, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(pipe.transformer), "Lora not correctly set in denoiser")

        inputs["control_image"] = control_image
        lora_out = control_pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(original_out, lora_out, rtol=1e-4, atol=1e-4))
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == 2 * in_features)
        self.assertTrue(pipe.transformer.config.in_channels == 2 * in_features)
        self.assertTrue(cap_logger.out.startswith("Expanding the nn.Linear input/output features for module"))

        control_pipe.unload_lora_weights(reset_to_overwritten_params=False)
        self.assertTrue(
            control_pipe.transformer.config.in_channels == 2 * num_channels_without_control,
            f"Expected {num_channels_without_control} channels in the modified transformer but has {control_pipe.transformer.config.in_channels=}",
        )
        no_lora_out = control_pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(no_lora_out, lora_out, rtol=1e-4, atol=1e-4))
        self.assertTrue(pipe.transformer.x_embedder.weight.data.shape[1] == in_features * 2)
        self.assertTrue(pipe.transformer.config.in_channels == in_features * 2)

    @unittest.skip("Not supported in Flux.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Flux.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Flux.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Not supported in Flux.")
    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
        pass

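# Conceptual sketch of the "zero padded" path asserted in the Control LoRA tests above
# (an illustration, not the diffusers implementation): when a LoRA trained against
# `in_features` input channels is loaded into an x_embedder expanded to `2 * in_features`,
# its lora_A weight can be padded along the input dimension, roughly
#     lora_A_padded = torch.cat([lora_A, lora_A.new_zeros(rank, in_features)], dim=1)
# so the extra input channels contribute nothing until an adapter is trained on them.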
@slow
@nightly
@require_torch_accelerator
@require_peft_backend
@require_big_accelerator
class FluxLoRAIntegrationTests(unittest.TestCase):
    """internal note: The integration slices were obtained on audace.

    torch: 2.6.0.dev20241006+cu124 with CUDA 12.5. Need the same setup for the
    assertions to pass.
    """

    num_inference_steps = 10
    seed = 0

    def setUp(self):
        super().setUp()

        gc.collect()
        backend_empty_cache(torch_device)

        self.pipeline = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

    def tearDown(self):
        super().tearDown()

        del self.pipeline
        gc.collect()
        backend_empty_cache(torch_device)

    def test_flux_the_last_ben(self):
        self.pipeline.load_lora_weights("TheLastBen/Jon_Snow_Flux_LoRA", weight_name="jon_snow.safetensors")
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()
        # Instead of calling `enable_model_cpu_offload()`, we do an accelerator placement here because the CI
        # run supports it. We have about 34GB RAM in the CI runner which kills the test when run with
        # `enable_model_cpu_offload()`. We repeat this for the other tests, too.
        self.pipeline = self.pipeline.to(torch_device)

        prompt = "jon snow eating pizza with ketchup"

        out = self.pipeline(
            prompt,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=4.0,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images
        out_slice = out[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.1855, 0.1855, 0.1836, 0.1855, 0.1836, 0.1875, 0.1777, 0.1758, 0.2246])

        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3

    def test_flux_kohya(self):
        self.pipeline.load_lora_weights("Norod78/brain-slug-flux")
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()
        self.pipeline = self.pipeline.to(torch_device)

        prompt = "The cat with a brain slug earring"
        out = self.pipeline(
            prompt,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=4.5,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images

        out_slice = out[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.6367, 0.6367, 0.6328, 0.6367, 0.6328, 0.6289, 0.6367, 0.6328, 0.6484])

        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3

    def test_flux_kohya_with_text_encoder(self):
        self.pipeline.load_lora_weights("cocktailpeanut/optimus", weight_name="optimus.safetensors")
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()
        self.pipeline = self.pipeline.to(torch_device)

        prompt = "optimus is cleaning the house with broomstick"
        out = self.pipeline(
            prompt,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=4.5,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images

        out_slice = out[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.4023, 0.4023, 0.4023, 0.3965, 0.3984, 0.3965, 0.3926, 0.3906, 0.4219])

        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3

    def test_flux_xlabs(self):
        self.pipeline.load_lora_weights("XLabs-AI/flux-lora-collection", weight_name="disney_lora.safetensors")
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()
        self.pipeline = self.pipeline.to(torch_device)

        prompt = "A blue jay standing on a large basket of rainbow macarons, disney style"

        out = self.pipeline(
            prompt,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=3.5,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images
        out_slice = out[0, -3:, -3:, -1].flatten()
        expected_slice = np.array([0.3965, 0.4180, 0.4434, 0.4082, 0.4375, 0.4590, 0.4141, 0.4375, 0.4980])

        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3

    def test_flux_xlabs_load_lora_with_single_blocks(self):
        self.pipeline.load_lora_weights(
            "salinasr/test_xlabs_flux_lora_with_singleblocks", weight_name="lora.safetensors"
        )
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()
        self.pipeline.enable_model_cpu_offload()

        prompt = "a wizard mouse playing chess"

        out = self.pipeline(
            prompt,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=3.5,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images
        out_slice = out[0, -3:, -3:, -1].flatten()
        expected_slice = np.array(
            [0.04882812, 0.04101562, 0.04882812, 0.03710938, 0.02929688, 0.02734375, 0.0234375, 0.01757812, 0.0390625]
        )
        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3


@nightly
@require_torch_accelerator
@require_peft_backend
@require_big_accelerator
class FluxControlLoRAIntegrationTests(unittest.TestCase):
    num_inference_steps = 10
    seed = 0
    prompt = "A robot made of exotic candies and chocolates of different kinds."

    def setUp(self):
        super().setUp()

        gc.collect()
        backend_empty_cache(torch_device)

        self.pipeline = FluxControlPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
        ).to(torch_device)

    def tearDown(self):
        super().tearDown()

        gc.collect()
        backend_empty_cache(torch_device)

    @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"])
    def test_lora(self, lora_ckpt_id):
        self.pipeline.load_lora_weights(lora_ckpt_id)
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()

        if "Canny" in lora_ckpt_id:
            control_image = load_image(
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
            )
        else:
            control_image = load_image(
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png"
            )

        image = self.pipeline(
            prompt=self.prompt,
            control_image=control_image,
            height=1024,
            width=1024,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images

        out_slice = image[0, -3:, -3:, -1].flatten()
        if "Canny" in lora_ckpt_id:
            expected_slice = np.array([0.8438, 0.8438, 0.8438, 0.8438, 0.8438, 0.8398, 0.8438, 0.8438, 0.8516])
        else:
            expected_slice = np.array([0.8203, 0.8320, 0.8359, 0.8203, 0.8281, 0.8281, 0.8203, 0.8242, 0.8359])

        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3

    @parameterized.expand(["black-forest-labs/FLUX.1-Canny-dev-lora", "black-forest-labs/FLUX.1-Depth-dev-lora"])
    def test_lora_with_turbo(self, lora_ckpt_id):
        self.pipeline.load_lora_weights(lora_ckpt_id)
        self.pipeline.load_lora_weights("ByteDance/Hyper-SD", weight_name="Hyper-FLUX.1-dev-8steps-lora.safetensors")
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()

        if "Canny" in lora_ckpt_id:
            control_image = load_image(
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/canny_condition_image.png"
            )
        else:
            control_image = load_image(
                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/flux-control-lora/depth_condition_image.png"
            )

        image = self.pipeline(
            prompt=self.prompt,
            control_image=control_image,
            height=1024,
            width=1024,
            num_inference_steps=self.num_inference_steps,
            guidance_scale=30.0 if "Canny" in lora_ckpt_id else 10.0,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).images

        out_slice = image[0, -3:, -3:, -1].flatten()
        if "Canny" in lora_ckpt_id:
            expected_slice = np.array([0.6562, 0.7266, 0.7578, 0.6367, 0.6758, 0.7031, 0.6172, 0.6602, 0.6484])
|
| 1049 |
+
else:
|
| 1050 |
+
expected_slice = np.array([0.6680, 0.7344, 0.7656, 0.6484, 0.6875, 0.7109, 0.6328, 0.6719, 0.6562])
|
| 1051 |
+
|
| 1052 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)
|
| 1053 |
+
|
| 1054 |
+
assert max_diff < 1e-3
|
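Editor's note: a minimal sketch, assuming only numpy, of the slice-comparison pattern the integration tests above rely on. `cosine_similarity_distance` below is an illustrative stand-in for the repository's `numpy_cosine_similarity_distance` helper, not its actual implementation, and the generated slice values are hypothetical.

import numpy as np

def cosine_similarity_distance(a, b):
    # 1 - cosine similarity of the flattened slices; ~0 means the slices point the same way,
    # so small per-pixel drift across hardware does not fail the check.
    a, b = np.asarray(a).flatten(), np.asarray(b).flatten()
    return float(1.0 - np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

expected = np.array([0.4023, 0.4023, 0.4023, 0.3965, 0.3984, 0.3965, 0.3926, 0.3906, 0.4219])
out_slice = expected + 1e-4  # hypothetical generated slice with small elementwise drift
assert cosine_similarity_distance(expected, out_slice) < 1e-3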
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_hunyuanvideo.py
ADDED
|
@@ -0,0 +1,264 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import sys
import unittest

import numpy as np
import torch
from transformers import CLIPTextModel, CLIPTokenizer, LlamaModel, LlamaTokenizerFast

from diffusers import (
    AutoencoderKLHunyuanVideo,
    FlowMatchEulerDiscreteScheduler,
    HunyuanVideoPipeline,
    HunyuanVideoTransformer3DModel,
)

from ..testing_utils import (
    Expectations,
    backend_empty_cache,
    floats_tensor,
    nightly,
    numpy_cosine_similarity_distance,
    require_big_accelerator,
    require_peft_backend,
    require_torch_accelerator,
    skip_mps,
    torch_device,
)


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
@skip_mps
class HunyuanVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = HunyuanVideoPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    transformer_kwargs = {
        "in_channels": 4,
        "out_channels": 4,
        "num_attention_heads": 2,
        "attention_head_dim": 10,
        "num_layers": 1,
        "num_single_layers": 1,
        "num_refiner_layers": 1,
        "patch_size": 1,
        "patch_size_t": 1,
        "guidance_embeds": True,
        "text_embed_dim": 16,
        "pooled_projection_dim": 8,
        "rope_axes_dim": (2, 4, 4),
    }
    transformer_cls = HunyuanVideoTransformer3DModel
    vae_kwargs = {
        "in_channels": 3,
        "out_channels": 3,
        "latent_channels": 4,
        "down_block_types": (
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoDownBlock3D",
        ),
        "up_block_types": (
            "HunyuanVideoUpBlock3D",
            "HunyuanVideoUpBlock3D",
            "HunyuanVideoUpBlock3D",
            "HunyuanVideoUpBlock3D",
        ),
        "block_out_channels": (8, 8, 8, 8),
        "layers_per_block": 1,
        "act_fn": "silu",
        "norm_num_groups": 4,
        "scaling_factor": 0.476986,
        "spatial_compression_ratio": 8,
        "temporal_compression_ratio": 4,
        "mid_block_add_attention": True,
    }
    vae_cls = AutoencoderKLHunyuanVideo
    has_two_text_encoders = True
    tokenizer_cls, tokenizer_id, tokenizer_subfolder = (
        LlamaTokenizerFast,
        "hf-internal-testing/tiny-random-hunyuanvideo",
        "tokenizer",
    )
    tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = (
        CLIPTokenizer,
        "hf-internal-testing/tiny-random-hunyuanvideo",
        "tokenizer_2",
    )
    text_encoder_cls, text_encoder_id, text_encoder_subfolder = (
        LlamaModel,
        "hf-internal-testing/tiny-random-hunyuanvideo",
        "text_encoder",
    )
    text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = (
        CLIPTextModel,
        "hf-internal-testing/tiny-random-hunyuanvideo",
        "text_encoder_2",
    )

    @property
    def output_shape(self):
        return (1, 9, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        num_frames = 9
        num_latent_frames = 3  # (num_frames - 1) // temporal_compression_ratio + 1
        sizes = (4, 4)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "",
            "num_frames": num_frames,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "prompt_template": {"template": "{}", "crop_start": 0},
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    # TODO(aryan): Fix the following test
    @unittest.skip("This test fails with an error I haven't been able to debug yet.")
    def test_simple_inference_save_pretrained(self):
        pass

    @unittest.skip("Not supported in HunyuanVideo.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in HunyuanVideo.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in HunyuanVideo.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in HunyuanVideo.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass


@nightly
@require_torch_accelerator
@require_peft_backend
@require_big_accelerator
class HunyuanVideoLoRAIntegrationTests(unittest.TestCase):
    """internal note: The integration slices were obtained on DGX.

    torch: 2.5.1+cu124 with CUDA 12.5. Need the same setup for the
    assertions to pass.
    """

    num_inference_steps = 10
    seed = 0

    def setUp(self):
        super().setUp()

        gc.collect()
        backend_empty_cache(torch_device)

        model_id = "hunyuanvideo-community/HunyuanVideo"
        transformer = HunyuanVideoTransformer3DModel.from_pretrained(
            model_id, subfolder="transformer", torch_dtype=torch.bfloat16
        )
        self.pipeline = HunyuanVideoPipeline.from_pretrained(
            model_id, transformer=transformer, torch_dtype=torch.float16
        ).to(torch_device)

    def tearDown(self):
        super().tearDown()

        gc.collect()
        backend_empty_cache(torch_device)

    def test_original_format_cseti(self):
        self.pipeline.load_lora_weights(
            "Cseti/HunyuanVideo-LoRA-Arcane_Jinx-v1", weight_name="csetiarcane-nfjinx-v1-6000.safetensors"
        )
        self.pipeline.fuse_lora()
        self.pipeline.unload_lora_weights()
        self.pipeline.vae.enable_tiling()

        prompt = "CSETIARCANE. A cat walks on the grass, realistic"

        out = self.pipeline(
            prompt=prompt,
            height=320,
            width=512,
            num_frames=9,
            num_inference_steps=self.num_inference_steps,
            output_type="np",
            generator=torch.manual_seed(self.seed),
        ).frames[0]
        out = out.flatten()
        out_slice = np.concatenate((out[:8], out[-8:]))

        # fmt: off
        expected_slices = Expectations(
            {
                ("cuda", 7): np.array([0.1013, 0.1924, 0.0078, 0.1021, 0.1929, 0.0078, 0.1023, 0.1919, 0.7402, 0.104, 0.4482, 0.7354, 0.0925, 0.4382, 0.7275, 0.0815]),
            }
        )
        # fmt: on
        expected_slice = expected_slices.get_expectation()

        max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), out_slice)

        assert max_diff < 1e-3
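Editor's note: a rough sketch of the idea behind the device-keyed `Expectations` table used in `test_original_format_cseti` above. This is not the diffusers implementation; the key layout and lookup shown here are assumptions made for illustration only.

import numpy as np

# reference slices recorded per (device type, compute capability major) setup
expected_slices = {("cuda", 7): np.array([0.1013, 0.1924, 0.0078])}  # illustrative values

def get_expectation(table, device_type="cuda", major=7):
    # pick the reference slice recorded for the hardware the test is running on
    return table[(device_type, major)]

assert get_expectation(expected_slices).shape == (3,)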
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_ltx_video.py
ADDED
|
@@ -0,0 +1,148 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import (
    AutoencoderKLLTXVideo,
    FlowMatchEulerDiscreteScheduler,
    LTXPipeline,
    LTXVideoTransformer3DModel,
)

from ..testing_utils import floats_tensor, require_peft_backend


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
class LTXVideoLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = LTXPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    transformer_kwargs = {
        "in_channels": 8,
        "out_channels": 8,
        "patch_size": 1,
        "patch_size_t": 1,
        "num_attention_heads": 4,
        "attention_head_dim": 8,
        "cross_attention_dim": 32,
        "num_layers": 1,
        "caption_channels": 32,
    }
    transformer_cls = LTXVideoTransformer3DModel
    vae_kwargs = {
        "in_channels": 3,
        "out_channels": 3,
        "latent_channels": 8,
        "block_out_channels": (8, 8, 8, 8),
        "decoder_block_out_channels": (8, 8, 8, 8),
        "layers_per_block": (1, 1, 1, 1, 1),
        "decoder_layers_per_block": (1, 1, 1, 1, 1),
        "spatio_temporal_scaling": (True, True, False, False),
        "decoder_spatio_temporal_scaling": (True, True, False, False),
        "decoder_inject_noise": (False, False, False, False, False),
        "upsample_residual": (False, False, False, False),
        "upsample_factor": (1, 1, 1, 1),
        "timestep_conditioning": False,
        "patch_size": 1,
        "patch_size_t": 1,
        "encoder_causal": True,
        "decoder_causal": False,
    }
    vae_cls = AutoencoderKLLTXVideo
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    text_encoder_target_modules = ["q", "k", "v", "o"]

    @property
    def output_shape(self):
        return (1, 9, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 8
        num_frames = 9
        num_latent_frames = 3  # (num_frames - 1) // temporal_compression_ratio + 1
        latent_height = 8
        latent_width = 8

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_latent_frames, num_channels, latent_height, latent_width))
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "dance monkey",
            "num_frames": num_frames,
            "num_inference_steps": 4,
            "guidance_scale": 6.0,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    @unittest.skip("Not supported in LTXVideo.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in LTXVideo.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in LTXVideo.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in LTXVideo.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_lumina2.py
ADDED
|
@@ -0,0 +1,173 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import numpy as np
import pytest
import torch
from transformers import AutoTokenizer, GemmaForCausalLM

from diffusers import (
    AutoencoderKL,
    FlowMatchEulerDiscreteScheduler,
    Lumina2Pipeline,
    Lumina2Transformer2DModel,
)

from ..testing_utils import floats_tensor, is_torch_version, require_peft_backend, skip_mps, torch_device


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set  # noqa: E402


@require_peft_backend
class Lumina2LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = Lumina2Pipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    transformer_kwargs = {
        "sample_size": 4,
        "patch_size": 2,
        "in_channels": 4,
        "hidden_size": 8,
        "num_layers": 2,
        "num_attention_heads": 1,
        "num_kv_heads": 1,
        "multiple_of": 16,
        "ffn_dim_multiplier": None,
        "norm_eps": 1e-5,
        "scaling_factor": 1.0,
        "axes_dim_rope": [4, 2, 2],
        "cap_feat_dim": 8,
    }
    transformer_cls = Lumina2Transformer2DModel
    vae_kwargs = {
        "sample_size": 32,
        "in_channels": 3,
        "out_channels": 3,
        "block_out_channels": (4,),
        "layers_per_block": 1,
        "latent_channels": 4,
        "norm_num_groups": 1,
        "use_quant_conv": False,
        "use_post_quant_conv": False,
        "shift_factor": 0.0609,
        "scaling_factor": 1.5035,
    }
    vae_cls = AutoencoderKL
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/dummy-gemma"
    text_encoder_cls, text_encoder_id = GemmaForCausalLM, "hf-internal-testing/dummy-gemma-diffusers"

    @property
    def output_shape(self):
        return (1, 4, 4, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "height": 32,
            "width": 32,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    @unittest.skip("Not supported in Lumina2.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Lumina2.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Lumina2.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Lumina2.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass

    @skip_mps
    @pytest.mark.xfail(
        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
        strict=False,
    )
    def test_lora_fuse_nan(self):
        for scheduler_cls in self.scheduler_classes:
            components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"
                )

            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
            denoiser.add_adapter(denoiser_lora_config, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

            # corrupt one LoRA weight with `inf` values
            with torch.no_grad():
                pipe.transformer.layers[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")

            # with `safe_fusing=True` we should see an Error
            with self.assertRaises(ValueError):
                pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)

            # without we should not see an error, but every image will be black
            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
            out = pipe(**inputs)[0]

            self.assertTrue(np.isnan(out).all())
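Editor's note: a minimal sketch, in plain PyTorch with hypothetical names, of the safe-fusing behavior that `test_lora_fuse_nan` above exercises: before merging a LoRA update into the base weight, check the delta for NaN/inf and refuse to fuse. This mirrors the idea behind `fuse_lora(..., safe_fusing=True)` but is not the diffusers/PEFT implementation.

import torch

def safe_fuse(base_weight, lora_A, lora_B, scale=1.0):
    delta = scale * (lora_B @ lora_A)  # low-rank update B @ A
    if not torch.isfinite(delta).all():
        raise ValueError("LoRA weights contain NaN/inf; refusing to fuse.")
    return base_weight + delta

base = torch.zeros(4, 4)
A, B = torch.randn(2, 4), torch.randn(4, 2)
A[0, 0] = float("inf")  # corrupted adapter, as in the test
try:
    safe_fuse(base, A, B)
except ValueError:
    pass  # expected when safe fusing is enabled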
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_mochi.py
ADDED
|
@@ -0,0 +1,143 @@
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import unittest

import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import AutoencoderKLMochi, FlowMatchEulerDiscreteScheduler, MochiPipeline, MochiTransformer3DModel

from ..testing_utils import (
    floats_tensor,
    require_peft_backend,
    skip_mps,
)


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
@skip_mps
class MochiLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = MochiPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    transformer_kwargs = {
        "patch_size": 2,
        "num_attention_heads": 2,
        "attention_head_dim": 8,
        "num_layers": 2,
        "pooled_projection_dim": 16,
        "in_channels": 12,
        "out_channels": None,
        "qk_norm": "rms_norm",
        "text_embed_dim": 32,
        "time_embed_dim": 4,
        "activation_fn": "swiglu",
        "max_sequence_length": 16,
    }
    transformer_cls = MochiTransformer3DModel
    vae_kwargs = {
        "latent_channels": 12,
        "out_channels": 3,
        "encoder_block_out_channels": (32, 32, 32, 32),
        "decoder_block_out_channels": (32, 32, 32, 32),
        "layers_per_block": (1, 1, 1, 1, 1),
    }
    vae_cls = AutoencoderKLMochi
    tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
    text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"

    text_encoder_target_modules = ["q", "k", "v", "o"]

    @property
    def output_shape(self):
        return (1, 7, 16, 16, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        num_frames = 7
        num_latent_frames = 3
        sizes = (2, 2)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "dance monkey",
            "num_frames": num_frames,
            "num_inference_steps": 4,
            "guidance_scale": 6.0,
            # Cannot reduce because convolution kernel becomes bigger than sample
            "height": 16,
            "width": 16,
            "max_sequence_length": sequence_length,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
        super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)

    def test_simple_inference_with_text_denoiser_lora_unfused(self):
        super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)

    @unittest.skip("Not supported in Mochi.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Mochi.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Mochi.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Mochi.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass

    @unittest.skip("Not supported in CogVideoX.")
    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
        pass
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_qwenimage.py
ADDED
|
@@ -0,0 +1,130 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

import torch
from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer

from diffusers import (
    AutoencoderKLQwenImage,
    FlowMatchEulerDiscreteScheduler,
    QwenImagePipeline,
    QwenImageTransformer2DModel,
)

from ..testing_utils import floats_tensor, require_peft_backend


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
class QwenImageLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = QwenImagePipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    scheduler_kwargs = {}

    transformer_kwargs = {
        "patch_size": 2,
        "in_channels": 16,
        "out_channels": 4,
        "num_layers": 2,
        "attention_head_dim": 16,
        "num_attention_heads": 3,
        "joint_attention_dim": 16,
        "guidance_embeds": False,
        "axes_dims_rope": (8, 4, 4),
    }
    transformer_cls = QwenImageTransformer2DModel
    z_dim = 4
    vae_kwargs = {
        "base_dim": z_dim * 6,
        "z_dim": z_dim,
        "dim_mult": [1, 2, 4],
        "num_res_blocks": 1,
        "temperal_downsample": [False, True],
        "latents_mean": [0.0] * 4,
        "latents_std": [1.0] * 4,
    }
    vae_cls = AutoencoderKLQwenImage
    tokenizer_cls, tokenizer_id = Qwen2Tokenizer, "hf-internal-testing/tiny-random-Qwen25VLForCondGen"
    text_encoder_cls, text_encoder_id = (
        Qwen2_5_VLForConditionalGeneration,
        "hf-internal-testing/tiny-random-Qwen25VLForCondGen",
    )
    denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0"]

    @property
    def output_shape(self):
        return (1, 8, 8, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 10
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 4,
            "guidance_scale": 0.0,
            "height": 8,
            "width": 8,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    @unittest.skip("Not supported in Qwen Image.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in Qwen Image.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Not supported in Qwen Image.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in Qwen Image.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sana.py
ADDED
|
@@ -0,0 +1,139 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest

import torch
from transformers import Gemma2Model, GemmaTokenizer

from diffusers import AutoencoderDC, FlowMatchEulerDiscreteScheduler, SanaPipeline, SanaTransformer2DModel

from ..testing_utils import floats_tensor, require_peft_backend


sys.path.append(".")

from .utils import PeftLoraLoaderMixinTests  # noqa: E402


@require_peft_backend
class SanaLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
    pipeline_class = SanaPipeline
    scheduler_cls = FlowMatchEulerDiscreteScheduler(shift=7.0)
    scheduler_kwargs = {}
    scheduler_classes = [FlowMatchEulerDiscreteScheduler]
    transformer_kwargs = {
        "patch_size": 1,
        "in_channels": 4,
        "out_channels": 4,
        "num_layers": 1,
        "num_attention_heads": 2,
        "attention_head_dim": 4,
        "num_cross_attention_heads": 2,
        "cross_attention_head_dim": 4,
        "cross_attention_dim": 8,
        "caption_channels": 8,
        "sample_size": 32,
    }
    transformer_cls = SanaTransformer2DModel
    vae_kwargs = {
        "in_channels": 3,
        "latent_channels": 4,
        "attention_head_dim": 2,
        "encoder_block_types": (
            "ResBlock",
            "EfficientViTBlock",
        ),
        "decoder_block_types": (
            "ResBlock",
            "EfficientViTBlock",
        ),
        "encoder_block_out_channels": (8, 8),
        "decoder_block_out_channels": (8, 8),
        "encoder_qkv_multiscales": ((), (5,)),
        "decoder_qkv_multiscales": ((), (5,)),
        "encoder_layers_per_block": (1, 1),
        "decoder_layers_per_block": [1, 1],
        "downsample_block_type": "conv",
        "upsample_block_type": "interpolate",
        "decoder_norm_types": "rms_norm",
        "decoder_act_fns": "silu",
        "scaling_factor": 0.41407,
    }
    vae_cls = AutoencoderDC
    tokenizer_cls, tokenizer_id = GemmaTokenizer, "hf-internal-testing/dummy-gemma"
    text_encoder_cls, text_encoder_id = Gemma2Model, "hf-internal-testing/dummy-gemma-for-diffusers"

    @property
    def output_shape(self):
        return (1, 32, 32, 3)

    def get_dummy_inputs(self, with_generator=True):
        batch_size = 1
        sequence_length = 16
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "",
            "negative_prompt": "",
            "num_inference_steps": 4,
            "guidance_scale": 4.5,
            "height": 32,
            "width": 32,
            "max_sequence_length": sequence_length,
            "output_type": "np",
            "complex_human_instruction": None,
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    @unittest.skip("Not supported in SANA.")
    def test_modify_padding_mode(self):
        pass

    @unittest.skip("Not supported in SANA.")
    def test_simple_inference_with_text_denoiser_block_scale(self):
        pass

    @unittest.skip("Not supported in SANA.")
    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in SANA.")
    def test_simple_inference_with_partial_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in SANA.")
    def test_simple_inference_with_text_lora(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in SANA.")
    def test_simple_inference_with_text_lora_and_scale(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in SANA.")
    def test_simple_inference_with_text_lora_fused(self):
        pass

    @unittest.skip("Text encoder LoRA is not supported in SANA.")
    def test_simple_inference_with_text_lora_save_load(self):
        pass
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd.py
ADDED
|
@@ -0,0 +1,769 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025 HuggingFace Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import gc
|
| 16 |
+
import sys
|
| 17 |
+
import unittest
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
import torch.nn as nn
|
| 22 |
+
from huggingface_hub import hf_hub_download
|
| 23 |
+
from safetensors.torch import load_file
|
| 24 |
+
from transformers import CLIPTextModel, CLIPTokenizer
|
| 25 |
+
|
| 26 |
+
from diffusers import (
|
| 27 |
+
AutoPipelineForImage2Image,
|
| 28 |
+
AutoPipelineForText2Image,
|
| 29 |
+
DDIMScheduler,
|
| 30 |
+
DiffusionPipeline,
|
| 31 |
+
LCMScheduler,
|
| 32 |
+
StableDiffusionPipeline,
|
| 33 |
+
)
|
| 34 |
+
from diffusers.utils.import_utils import is_accelerate_available
|
| 35 |
+
|
| 36 |
+
from ..testing_utils import (
|
| 37 |
+
Expectations,
|
| 38 |
+
backend_empty_cache,
|
| 39 |
+
load_image,
|
| 40 |
+
nightly,
|
| 41 |
+
numpy_cosine_similarity_distance,
|
| 42 |
+
require_peft_backend,
|
| 43 |
+
require_torch_accelerator,
|
| 44 |
+
slow,
|
| 45 |
+
torch_device,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
sys.path.append(".")
|
| 50 |
+
|
| 51 |
+
from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set # noqa: E402
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
if is_accelerate_available():
|
| 55 |
+
from accelerate.utils import release_memory
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class StableDiffusionLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
|
| 59 |
+
pipeline_class = StableDiffusionPipeline
|
| 60 |
+
scheduler_cls = DDIMScheduler
|
| 61 |
+
scheduler_kwargs = {
|
| 62 |
+
"beta_start": 0.00085,
|
| 63 |
+
"beta_end": 0.012,
|
| 64 |
+
"beta_schedule": "scaled_linear",
|
| 65 |
+
"clip_sample": False,
|
| 66 |
+
"set_alpha_to_one": False,
|
| 67 |
+
"steps_offset": 1,
|
| 68 |
+
}
|
| 69 |
+
unet_kwargs = {
|
| 70 |
+
"block_out_channels": (32, 64),
|
| 71 |
+
"layers_per_block": 2,
|
| 72 |
+
"sample_size": 32,
|
| 73 |
+
"in_channels": 4,
|
| 74 |
+
"out_channels": 4,
|
| 75 |
+
"down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"),
|
| 76 |
+
"up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"),
|
| 77 |
+
"cross_attention_dim": 32,
|
| 78 |
+
}
|
| 79 |
+
vae_kwargs = {
|
| 80 |
+
"block_out_channels": [32, 64],
|
| 81 |
+
"in_channels": 3,
|
| 82 |
+
"out_channels": 3,
|
| 83 |
+
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
| 84 |
+
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
| 85 |
+
"latent_channels": 4,
|
| 86 |
+
}
|
| 87 |
+
text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2"
|
| 88 |
+
tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
|
| 89 |
+
|
| 90 |
+
@property
|
| 91 |
+
def output_shape(self):
|
| 92 |
+
return (1, 64, 64, 3)
|
| 93 |
+
|
| 94 |
+
def setUp(self):
|
| 95 |
+
super().setUp()
|
| 96 |
+
gc.collect()
|
| 97 |
+
backend_empty_cache(torch_device)
|
| 98 |
+
|
| 99 |
+
def tearDown(self):
|
| 100 |
+
super().tearDown()
|
| 101 |
+
gc.collect()
|
| 102 |
+
backend_empty_cache(torch_device)
|
| 103 |
+
|
| 104 |
+
# Keeping this test here makes sense because it doesn't perform any integration checks
|
| 105 |
+
# (value assertions on logits).
|
| 106 |
+
@slow
|
| 107 |
+
@require_torch_accelerator
|
| 108 |
+
def test_integration_move_lora_cpu(self):
|
| 109 |
+
path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 110 |
+
lora_id = "takuma104/lora-test-text-encoder-lora-target"
|
| 111 |
+
|
| 112 |
+
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
| 113 |
+
pipe.load_lora_weights(lora_id, adapter_name="adapter-1")
|
| 114 |
+
pipe.load_lora_weights(lora_id, adapter_name="adapter-2")
|
| 115 |
+
pipe = pipe.to(torch_device)
|
| 116 |
+
|
| 117 |
+
self.assertTrue(
|
| 118 |
+
check_if_lora_correctly_set(pipe.text_encoder),
|
| 119 |
+
"Lora not correctly set in text encoder",
|
| 120 |
+
)
|
| 121 |
+
|
| 122 |
+
self.assertTrue(
|
| 123 |
+
check_if_lora_correctly_set(pipe.unet),
|
| 124 |
+
"Lora not correctly set in unet",
|
| 125 |
+
)
|
| 126 |
+
|
| 127 |
+
# We will offload the first adapter in CPU and check if the offloading
|
| 128 |
+
# has been performed correctly
|
| 129 |
+
pipe.set_lora_device(["adapter-1"], "cpu")
|
| 130 |
+
|
| 131 |
+
for name, module in pipe.unet.named_modules():
|
| 132 |
+
if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 133 |
+
self.assertTrue(module.weight.device == torch.device("cpu"))
|
| 134 |
+
elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 135 |
+
self.assertTrue(module.weight.device != torch.device("cpu"))
|
| 136 |
+
|
| 137 |
+
for name, module in pipe.text_encoder.named_modules():
|
| 138 |
+
if "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 139 |
+
self.assertTrue(module.weight.device == torch.device("cpu"))
|
| 140 |
+
elif "adapter-2" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 141 |
+
self.assertTrue(module.weight.device != torch.device("cpu"))
|
| 142 |
+
|
| 143 |
+
pipe.set_lora_device(["adapter-1"], 0)
|
| 144 |
+
|
| 145 |
+
for n, m in pipe.unet.named_modules():
|
| 146 |
+
if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
|
| 147 |
+
self.assertTrue(m.weight.device != torch.device("cpu"))
|
| 148 |
+
|
| 149 |
+
for n, m in pipe.text_encoder.named_modules():
|
| 150 |
+
if "adapter-1" in n and not isinstance(m, (nn.Dropout, nn.Identity)):
|
| 151 |
+
self.assertTrue(m.weight.device != torch.device("cpu"))
|
| 152 |
+
|
| 153 |
+
pipe.set_lora_device(["adapter-1", "adapter-2"], torch_device)
|
| 154 |
+
|
| 155 |
+
for n, m in pipe.unet.named_modules():
|
| 156 |
+
if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
|
| 157 |
+
self.assertTrue(m.weight.device != torch.device("cpu"))
|
| 158 |
+
|
| 159 |
+
for n, m in pipe.text_encoder.named_modules():
|
| 160 |
+
if ("adapter-1" in n or "adapter-2" in n) and not isinstance(m, (nn.Dropout, nn.Identity)):
|
| 161 |
+
self.assertTrue(m.weight.device != torch.device("cpu"))
|
| 162 |
+
|
| 163 |
+
@slow
|
| 164 |
+
@require_torch_accelerator
|
| 165 |
+
def test_integration_move_lora_dora_cpu(self):
|
| 166 |
+
from peft import LoraConfig
|
| 167 |
+
|
| 168 |
+
path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 169 |
+
unet_lora_config = LoraConfig(
|
| 170 |
+
init_lora_weights="gaussian",
|
| 171 |
+
target_modules=["to_k", "to_q", "to_v", "to_out.0"],
|
| 172 |
+
use_dora=True,
|
| 173 |
+
)
|
| 174 |
+
text_lora_config = LoraConfig(
|
| 175 |
+
init_lora_weights="gaussian",
|
| 176 |
+
target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
|
| 177 |
+
use_dora=True,
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
| 181 |
+
pipe.unet.add_adapter(unet_lora_config, "adapter-1")
|
| 182 |
+
pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
|
| 183 |
+
|
| 184 |
+
self.assertTrue(
|
| 185 |
+
check_if_lora_correctly_set(pipe.text_encoder),
|
| 186 |
+
"Lora not correctly set in text encoder",
|
| 187 |
+
)
|
| 188 |
+
|
| 189 |
+
self.assertTrue(
|
| 190 |
+
check_if_lora_correctly_set(pipe.unet),
|
| 191 |
+
"Lora not correctly set in unet",
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
for name, param in pipe.unet.named_parameters():
|
| 195 |
+
if "lora_" in name:
|
| 196 |
+
self.assertEqual(param.device, torch.device("cpu"))
|
| 197 |
+
|
| 198 |
+
for name, param in pipe.text_encoder.named_parameters():
|
| 199 |
+
if "lora_" in name:
|
| 200 |
+
self.assertEqual(param.device, torch.device("cpu"))
|
| 201 |
+
|
| 202 |
+
pipe.set_lora_device(["adapter-1"], torch_device)
|
| 203 |
+
|
| 204 |
+
for name, param in pipe.unet.named_parameters():
|
| 205 |
+
if "lora_" in name:
|
| 206 |
+
self.assertNotEqual(param.device, torch.device("cpu"))
|
| 207 |
+
|
| 208 |
+
for name, param in pipe.text_encoder.named_parameters():
|
| 209 |
+
if "lora_" in name:
|
| 210 |
+
self.assertNotEqual(param.device, torch.device("cpu"))
|
| 211 |
+
|
| 212 |
+
@slow
|
| 213 |
+
@require_torch_accelerator
|
| 214 |
+
def test_integration_set_lora_device_different_target_layers(self):
|
| 215 |
+
# fixes a bug that occurred when calling set_lora_device with multiple adapters loaded that target different
|
| 216 |
+
# layers, see #11833
|
| 217 |
+
from peft import LoraConfig
|
| 218 |
+
|
| 219 |
+
path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 220 |
+
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
| 221 |
+
# configs partly target the same, partly different layers
|
| 222 |
+
config0 = LoraConfig(target_modules=["to_k", "to_v"])
|
| 223 |
+
config1 = LoraConfig(target_modules=["to_k", "to_q"])
|
| 224 |
+
pipe.unet.add_adapter(config0, adapter_name="adapter-0")
|
| 225 |
+
pipe.unet.add_adapter(config1, adapter_name="adapter-1")
|
| 226 |
+
pipe = pipe.to(torch_device)
|
| 227 |
+
|
| 228 |
+
self.assertTrue(
|
| 229 |
+
check_if_lora_correctly_set(pipe.unet),
|
| 230 |
+
"Lora not correctly set in unet",
|
| 231 |
+
)
|
| 232 |
+
|
| 233 |
+
# sanity check that the adapters don't target the same layers, otherwise the test passes even without the fix
|
| 234 |
+
modules_adapter_0 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-0")}
|
| 235 |
+
modules_adapter_1 = {n for n, _ in pipe.unet.named_modules() if n.endswith(".adapter-1")}
|
| 236 |
+
self.assertNotEqual(modules_adapter_0, modules_adapter_1)
|
| 237 |
+
self.assertTrue(modules_adapter_0 - modules_adapter_1)
|
| 238 |
+
self.assertTrue(modules_adapter_1 - modules_adapter_0)
|
| 239 |
+
|
| 240 |
+
# setting both separately works
|
| 241 |
+
pipe.set_lora_device(["adapter-0"], "cpu")
|
| 242 |
+
pipe.set_lora_device(["adapter-1"], "cpu")
|
| 243 |
+
|
| 244 |
+
for name, module in pipe.unet.named_modules():
|
| 245 |
+
if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 246 |
+
self.assertTrue(module.weight.device == torch.device("cpu"))
|
| 247 |
+
elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 248 |
+
self.assertTrue(module.weight.device == torch.device("cpu"))
|
| 249 |
+
|
| 250 |
+
# setting both at once also works
|
| 251 |
+
pipe.set_lora_device(["adapter-0", "adapter-1"], torch_device)
|
| 252 |
+
|
| 253 |
+
for name, module in pipe.unet.named_modules():
|
| 254 |
+
if "adapter-0" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 255 |
+
self.assertTrue(module.weight.device != torch.device("cpu"))
|
| 256 |
+
elif "adapter-1" in name and not isinstance(module, (nn.Dropout, nn.Identity)):
|
| 257 |
+
self.assertTrue(module.weight.device != torch.device("cpu"))
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
@slow
|
| 261 |
+
@nightly
|
| 262 |
+
@require_torch_accelerator
|
| 263 |
+
@require_peft_backend
|
| 264 |
+
class LoraIntegrationTests(unittest.TestCase):
|
| 265 |
+
def setUp(self):
|
| 266 |
+
super().setUp()
|
| 267 |
+
gc.collect()
|
| 268 |
+
backend_empty_cache(torch_device)
|
| 269 |
+
|
| 270 |
+
def tearDown(self):
|
| 271 |
+
super().tearDown()
|
| 272 |
+
gc.collect()
|
| 273 |
+
backend_empty_cache(torch_device)
|
| 274 |
+
|
| 275 |
+
def test_integration_logits_with_scale(self):
|
| 276 |
+
path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 277 |
+
lora_id = "takuma104/lora-test-text-encoder-lora-target"
|
| 278 |
+
|
| 279 |
+
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
|
| 280 |
+
pipe.load_lora_weights(lora_id)
|
| 281 |
+
pipe = pipe.to(torch_device)
|
| 282 |
+
|
| 283 |
+
self.assertTrue(
|
| 284 |
+
check_if_lora_correctly_set(pipe.text_encoder),
|
| 285 |
+
"Lora not correctly set in text encoder",
|
| 286 |
+
)
|
| 287 |
+
|
| 288 |
+
prompt = "a red sks dog"
|
| 289 |
+
|
| 290 |
+
images = pipe(
|
| 291 |
+
prompt=prompt,
|
| 292 |
+
num_inference_steps=15,
|
| 293 |
+
cross_attention_kwargs={"scale": 0.5},
|
| 294 |
+
generator=torch.manual_seed(0),
|
| 295 |
+
output_type="np",
|
| 296 |
+
).images
|
| 297 |
+
|
| 298 |
+
expected_slice_scale = np.array([0.307, 0.283, 0.310, 0.310, 0.300, 0.314, 0.336, 0.314, 0.321])
|
| 299 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 300 |
+
|
| 301 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 302 |
+
assert max_diff < 1e-3
|
| 303 |
+
|
| 304 |
+
pipe.unload_lora_weights()
|
| 305 |
+
release_memory(pipe)
|
| 306 |
+
|
| 307 |
+
def test_integration_logits_no_scale(self):
|
| 308 |
+
path = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 309 |
+
lora_id = "takuma104/lora-test-text-encoder-lora-target"
|
| 310 |
+
|
| 311 |
+
pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float32)
|
| 312 |
+
pipe.load_lora_weights(lora_id)
|
| 313 |
+
pipe = pipe.to(torch_device)
|
| 314 |
+
|
| 315 |
+
self.assertTrue(
|
| 316 |
+
check_if_lora_correctly_set(pipe.text_encoder),
|
| 317 |
+
"Lora not correctly set in text encoder",
|
| 318 |
+
)
|
| 319 |
+
|
| 320 |
+
prompt = "a red sks dog"
|
| 321 |
+
|
| 322 |
+
images = pipe(prompt=prompt, num_inference_steps=30, generator=torch.manual_seed(0), output_type="np").images
|
| 323 |
+
|
| 324 |
+
expected_slice_scale = np.array([0.074, 0.064, 0.073, 0.0842, 0.069, 0.0641, 0.0794, 0.076, 0.084])
|
| 325 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 326 |
+
|
| 327 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 328 |
+
|
| 329 |
+
assert max_diff < 1e-3
|
| 330 |
+
|
| 331 |
+
pipe.unload_lora_weights()
|
| 332 |
+
release_memory(pipe)
|
| 333 |
+
|
| 334 |
+
def test_dreambooth_old_format(self):
|
| 335 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 336 |
+
|
| 337 |
+
lora_model_id = "hf-internal-testing/lora_dreambooth_dog_example"
|
| 338 |
+
|
| 339 |
+
base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 340 |
+
|
| 341 |
+
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
|
| 342 |
+
pipe = pipe.to(torch_device)
|
| 343 |
+
pipe.load_lora_weights(lora_model_id)
|
| 344 |
+
|
| 345 |
+
images = pipe(
|
| 346 |
+
"A photo of a sks dog floating in the river", output_type="np", generator=generator, num_inference_steps=2
|
| 347 |
+
).images
|
| 348 |
+
|
| 349 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 350 |
+
expected = np.array([0.7207, 0.6787, 0.6010, 0.7478, 0.6838, 0.6064, 0.6984, 0.6443, 0.5785])
|
| 351 |
+
|
| 352 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 353 |
+
assert max_diff < 1e-4
|
| 354 |
+
|
| 355 |
+
pipe.unload_lora_weights()
|
| 356 |
+
release_memory(pipe)
|
| 357 |
+
|
| 358 |
+
def test_dreambooth_text_encoder_new_format(self):
|
| 359 |
+
generator = torch.Generator().manual_seed(0)
|
| 360 |
+
|
| 361 |
+
lora_model_id = "hf-internal-testing/lora-trained"
|
| 362 |
+
|
| 363 |
+
base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 364 |
+
|
| 365 |
+
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
|
| 366 |
+
pipe = pipe.to(torch_device)
|
| 367 |
+
pipe.load_lora_weights(lora_model_id)
|
| 368 |
+
|
| 369 |
+
images = pipe("A photo of a sks dog", output_type="np", generator=generator, num_inference_steps=2).images
|
| 370 |
+
|
| 371 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 372 |
+
|
| 373 |
+
expected = np.array([0.6628, 0.6138, 0.5390, 0.6625, 0.6130, 0.5463, 0.6166, 0.5788, 0.5359])
|
| 374 |
+
|
| 375 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 376 |
+
assert max_diff < 1e-4
|
| 377 |
+
|
| 378 |
+
pipe.unload_lora_weights()
|
| 379 |
+
release_memory(pipe)
|
| 380 |
+
|
| 381 |
+
def test_a1111(self):
|
| 382 |
+
generator = torch.Generator().manual_seed(0)
|
| 383 |
+
|
| 384 |
+
pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None).to(
|
| 385 |
+
torch_device
|
| 386 |
+
)
|
| 387 |
+
lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
|
| 388 |
+
lora_filename = "light_and_shadow.safetensors"
|
| 389 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 390 |
+
|
| 391 |
+
images = pipe(
|
| 392 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 393 |
+
).images
|
| 394 |
+
|
| 395 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 396 |
+
expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292])
|
| 397 |
+
|
| 398 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 399 |
+
assert max_diff < 1e-3
|
| 400 |
+
|
| 401 |
+
pipe.unload_lora_weights()
|
| 402 |
+
release_memory(pipe)
|
| 403 |
+
|
| 404 |
+
def test_lycoris(self):
|
| 405 |
+
generator = torch.Generator().manual_seed(0)
|
| 406 |
+
|
| 407 |
+
pipe = StableDiffusionPipeline.from_pretrained(
|
| 408 |
+
"hf-internal-testing/Amixx", safety_checker=None, use_safetensors=True, variant="fp16"
|
| 409 |
+
).to(torch_device)
|
| 410 |
+
lora_model_id = "hf-internal-testing/edgLycorisMugler-light"
|
| 411 |
+
lora_filename = "edgLycorisMugler-light.safetensors"
|
| 412 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 413 |
+
|
| 414 |
+
images = pipe(
|
| 415 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 416 |
+
).images
|
| 417 |
+
|
| 418 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 419 |
+
expected = np.array([0.6463, 0.658, 0.599, 0.6542, 0.6512, 0.6213, 0.658, 0.6485, 0.6017])
|
| 420 |
+
|
| 421 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 422 |
+
assert max_diff < 1e-3
|
| 423 |
+
|
| 424 |
+
pipe.unload_lora_weights()
|
| 425 |
+
release_memory(pipe)
|
| 426 |
+
|
| 427 |
+
def test_a1111_with_model_cpu_offload(self):
|
| 428 |
+
generator = torch.Generator().manual_seed(0)
|
| 429 |
+
|
| 430 |
+
pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None)
|
| 431 |
+
pipe.enable_model_cpu_offload(device=torch_device)
|
| 432 |
+
lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
|
| 433 |
+
lora_filename = "light_and_shadow.safetensors"
|
| 434 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 435 |
+
|
| 436 |
+
images = pipe(
|
| 437 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 438 |
+
).images
|
| 439 |
+
|
| 440 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 441 |
+
expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292])
|
| 442 |
+
|
| 443 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 444 |
+
assert max_diff < 1e-3
|
| 445 |
+
|
| 446 |
+
pipe.unload_lora_weights()
|
| 447 |
+
release_memory(pipe)
|
| 448 |
+
|
| 449 |
+
def test_a1111_with_sequential_cpu_offload(self):
|
| 450 |
+
generator = torch.Generator().manual_seed(0)
|
| 451 |
+
|
| 452 |
+
pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/Counterfeit-V2.5", safety_checker=None)
|
| 453 |
+
pipe.enable_sequential_cpu_offload(device=torch_device)
|
| 454 |
+
lora_model_id = "hf-internal-testing/civitai-light-shadow-lora"
|
| 455 |
+
lora_filename = "light_and_shadow.safetensors"
|
| 456 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 457 |
+
|
| 458 |
+
images = pipe(
|
| 459 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 460 |
+
).images
|
| 461 |
+
|
| 462 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 463 |
+
expected = np.array([0.3636, 0.3708, 0.3694, 0.3679, 0.3829, 0.3677, 0.3692, 0.3688, 0.3292])
|
| 464 |
+
|
| 465 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 466 |
+
assert max_diff < 1e-3
|
| 467 |
+
|
| 468 |
+
pipe.unload_lora_weights()
|
| 469 |
+
release_memory(pipe)
|
| 470 |
+
|
| 471 |
+
def test_kohya_sd_v15_with_higher_dimensions(self):
|
| 472 |
+
generator = torch.Generator().manual_seed(0)
|
| 473 |
+
|
| 474 |
+
pipe = StableDiffusionPipeline.from_pretrained(
|
| 475 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
|
| 476 |
+
).to(torch_device)
|
| 477 |
+
lora_model_id = "hf-internal-testing/urushisato-lora"
|
| 478 |
+
lora_filename = "urushisato_v15.safetensors"
|
| 479 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 480 |
+
|
| 481 |
+
images = pipe(
|
| 482 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 483 |
+
).images
|
| 484 |
+
|
| 485 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 486 |
+
expected = np.array([0.7165, 0.6616, 0.5833, 0.7504, 0.6718, 0.587, 0.6871, 0.6361, 0.5694])
|
| 487 |
+
|
| 488 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 489 |
+
assert max_diff < 1e-3
|
| 490 |
+
|
| 491 |
+
pipe.unload_lora_weights()
|
| 492 |
+
release_memory(pipe)
|
| 493 |
+
|
| 494 |
+
def test_vanilla_finetuning(self):
|
| 495 |
+
generator = torch.Generator().manual_seed(0)
|
| 496 |
+
|
| 497 |
+
lora_model_id = "hf-internal-testing/sd-model-finetuned-lora-t4"
|
| 498 |
+
|
| 499 |
+
base_model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
|
| 500 |
+
|
| 501 |
+
pipe = StableDiffusionPipeline.from_pretrained(base_model_id, safety_checker=None)
|
| 502 |
+
pipe = pipe.to(torch_device)
|
| 503 |
+
pipe.load_lora_weights(lora_model_id)
|
| 504 |
+
|
| 505 |
+
images = pipe("A pokemon with blue eyes.", output_type="np", generator=generator, num_inference_steps=2).images
|
| 506 |
+
|
| 507 |
+
image_slice = images[0, -3:, -3:, -1].flatten()
|
| 508 |
+
|
| 509 |
+
expected_slices = Expectations(
|
| 510 |
+
{
|
| 511 |
+
("xpu", 3): np.array(
|
| 512 |
+
[
|
| 513 |
+
0.6544,
|
| 514 |
+
0.6127,
|
| 515 |
+
0.5397,
|
| 516 |
+
0.6845,
|
| 517 |
+
0.6047,
|
| 518 |
+
0.5469,
|
| 519 |
+
0.6349,
|
| 520 |
+
0.5906,
|
| 521 |
+
0.5382,
|
| 522 |
+
]
|
| 523 |
+
),
|
| 524 |
+
("cuda", 7): np.array(
|
| 525 |
+
[
|
| 526 |
+
0.7406,
|
| 527 |
+
0.699,
|
| 528 |
+
0.5963,
|
| 529 |
+
0.7493,
|
| 530 |
+
0.7045,
|
| 531 |
+
0.6096,
|
| 532 |
+
0.6886,
|
| 533 |
+
0.6388,
|
| 534 |
+
0.583,
|
| 535 |
+
]
|
| 536 |
+
),
|
| 537 |
+
("cuda", 8): np.array(
|
| 538 |
+
[
|
| 539 |
+
0.6542,
|
| 540 |
+
0.61253,
|
| 541 |
+
0.5396,
|
| 542 |
+
0.6843,
|
| 543 |
+
0.6044,
|
| 544 |
+
0.5468,
|
| 545 |
+
0.6349,
|
| 546 |
+
0.5905,
|
| 547 |
+
0.5381,
|
| 548 |
+
]
|
| 549 |
+
),
|
| 550 |
+
}
|
| 551 |
+
)
|
| 552 |
+
expected_slice = expected_slices.get_expectation()
|
| 553 |
+
|
| 554 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice, image_slice)
|
| 555 |
+
assert max_diff < 1e-4
|
| 556 |
+
|
| 557 |
+
pipe.unload_lora_weights()
|
| 558 |
+
release_memory(pipe)
|
| 559 |
+
|
| 560 |
+
def test_unload_kohya_lora(self):
|
| 561 |
+
generator = torch.manual_seed(0)
|
| 562 |
+
prompt = "masterpiece, best quality, mountain"
|
| 563 |
+
num_inference_steps = 2
|
| 564 |
+
|
| 565 |
+
pipe = StableDiffusionPipeline.from_pretrained(
|
| 566 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
|
| 567 |
+
).to(torch_device)
|
| 568 |
+
initial_images = pipe(
|
| 569 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 570 |
+
).images
|
| 571 |
+
initial_images = initial_images[0, -3:, -3:, -1].flatten()
|
| 572 |
+
|
| 573 |
+
lora_model_id = "hf-internal-testing/civitai-colored-icons-lora"
|
| 574 |
+
lora_filename = "Colored_Icons_by_vizsumit.safetensors"
|
| 575 |
+
|
| 576 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 577 |
+
generator = torch.manual_seed(0)
|
| 578 |
+
lora_images = pipe(
|
| 579 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 580 |
+
).images
|
| 581 |
+
lora_images = lora_images[0, -3:, -3:, -1].flatten()
|
| 582 |
+
|
| 583 |
+
pipe.unload_lora_weights()
|
| 584 |
+
generator = torch.manual_seed(0)
|
| 585 |
+
unloaded_lora_images = pipe(
|
| 586 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 587 |
+
).images
|
| 588 |
+
unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()
|
| 589 |
+
|
| 590 |
+
self.assertFalse(np.allclose(initial_images, lora_images))
|
| 591 |
+
self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))
|
| 592 |
+
|
| 593 |
+
release_memory(pipe)
|
| 594 |
+
|
| 595 |
+
def test_load_unload_load_kohya_lora(self):
|
| 596 |
+
# This test ensures that a Kohya-style LoRA can be safely unloaded and then loaded
|
| 597 |
+
# without introducing any side-effects. Even though the test uses a Kohya-style
|
| 598 |
+
# LoRA, the underlying adapter handling mechanism is format-agnostic.
|
| 599 |
+
generator = torch.manual_seed(0)
|
| 600 |
+
prompt = "masterpiece, best quality, mountain"
|
| 601 |
+
num_inference_steps = 2
|
| 602 |
+
|
| 603 |
+
pipe = StableDiffusionPipeline.from_pretrained(
|
| 604 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", safety_checker=None
|
| 605 |
+
).to(torch_device)
|
| 606 |
+
initial_images = pipe(
|
| 607 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 608 |
+
).images
|
| 609 |
+
initial_images = initial_images[0, -3:, -3:, -1].flatten()
|
| 610 |
+
|
| 611 |
+
lora_model_id = "hf-internal-testing/civitai-colored-icons-lora"
|
| 612 |
+
lora_filename = "Colored_Icons_by_vizsumit.safetensors"
|
| 613 |
+
|
| 614 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 615 |
+
generator = torch.manual_seed(0)
|
| 616 |
+
lora_images = pipe(
|
| 617 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 618 |
+
).images
|
| 619 |
+
lora_images = lora_images[0, -3:, -3:, -1].flatten()
|
| 620 |
+
|
| 621 |
+
pipe.unload_lora_weights()
|
| 622 |
+
generator = torch.manual_seed(0)
|
| 623 |
+
unloaded_lora_images = pipe(
|
| 624 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 625 |
+
).images
|
| 626 |
+
unloaded_lora_images = unloaded_lora_images[0, -3:, -3:, -1].flatten()
|
| 627 |
+
|
| 628 |
+
self.assertFalse(np.allclose(initial_images, lora_images))
|
| 629 |
+
self.assertTrue(np.allclose(initial_images, unloaded_lora_images, atol=1e-3))
|
| 630 |
+
|
| 631 |
+
# make sure we can load a LoRA again after unloading and they don't have
|
| 632 |
+
# any undesired effects.
|
| 633 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 634 |
+
generator = torch.manual_seed(0)
|
| 635 |
+
lora_images_again = pipe(
|
| 636 |
+
prompt, output_type="np", generator=generator, num_inference_steps=num_inference_steps
|
| 637 |
+
).images
|
| 638 |
+
lora_images_again = lora_images_again[0, -3:, -3:, -1].flatten()
|
| 639 |
+
|
| 640 |
+
self.assertTrue(np.allclose(lora_images, lora_images_again, atol=1e-3))
|
| 641 |
+
release_memory(pipe)
|
| 642 |
+
|
| 643 |
+
def test_not_empty_state_dict(self):
|
| 644 |
+
# Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
|
| 645 |
+
pipe = AutoPipelineForText2Image.from_pretrained(
|
| 646 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
|
| 647 |
+
).to(torch_device)
|
| 648 |
+
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 649 |
+
|
| 650 |
+
cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
|
| 651 |
+
lcm_lora = load_file(cached_file)
|
| 652 |
+
|
| 653 |
+
pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
|
| 654 |
+
self.assertTrue(lcm_lora != {})
|
| 655 |
+
release_memory(pipe)
|
| 656 |
+
|
| 657 |
+
def test_load_unload_load_state_dict(self):
|
| 658 |
+
# Makes sure https://github.com/huggingface/diffusers/issues/7054 does not happen again
|
| 659 |
+
pipe = AutoPipelineForText2Image.from_pretrained(
|
| 660 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
|
| 661 |
+
).to(torch_device)
|
| 662 |
+
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 663 |
+
|
| 664 |
+
cached_file = hf_hub_download("hf-internal-testing/lcm-lora-test-sd-v1-5", "test_lora.safetensors")
|
| 665 |
+
lcm_lora = load_file(cached_file)
|
| 666 |
+
previous_state_dict = lcm_lora.copy()
|
| 667 |
+
|
| 668 |
+
pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
|
| 669 |
+
self.assertDictEqual(lcm_lora, previous_state_dict)
|
| 670 |
+
|
| 671 |
+
pipe.unload_lora_weights()
|
| 672 |
+
pipe.load_lora_weights(lcm_lora, adapter_name="lcm")
|
| 673 |
+
self.assertDictEqual(lcm_lora, previous_state_dict)
|
| 674 |
+
|
| 675 |
+
release_memory(pipe)
|
| 676 |
+
|
| 677 |
+
def test_sdv1_5_lcm_lora(self):
|
| 678 |
+
pipe = DiffusionPipeline.from_pretrained(
|
| 679 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
|
| 680 |
+
)
|
| 681 |
+
pipe.to(torch_device)
|
| 682 |
+
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 683 |
+
|
| 684 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 685 |
+
|
| 686 |
+
lora_model_id = "latent-consistency/lcm-lora-sdv1-5"
|
| 687 |
+
pipe.load_lora_weights(lora_model_id)
|
| 688 |
+
|
| 689 |
+
image = pipe(
|
| 690 |
+
"masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5
|
| 691 |
+
).images[0]
|
| 692 |
+
|
| 693 |
+
expected_image = load_image(
|
| 694 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdv15_lcm_lora.png"
|
| 695 |
+
)
|
| 696 |
+
|
| 697 |
+
image_np = pipe.image_processor.pil_to_numpy(image)
|
| 698 |
+
expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
|
| 699 |
+
|
| 700 |
+
max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
|
| 701 |
+
assert max_diff < 1e-4
|
| 702 |
+
|
| 703 |
+
pipe.unload_lora_weights()
|
| 704 |
+
|
| 705 |
+
release_memory(pipe)
|
| 706 |
+
|
| 707 |
+
def test_sdv1_5_lcm_lora_img2img(self):
|
| 708 |
+
pipe = AutoPipelineForImage2Image.from_pretrained(
|
| 709 |
+
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
|
| 710 |
+
)
|
| 711 |
+
pipe.to(torch_device)
|
| 712 |
+
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 713 |
+
|
| 714 |
+
init_image = load_image(
|
| 715 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape.png"
|
| 716 |
+
)
|
| 717 |
+
|
| 718 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 719 |
+
|
| 720 |
+
lora_model_id = "latent-consistency/lcm-lora-sdv1-5"
|
| 721 |
+
pipe.load_lora_weights(lora_model_id)
|
| 722 |
+
|
| 723 |
+
image = pipe(
|
| 724 |
+
"snowy mountain",
|
| 725 |
+
generator=generator,
|
| 726 |
+
image=init_image,
|
| 727 |
+
strength=0.5,
|
| 728 |
+
num_inference_steps=4,
|
| 729 |
+
guidance_scale=0.5,
|
| 730 |
+
).images[0]
|
| 731 |
+
|
| 732 |
+
expected_image = load_image(
|
| 733 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdv15_lcm_lora_img2img.png"
|
| 734 |
+
)
|
| 735 |
+
|
| 736 |
+
image_np = pipe.image_processor.pil_to_numpy(image)
|
| 737 |
+
expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
|
| 738 |
+
|
| 739 |
+
max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
|
| 740 |
+
assert max_diff < 1e-4
|
| 741 |
+
|
| 742 |
+
pipe.unload_lora_weights()
|
| 743 |
+
|
| 744 |
+
release_memory(pipe)
|
| 745 |
+
|
| 746 |
+
def test_sd_load_civitai_empty_network_alpha(self):
|
| 747 |
+
"""
|
| 748 |
+
This test simply checks that loading a LoRA with an empty network alpha works fine
|
| 749 |
+
See: https://github.com/huggingface/diffusers/issues/5606
|
| 750 |
+
"""
|
| 751 |
+
pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
|
| 752 |
+
pipeline.enable_sequential_cpu_offload(device=torch_device)
|
| 753 |
+
civitai_path = hf_hub_download("ybelkada/test-ahi-civitai", "ahi_lora_weights.safetensors")
|
| 754 |
+
pipeline.load_lora_weights(civitai_path, adapter_name="ahri")
|
| 755 |
+
|
| 756 |
+
images = pipeline(
|
| 757 |
+
"ahri, masterpiece, league of legends",
|
| 758 |
+
output_type="np",
|
| 759 |
+
generator=torch.manual_seed(156),
|
| 760 |
+
num_inference_steps=5,
|
| 761 |
+
).images
|
| 762 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 763 |
+
expected = np.array([0.0, 0.0, 0.0, 0.002557, 0.020954, 0.001792, 0.006581, 0.00591, 0.002995])
|
| 764 |
+
|
| 765 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 766 |
+
assert max_diff < 1e-3
|
| 767 |
+
|
| 768 |
+
pipeline.unload_lora_weights()
|
| 769 |
+
release_memory(pipeline)
|
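
Note (added for orientation; not part of the diff): the SD 1.5 tests above revolve around load_lora_weights, set_lora_device and unload_lora_weights. A minimal sketch of that workflow, reusing the same repo IDs as the tests and assuming a CUDA accelerator is available:

import torch
from diffusers import StableDiffusionPipeline

# Load the base pipeline and attach a LoRA adapter (same repos as in the tests above).
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("takuma104/lora-test-text-encoder-lora-target", adapter_name="adapter-1")

# Park the adapter weights on CPU without unloading them, then bring them back.
pipe.set_lora_device(["adapter-1"], "cpu")
pipe.set_lora_device(["adapter-1"], "cuda")

# Remove the adapter entirely when done.
pipe.unload_lora_weights()
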
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sd3.py
ADDED
|
@@ -0,0 +1,191 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025 HuggingFace Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import gc
|
| 16 |
+
import sys
|
| 17 |
+
import unittest
|
| 18 |
+
|
| 19 |
+
import numpy as np
|
| 20 |
+
import torch
|
| 21 |
+
from transformers import AutoTokenizer, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel
|
| 22 |
+
|
| 23 |
+
from diffusers import (
|
| 24 |
+
FlowMatchEulerDiscreteScheduler,
|
| 25 |
+
SD3Transformer2DModel,
|
| 26 |
+
StableDiffusion3Img2ImgPipeline,
|
| 27 |
+
StableDiffusion3Pipeline,
|
| 28 |
+
)
|
| 29 |
+
from diffusers.utils import load_image
|
| 30 |
+
from diffusers.utils.import_utils import is_accelerate_available
|
| 31 |
+
|
| 32 |
+
from ..testing_utils import (
|
| 33 |
+
backend_empty_cache,
|
| 34 |
+
is_flaky,
|
| 35 |
+
nightly,
|
| 36 |
+
numpy_cosine_similarity_distance,
|
| 37 |
+
require_big_accelerator,
|
| 38 |
+
require_peft_backend,
|
| 39 |
+
require_torch_accelerator,
|
| 40 |
+
torch_device,
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
sys.path.append(".")
|
| 45 |
+
|
| 46 |
+
from .utils import PeftLoraLoaderMixinTests # noqa: E402
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
if is_accelerate_available():
|
| 50 |
+
from accelerate.utils import release_memory
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@require_peft_backend
|
| 54 |
+
class SD3LoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
|
| 55 |
+
pipeline_class = StableDiffusion3Pipeline
|
| 56 |
+
scheduler_cls = FlowMatchEulerDiscreteScheduler
|
| 57 |
+
scheduler_kwargs = {}
|
| 58 |
+
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
|
| 59 |
+
transformer_kwargs = {
|
| 60 |
+
"sample_size": 32,
|
| 61 |
+
"patch_size": 1,
|
| 62 |
+
"in_channels": 4,
|
| 63 |
+
"num_layers": 1,
|
| 64 |
+
"attention_head_dim": 8,
|
| 65 |
+
"num_attention_heads": 4,
|
| 66 |
+
"caption_projection_dim": 32,
|
| 67 |
+
"joint_attention_dim": 32,
|
| 68 |
+
"pooled_projection_dim": 64,
|
| 69 |
+
"out_channels": 4,
|
| 70 |
+
}
|
| 71 |
+
transformer_cls = SD3Transformer2DModel
|
| 72 |
+
vae_kwargs = {
|
| 73 |
+
"sample_size": 32,
|
| 74 |
+
"in_channels": 3,
|
| 75 |
+
"out_channels": 3,
|
| 76 |
+
"block_out_channels": (4,),
|
| 77 |
+
"layers_per_block": 1,
|
| 78 |
+
"latent_channels": 4,
|
| 79 |
+
"norm_num_groups": 1,
|
| 80 |
+
"use_quant_conv": False,
|
| 81 |
+
"use_post_quant_conv": False,
|
| 82 |
+
"shift_factor": 0.0609,
|
| 83 |
+
"scaling_factor": 1.5035,
|
| 84 |
+
}
|
| 85 |
+
has_three_text_encoders = True
|
| 86 |
+
tokenizer_cls, tokenizer_id = CLIPTokenizer, "hf-internal-testing/tiny-random-clip"
|
| 87 |
+
tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "hf-internal-testing/tiny-random-clip"
|
| 88 |
+
tokenizer_3_cls, tokenizer_3_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
|
| 89 |
+
text_encoder_cls, text_encoder_id = CLIPTextModelWithProjection, "hf-internal-testing/tiny-sd3-text_encoder"
|
| 90 |
+
text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "hf-internal-testing/tiny-sd3-text_encoder-2"
|
| 91 |
+
text_encoder_3_cls, text_encoder_3_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
|
| 92 |
+
|
| 93 |
+
@property
|
| 94 |
+
def output_shape(self):
|
| 95 |
+
return (1, 32, 32, 3)
|
| 96 |
+
|
| 97 |
+
@require_torch_accelerator
|
| 98 |
+
def test_sd3_lora(self):
|
| 99 |
+
"""
|
| 100 |
+
Test loading the loras that are saved with the diffusers and peft formats.
|
| 101 |
+
Related PR: https://github.com/huggingface/diffusers/pull/8584
|
| 102 |
+
"""
|
| 103 |
+
components = self.get_dummy_components()
|
| 104 |
+
pipe = self.pipeline_class(**components[0])
|
| 105 |
+
pipe = pipe.to(torch_device)
|
| 106 |
+
pipe.set_progress_bar_config(disable=None)
|
| 107 |
+
|
| 108 |
+
lora_model_id = "hf-internal-testing/tiny-sd3-loras"
|
| 109 |
+
|
| 110 |
+
lora_filename = "lora_diffusers_format.safetensors"
|
| 111 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 112 |
+
pipe.unload_lora_weights()
|
| 113 |
+
|
| 114 |
+
lora_filename = "lora_peft_format.safetensors"
|
| 115 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 116 |
+
|
| 117 |
+
@unittest.skip("Not supported in SD3.")
|
| 118 |
+
def test_simple_inference_with_text_denoiser_block_scale(self):
|
| 119 |
+
pass
|
| 120 |
+
|
| 121 |
+
@unittest.skip("Not supported in SD3.")
|
| 122 |
+
def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
|
| 123 |
+
pass
|
| 124 |
+
|
| 125 |
+
@unittest.skip("Not supported in SD3.")
|
| 126 |
+
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
|
| 127 |
+
pass
|
| 128 |
+
|
| 129 |
+
@unittest.skip("Not supported in SD3.")
|
| 130 |
+
def test_modify_padding_mode(self):
|
| 131 |
+
pass
|
| 132 |
+
|
| 133 |
+
@is_flaky
|
| 134 |
+
def test_multiple_wrong_adapter_name_raises_error(self):
|
| 135 |
+
super().test_multiple_wrong_adapter_name_raises_error()
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@nightly
|
| 139 |
+
@require_torch_accelerator
|
| 140 |
+
@require_peft_backend
|
| 141 |
+
@require_big_accelerator
|
| 142 |
+
class SD3LoraIntegrationTests(unittest.TestCase):
|
| 143 |
+
pipeline_class = StableDiffusion3Img2ImgPipeline
|
| 144 |
+
repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
|
| 145 |
+
|
| 146 |
+
def setUp(self):
|
| 147 |
+
super().setUp()
|
| 148 |
+
gc.collect()
|
| 149 |
+
backend_empty_cache(torch_device)
|
| 150 |
+
|
| 151 |
+
def tearDown(self):
|
| 152 |
+
super().tearDown()
|
| 153 |
+
gc.collect()
|
| 154 |
+
backend_empty_cache(torch_device)
|
| 155 |
+
|
| 156 |
+
def get_inputs(self, device, seed=0):
|
| 157 |
+
init_image = load_image(
|
| 158 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
|
| 159 |
+
)
|
| 160 |
+
if str(device).startswith("mps"):
|
| 161 |
+
generator = torch.manual_seed(seed)
|
| 162 |
+
else:
|
| 163 |
+
generator = torch.Generator(device="cpu").manual_seed(seed)
|
| 164 |
+
|
| 165 |
+
return {
|
| 166 |
+
"prompt": "corgi",
|
| 167 |
+
"num_inference_steps": 2,
|
| 168 |
+
"guidance_scale": 5.0,
|
| 169 |
+
"output_type": "np",
|
| 170 |
+
"generator": generator,
|
| 171 |
+
"image": init_image,
|
| 172 |
+
}
|
| 173 |
+
|
| 174 |
+
def test_sd3_img2img_lora(self):
|
| 175 |
+
pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16)
|
| 176 |
+
pipe.load_lora_weights("zwloong/sd3-lora-training-rank16-v2")
|
| 177 |
+
pipe.fuse_lora()
|
| 178 |
+
pipe.unload_lora_weights()
|
| 179 |
+
pipe = pipe.to(torch_device)
|
| 180 |
+
|
| 181 |
+
inputs = self.get_inputs(torch_device)
|
| 182 |
+
|
| 183 |
+
image = pipe(**inputs).images[0]
|
| 184 |
+
image_slice = image[0, -3:, -3:]
|
| 185 |
+
expected_slice = np.array([0.5649, 0.5405, 0.5488, 0.5688, 0.5449, 0.5513, 0.5337, 0.5107, 0.5059])
|
| 186 |
+
|
| 187 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
|
| 188 |
+
|
| 189 |
+
assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}"
|
| 190 |
+
pipe.unload_lora_weights()
|
| 191 |
+
release_memory(pipe)
|
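
Note (added for orientation; not part of the diff): the SD3 integration test above fuses a LoRA into the transformer and then drops the adapter before running img2img. A minimal sketch of that flow, reusing the exact checkpoints and inputs from the test and assuming a CUDA device:

import torch
from diffusers import StableDiffusion3Img2ImgPipeline
from diffusers.utils import load_image

# Same checkpoint and LoRA as SD3LoraIntegrationTests.test_sd3_img2img_lora above.
pipe = StableDiffusion3Img2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
)
pipe.load_lora_weights("zwloong/sd3-lora-training-rank16-v2")
pipe.fuse_lora()            # bake the LoRA into the transformer weights
pipe.unload_lora_weights()  # drop the now-redundant adapter modules
pipe = pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
image = pipe(prompt="corgi", image=init_image, num_inference_steps=2, guidance_scale=5.0).images[0]
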
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_sdxl.py
ADDED
|
@@ -0,0 +1,681 @@
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025 HuggingFace Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
import copy
|
| 16 |
+
import gc
|
| 17 |
+
import importlib
|
| 18 |
+
import sys
|
| 19 |
+
import time
|
| 20 |
+
import unittest
|
| 21 |
+
|
| 22 |
+
import numpy as np
|
| 23 |
+
import torch
|
| 24 |
+
from packaging import version
|
| 25 |
+
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
|
| 26 |
+
|
| 27 |
+
from diffusers import (
|
| 28 |
+
ControlNetModel,
|
| 29 |
+
EulerDiscreteScheduler,
|
| 30 |
+
LCMScheduler,
|
| 31 |
+
StableDiffusionXLAdapterPipeline,
|
| 32 |
+
StableDiffusionXLControlNetPipeline,
|
| 33 |
+
StableDiffusionXLPipeline,
|
| 34 |
+
T2IAdapter,
|
| 35 |
+
)
|
| 36 |
+
from diffusers.utils import logging
|
| 37 |
+
from diffusers.utils.import_utils import is_accelerate_available
|
| 38 |
+
|
| 39 |
+
from ..testing_utils import (
|
| 40 |
+
CaptureLogger,
|
| 41 |
+
backend_empty_cache,
|
| 42 |
+
is_flaky,
|
| 43 |
+
load_image,
|
| 44 |
+
nightly,
|
| 45 |
+
numpy_cosine_similarity_distance,
|
| 46 |
+
require_peft_backend,
|
| 47 |
+
require_torch_accelerator,
|
| 48 |
+
slow,
|
| 49 |
+
torch_device,
|
| 50 |
+
)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
sys.path.append(".")
|
| 54 |
+
|
| 55 |
+
from .utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal # noqa: E402
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
if is_accelerate_available():
|
| 59 |
+
from accelerate.utils import release_memory
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
|
| 63 |
+
has_two_text_encoders = True
|
| 64 |
+
pipeline_class = StableDiffusionXLPipeline
|
| 65 |
+
scheduler_cls = EulerDiscreteScheduler
|
| 66 |
+
scheduler_kwargs = {
|
| 67 |
+
"beta_start": 0.00085,
|
| 68 |
+
"beta_end": 0.012,
|
| 69 |
+
"beta_schedule": "scaled_linear",
|
| 70 |
+
"timestep_spacing": "leading",
|
| 71 |
+
"steps_offset": 1,
|
| 72 |
+
}
|
| 73 |
+
unet_kwargs = {
|
| 74 |
+
"block_out_channels": (32, 64),
|
| 75 |
+
"layers_per_block": 2,
|
| 76 |
+
"sample_size": 32,
|
| 77 |
+
"in_channels": 4,
|
| 78 |
+
"out_channels": 4,
|
| 79 |
+
"down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"),
|
| 80 |
+
"up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"),
|
| 81 |
+
"attention_head_dim": (2, 4),
|
| 82 |
+
"use_linear_projection": True,
|
| 83 |
+
"addition_embed_type": "text_time",
|
| 84 |
+
"addition_time_embed_dim": 8,
|
| 85 |
+
"transformer_layers_per_block": (1, 2),
|
| 86 |
+
"projection_class_embeddings_input_dim": 80, # 6 * 8 + 32
|
| 87 |
+
"cross_attention_dim": 64,
|
| 88 |
+
}
|
| 89 |
+
vae_kwargs = {
|
| 90 |
+
"block_out_channels": [32, 64],
|
| 91 |
+
"in_channels": 3,
|
| 92 |
+
"out_channels": 3,
|
| 93 |
+
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
| 94 |
+
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
| 95 |
+
"latent_channels": 4,
|
| 96 |
+
"sample_size": 128,
|
| 97 |
+
}
|
| 98 |
+
text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2"
|
| 99 |
+
tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
|
| 100 |
+
text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "peft-internal-testing/tiny-clip-text-2"
|
| 101 |
+
tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
|
| 102 |
+
|
| 103 |
+
@property
|
| 104 |
+
def output_shape(self):
|
| 105 |
+
return (1, 64, 64, 3)
|
| 106 |
+
|
| 107 |
+
def setUp(self):
|
| 108 |
+
super().setUp()
|
| 109 |
+
gc.collect()
|
| 110 |
+
backend_empty_cache(torch_device)
|
| 111 |
+
|
| 112 |
+
def tearDown(self):
|
| 113 |
+
super().tearDown()
|
| 114 |
+
gc.collect()
|
| 115 |
+
backend_empty_cache(torch_device)
|
| 116 |
+
|
| 117 |
+
@is_flaky
|
| 118 |
+
def test_multiple_wrong_adapter_name_raises_error(self):
|
| 119 |
+
super().test_multiple_wrong_adapter_name_raises_error()
|
| 120 |
+
|
| 121 |
+
def test_simple_inference_with_text_denoiser_lora_unfused(self):
|
| 122 |
+
if torch.cuda.is_available():
|
| 123 |
+
expected_atol = 9e-2
|
| 124 |
+
expected_rtol = 9e-2
|
| 125 |
+
else:
|
| 126 |
+
expected_atol = 1e-3
|
| 127 |
+
expected_rtol = 1e-3
|
| 128 |
+
|
| 129 |
+
super().test_simple_inference_with_text_denoiser_lora_unfused(
|
| 130 |
+
expected_atol=expected_atol, expected_rtol=expected_rtol
|
| 131 |
+
)
|
| 132 |
+
|
| 133 |
+
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
|
| 134 |
+
if torch.cuda.is_available():
|
| 135 |
+
expected_atol = 9e-2
|
| 136 |
+
expected_rtol = 9e-2
|
| 137 |
+
else:
|
| 138 |
+
expected_atol = 1e-3
|
| 139 |
+
expected_rtol = 1e-3
|
| 140 |
+
|
| 141 |
+
super().test_simple_inference_with_text_lora_denoiser_fused_multi(
|
| 142 |
+
expected_atol=expected_atol, expected_rtol=expected_rtol
|
| 143 |
+
)
|
| 144 |
+
|
| 145 |
+
def test_lora_scale_kwargs_match_fusion(self):
|
| 146 |
+
if torch.cuda.is_available():
|
| 147 |
+
expected_atol = 9e-2
|
| 148 |
+
expected_rtol = 9e-2
|
| 149 |
+
else:
|
| 150 |
+
expected_atol = 1e-3
|
| 151 |
+
expected_rtol = 1e-3
|
| 152 |
+
|
| 153 |
+
super().test_lora_scale_kwargs_match_fusion(expected_atol=expected_atol, expected_rtol=expected_rtol)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
@slow
|
| 157 |
+
@nightly
|
| 158 |
+
@require_torch_accelerator
|
| 159 |
+
@require_peft_backend
|
| 160 |
+
class LoraSDXLIntegrationTests(unittest.TestCase):
|
| 161 |
+
def setUp(self):
|
| 162 |
+
super().setUp()
|
| 163 |
+
gc.collect()
|
| 164 |
+
backend_empty_cache(torch_device)
|
| 165 |
+
|
| 166 |
+
def tearDown(self):
|
| 167 |
+
super().tearDown()
|
| 168 |
+
gc.collect()
|
| 169 |
+
backend_empty_cache(torch_device)
|
| 170 |
+
|
| 171 |
+
def test_sdxl_1_0_lora(self):
|
| 172 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 173 |
+
|
| 174 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 175 |
+
pipe.enable_model_cpu_offload()
|
| 176 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 177 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 178 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 179 |
+
|
| 180 |
+
images = pipe(
|
| 181 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 182 |
+
).images
|
| 183 |
+
|
| 184 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 185 |
+
expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535])
|
| 186 |
+
|
| 187 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 188 |
+
assert max_diff < 1e-4
|
| 189 |
+
|
| 190 |
+
pipe.unload_lora_weights()
|
| 191 |
+
release_memory(pipe)
|
| 192 |
+
|
| 193 |
+
def test_sdxl_1_0_blockwise_lora(self):
|
| 194 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 195 |
+
|
| 196 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 197 |
+
pipe.enable_model_cpu_offload()
|
| 198 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 199 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 200 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, adapter_name="offset")
|
| 201 |
+
scales = {
|
| 202 |
+
"unet": {
|
| 203 |
+
"down": {"block_1": [1.0, 1.0], "block_2": [1.0, 1.0]},
|
| 204 |
+
"mid": 1.0,
|
| 205 |
+
"up": {"block_0": [1.0, 1.0, 1.0], "block_1": [1.0, 1.0, 1.0]},
|
| 206 |
+
},
|
| 207 |
+
}
|
| 208 |
+
pipe.set_adapters(["offset"], [scales])
|
| 209 |
+
|
| 210 |
+
images = pipe(
|
| 211 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 212 |
+
).images
|
| 213 |
+
|
| 214 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 215 |
+
expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535])
|
| 216 |
+
|
| 217 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 218 |
+
assert max_diff < 1e-4
|
| 219 |
+
|
| 220 |
+
pipe.unload_lora_weights()
|
| 221 |
+
release_memory(pipe)
|
| 222 |
+
|
| 223 |
+
def test_sdxl_lcm_lora(self):
|
| 224 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 225 |
+
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 226 |
+
)
|
| 227 |
+
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
|
| 228 |
+
pipe.enable_model_cpu_offload()
|
| 229 |
+
|
| 230 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 231 |
+
|
| 232 |
+
lora_model_id = "latent-consistency/lcm-lora-sdxl"
|
| 233 |
+
|
| 234 |
+
pipe.load_lora_weights(lora_model_id)
|
| 235 |
+
|
| 236 |
+
image = pipe(
|
| 237 |
+
"masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5
|
| 238 |
+
).images[0]
|
| 239 |
+
|
| 240 |
+
expected_image = load_image(
|
| 241 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdxl_lcm_lora.png"
|
| 242 |
+
)
|
| 243 |
+
|
| 244 |
+
image_np = pipe.image_processor.pil_to_numpy(image)
|
| 245 |
+
expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
|
| 246 |
+
|
| 247 |
+
max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
|
| 248 |
+
assert max_diff < 1e-4
|
| 249 |
+
|
| 250 |
+
pipe.unload_lora_weights()
|
| 251 |
+
release_memory(pipe)
|
| 252 |
+
|
| 253 |
+
def test_sdxl_1_0_lora_fusion(self):
|
| 254 |
+
generator = torch.Generator().manual_seed(0)
|
| 255 |
+
|
| 256 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 257 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 258 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 259 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 260 |
+
|
| 261 |
+
pipe.fuse_lora()
|
| 262 |
+
# We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being
|
| 263 |
+
# silently deleted - otherwise this will CPU OOM
|
| 264 |
+
pipe.unload_lora_weights()
|
| 265 |
+
|
| 266 |
+
pipe.enable_model_cpu_offload()
|
| 267 |
+
|
| 268 |
+
images = pipe(
|
| 269 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 270 |
+
).images
|
| 271 |
+
|
| 272 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 273 |
+
# This way we also test equivalence between LoRA fusion and the non-fusion behaviour.
|
| 274 |
+
expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535])
|
| 275 |
+
|
| 276 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 277 |
+
assert max_diff < 1e-4
|
| 278 |
+
|
| 279 |
+
release_memory(pipe)
|
| 280 |
+
|
| 281 |
+
def test_sdxl_1_0_lora_unfusion(self):
|
| 282 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 283 |
+
|
| 284 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 285 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 286 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 287 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 288 |
+
pipe.fuse_lora()
|
| 289 |
+
|
| 290 |
+
pipe.enable_model_cpu_offload()
|
| 291 |
+
|
| 292 |
+
images = pipe(
|
| 293 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3
|
| 294 |
+
).images
|
| 295 |
+
images_with_fusion = images.flatten()
|
| 296 |
+
|
| 297 |
+
pipe.unfuse_lora()
|
| 298 |
+
generator = torch.Generator("cpu").manual_seed(0)
|
| 299 |
+
images = pipe(
|
| 300 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3
|
| 301 |
+
).images
|
| 302 |
+
images_without_fusion = images.flatten()
|
| 303 |
+
|
| 304 |
+
max_diff = numpy_cosine_similarity_distance(images_with_fusion, images_without_fusion)
|
| 305 |
+
assert max_diff < 1e-4
|
| 306 |
+
|
| 307 |
+
release_memory(pipe)
|
| 308 |
+
|
| 309 |
+
def test_sdxl_1_0_lora_unfusion_effectivity(self):
|
| 310 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 311 |
+
pipe.enable_model_cpu_offload()
|
| 312 |
+
|
| 313 |
+
generator = torch.Generator().manual_seed(0)
|
| 314 |
+
images = pipe(
|
| 315 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 316 |
+
).images
|
| 317 |
+
original_image_slice = images[0, -3:, -3:, -1].flatten()
|
| 318 |
+
|
| 319 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 320 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 321 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 322 |
+
pipe.fuse_lora()
|
| 323 |
+
|
| 324 |
+
generator = torch.Generator().manual_seed(0)
|
| 325 |
+
_ = pipe(
|
| 326 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 327 |
+
).images
|
| 328 |
+
|
| 329 |
+
pipe.unfuse_lora()
|
| 330 |
+
|
| 331 |
+
# We need to unload the lora weights - in the old API unfuse led to unloading the adapter weights
|
| 332 |
+
pipe.unload_lora_weights()
|
| 333 |
+
|
| 334 |
+
generator = torch.Generator().manual_seed(0)
|
| 335 |
+
images = pipe(
|
| 336 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 337 |
+
).images
|
| 338 |
+
images_without_fusion_slice = images[0, -3:, -3:, -1].flatten()
|
| 339 |
+
|
| 340 |
+
max_diff = numpy_cosine_similarity_distance(images_without_fusion_slice, original_image_slice)
|
| 341 |
+
assert max_diff < 1e-3
|
| 342 |
+
|
| 343 |
+
release_memory(pipe)
|
| 344 |
+
|
| 345 |
+
def test_sdxl_1_0_lora_fusion_efficiency(self):
|
| 346 |
+
generator = torch.Generator().manual_seed(0)
|
| 347 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 348 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 349 |
+
|
| 350 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 351 |
+
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 352 |
+
)
|
| 353 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16)
|
| 354 |
+
pipe.enable_model_cpu_offload()
|
| 355 |
+
|
| 356 |
+
start_time = time.time()
|
| 357 |
+
for _ in range(3):
|
| 358 |
+
pipe(
|
| 359 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 360 |
+
).images
|
| 361 |
+
end_time = time.time()
|
| 362 |
+
elapsed_time_non_fusion = end_time - start_time
|
| 363 |
+
|
| 364 |
+
del pipe
|
| 365 |
+
|
| 366 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 367 |
+
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 368 |
+
)
|
| 369 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16)
|
| 370 |
+
pipe.fuse_lora()
|
| 371 |
+
|
| 372 |
+
# We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being
|
| 373 |
+
# silently deleted - otherwise this will CPU OOM
|
| 374 |
+
pipe.unload_lora_weights()
|
| 375 |
+
pipe.enable_model_cpu_offload()
|
| 376 |
+
|
| 377 |
+
generator = torch.Generator().manual_seed(0)
|
| 378 |
+
start_time = time.time()
|
| 379 |
+
for _ in range(3):
|
| 380 |
+
pipe(
|
| 381 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 382 |
+
).images
|
| 383 |
+
end_time = time.time()
|
| 384 |
+
elapsed_time_fusion = end_time - start_time
|
| 385 |
+
|
| 386 |
+
self.assertTrue(elapsed_time_fusion < elapsed_time_non_fusion)
|
| 387 |
+
|
| 388 |
+
release_memory(pipe)
|
| 389 |
+
|
| 390 |
+
def test_sdxl_1_0_last_ben(self):
|
| 391 |
+
generator = torch.Generator().manual_seed(0)
|
| 392 |
+
|
| 393 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 394 |
+
pipe.enable_model_cpu_offload()
|
| 395 |
+
lora_model_id = "TheLastBen/Papercut_SDXL"
|
| 396 |
+
lora_filename = "papercut.safetensors"
|
| 397 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 398 |
+
|
| 399 |
+
images = pipe("papercut.safetensors", output_type="np", generator=generator, num_inference_steps=2).images
|
| 400 |
+
|
| 401 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 402 |
+
expected = np.array([0.5244, 0.4347, 0.4312, 0.4246, 0.4398, 0.4409, 0.4884, 0.4938, 0.4094])
|
| 403 |
+
|
| 404 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 405 |
+
assert max_diff < 1e-3
|
| 406 |
+
|
| 407 |
+
pipe.unload_lora_weights()
|
| 408 |
+
release_memory(pipe)
|
| 409 |
+
|
| 410 |
+
def test_sdxl_1_0_fuse_unfuse_all(self):
|
| 411 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 412 |
+
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 413 |
+
)
|
| 414 |
+
text_encoder_1_sd = copy.deepcopy(pipe.text_encoder.state_dict())
|
| 415 |
+
text_encoder_2_sd = copy.deepcopy(pipe.text_encoder_2.state_dict())
|
| 416 |
+
unet_sd = copy.deepcopy(pipe.unet.state_dict())
|
| 417 |
+
|
| 418 |
+
pipe.load_lora_weights(
|
| 419 |
+
"davizca87/sun-flower", weight_name="snfw3rXL-000004.safetensors", torch_dtype=torch.float16
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
fused_te_state_dict = pipe.text_encoder.state_dict()
|
| 423 |
+
fused_te_2_state_dict = pipe.text_encoder_2.state_dict()
|
| 424 |
+
unet_state_dict = pipe.unet.state_dict()
|
| 425 |
+
|
| 426 |
+
peft_ge_070 = version.parse(importlib.metadata.version("peft")) >= version.parse("0.7.0")
|
| 427 |
+
|
| 428 |
+
def remap_key(key, sd):
|
| 429 |
+
# some keys have moved around for PEFT >= 0.7.0, but they should still be loaded correctly
|
| 430 |
+
if (key in sd) or (not peft_ge_070):
|
| 431 |
+
return key
|
| 432 |
+
|
| 433 |
+
# instead of linear.weight, we now have linear.base_layer.weight, etc.
|
| 434 |
+
if key.endswith(".weight"):
|
| 435 |
+
key = key[:-7] + ".base_layer.weight"
|
| 436 |
+
elif key.endswith(".bias"):
|
| 437 |
+
key = key[:-5] + ".base_layer.bias"
|
| 438 |
+
return key
|
| 439 |
+
|
| 440 |
+
for key, value in text_encoder_1_sd.items():
|
| 441 |
+
key = remap_key(key, fused_te_state_dict)
|
| 442 |
+
self.assertTrue(torch.allclose(fused_te_state_dict[key], value))
|
| 443 |
+
|
| 444 |
+
for key, value in text_encoder_2_sd.items():
|
| 445 |
+
key = remap_key(key, fused_te_2_state_dict)
|
| 446 |
+
self.assertTrue(torch.allclose(fused_te_2_state_dict[key], value))
|
| 447 |
+
|
| 448 |
+
for key, value in unet_state_dict.items():
|
| 449 |
+
self.assertTrue(torch.allclose(unet_state_dict[key], value))
|
| 450 |
+
|
| 451 |
+
pipe.fuse_lora()
|
| 452 |
+
pipe.unload_lora_weights()
|
| 453 |
+
|
| 454 |
+
assert not state_dicts_almost_equal(text_encoder_1_sd, pipe.text_encoder.state_dict())
|
| 455 |
+
assert not state_dicts_almost_equal(text_encoder_2_sd, pipe.text_encoder_2.state_dict())
|
| 456 |
+
assert not state_dicts_almost_equal(unet_sd, pipe.unet.state_dict())
|
| 457 |
+
|
| 458 |
+
release_memory(pipe)
|
| 459 |
+
del unet_sd, text_encoder_1_sd, text_encoder_2_sd
|
| 460 |
+
|
| 461 |
+
def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self):
|
| 462 |
+
generator = torch.Generator().manual_seed(0)
|
| 463 |
+
|
| 464 |
+
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 465 |
+
pipe.enable_sequential_cpu_offload()
|
| 466 |
+
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
|
| 467 |
+
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
|
| 468 |
+
|
| 469 |
+
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
|
| 470 |
+
|
| 471 |
+
images = pipe(
|
| 472 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 473 |
+
).images
|
| 474 |
+
|
| 475 |
+
images = images[0, -3:, -3:, -1].flatten()
|
| 476 |
+
expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535])
|
| 477 |
+
|
| 478 |
+
max_diff = numpy_cosine_similarity_distance(expected, images)
|
| 479 |
+
assert max_diff < 1e-3
|
| 480 |
+
|
| 481 |
+
pipe.unload_lora_weights()
|
| 482 |
+
release_memory(pipe)
|
| 483 |
+
|
| 484 |
+
def test_controlnet_canny_lora(self):
|
| 485 |
+
controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0")
|
| 486 |
+
|
| 487 |
+
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
|
| 488 |
+
"stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet
|
| 489 |
+
)
|
| 490 |
+
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors")
|
| 491 |
+
pipe.enable_sequential_cpu_offload()
|
| 492 |
+
|
| 493 |
+
generator = torch.Generator(device="cpu").manual_seed(0)
|
| 494 |
+
prompt = "corgi"
|
| 495 |
+
image = load_image(
|
| 496 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
|
| 497 |
+
)
|
| 498 |
+
images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images
|
| 499 |
+
|
| 500 |
+
assert images[0].shape == (768, 512, 3)
|
| 501 |
+
|
| 502 |
+
original_image = images[0, -3:, -3:, -1].flatten()
|
| 503 |
+
expected_image = np.array([0.4574, 0.4487, 0.4435, 0.5163, 0.4396, 0.4411, 0.518, 0.4465, 0.4333])
|
| 504 |
+
|
| 505 |
+
max_diff = numpy_cosine_similarity_distance(expected_image, original_image)
|
| 506 |
+
assert max_diff < 1e-4
|
| 507 |
+
|
| 508 |
+
pipe.unload_lora_weights()
|
| 509 |
+
release_memory(pipe)
|
| 510 |
+
|
| 511 |
+
def test_sdxl_t2i_adapter_canny_lora(self):
|
| 512 |
+
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16).to(
|
| 513 |
+
"cpu"
|
| 514 |
+
)
|
| 515 |
+
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
|
| 516 |
+
"stabilityai/stable-diffusion-xl-base-1.0",
|
| 517 |
+
adapter=adapter,
|
| 518 |
+
torch_dtype=torch.float16,
|
| 519 |
+
variant="fp16",
|
| 520 |
+
)
|
| 521 |
+
pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors")
|
| 522 |
+
pipe.enable_model_cpu_offload()
|
| 523 |
+
pipe.set_progress_bar_config(disable=None)
|
| 524 |
+
|
| 525 |
+
generator = torch.Generator(device="cpu").manual_seed(0)
|
| 526 |
+
prompt = "toy"
|
| 527 |
+
image = load_image(
|
| 528 |
+
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
|
| 529 |
+
)
|
| 530 |
+
|
| 531 |
+
images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images
|
| 532 |
+
|
| 533 |
+
assert images[0].shape == (768, 512, 3)
|
| 534 |
+
|
| 535 |
+
image_slice = images[0, -3:, -3:, -1].flatten()
|
| 536 |
+
expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226])
|
| 537 |
+
assert numpy_cosine_similarity_distance(image_slice, expected_slice) < 1e-4
|
| 538 |
+
|
| 539 |
+
@nightly
|
| 540 |
+
def test_sequential_fuse_unfuse(self):
|
| 541 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(
|
| 542 |
+
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
# 1. round
|
| 546 |
+
pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16)
|
| 547 |
+
pipe.to(torch_device)
|
| 548 |
+
pipe.fuse_lora()
|
| 549 |
+
|
| 550 |
+
generator = torch.Generator().manual_seed(0)
|
| 551 |
+
images = pipe(
|
| 552 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 553 |
+
).images
|
| 554 |
+
image_slice = images[0, -3:, -3:, -1].flatten()
|
| 555 |
+
|
| 556 |
+
pipe.unfuse_lora()
|
| 557 |
+
|
| 558 |
+
# 2. round
|
| 559 |
+
pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style", torch_dtype=torch.float16)
|
| 560 |
+
pipe.fuse_lora()
|
| 561 |
+
pipe.unfuse_lora()
|
| 562 |
+
|
| 563 |
+
# 3. round
|
| 564 |
+
pipe.load_lora_weights("ostris/crayon_style_lora_sdxl", torch_dtype=torch.float16)
|
| 565 |
+
pipe.fuse_lora()
|
| 566 |
+
pipe.unfuse_lora()
|
| 567 |
+
|
| 568 |
+
# 4. back to 1st round
|
| 569 |
+
pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16)
|
| 570 |
+
pipe.fuse_lora()
|
| 571 |
+
|
| 572 |
+
generator = torch.Generator().manual_seed(0)
|
| 573 |
+
images_2 = pipe(
|
| 574 |
+
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
|
| 575 |
+
).images
|
| 576 |
+
image_slice_2 = images_2[0, -3:, -3:, -1].flatten()
|
| 577 |
+
|
| 578 |
+
max_diff = numpy_cosine_similarity_distance(image_slice, image_slice_2)
|
| 579 |
+
assert max_diff < 1e-3
|
| 580 |
+
pipe.unload_lora_weights()
|
| 581 |
+
release_memory(pipe)
|
| 582 |
+
|
| 583 |
+
@nightly
|
| 584 |
+
def test_integration_logits_multi_adapter(self):
|
| 585 |
+
path = "stabilityai/stable-diffusion-xl-base-1.0"
|
| 586 |
+
lora_id = "CiroN2022/toy-face"
|
| 587 |
+
|
| 588 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(path, torch_dtype=torch.float16)
|
| 589 |
+
pipe.load_lora_weights(lora_id, weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
|
| 590 |
+
pipe = pipe.to(torch_device)
|
| 591 |
+
|
| 592 |
+
self.assertTrue(check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in Unet")
|
| 593 |
+
|
| 594 |
+
prompt = "toy_face of a hacker with a hoodie"
|
| 595 |
+
|
| 596 |
+
lora_scale = 0.9
|
| 597 |
+
|
| 598 |
+
images = pipe(
|
| 599 |
+
prompt=prompt,
|
| 600 |
+
num_inference_steps=30,
|
| 601 |
+
generator=torch.manual_seed(0),
|
| 602 |
+
cross_attention_kwargs={"scale": lora_scale},
|
| 603 |
+
output_type="np",
|
| 604 |
+
).images
|
| 605 |
+
expected_slice_scale = np.array([0.538, 0.539, 0.540, 0.540, 0.542, 0.539, 0.538, 0.541, 0.539])
|
| 606 |
+
|
| 607 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 608 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 609 |
+
assert max_diff < 1e-3
|
| 610 |
+
|
| 611 |
+
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
|
| 612 |
+
pipe.set_adapters("pixel")
|
| 613 |
+
|
| 614 |
+
prompt = "pixel art, a hacker with a hoodie, simple, flat colors"
|
| 615 |
+
images = pipe(
|
| 616 |
+
prompt,
|
| 617 |
+
num_inference_steps=30,
|
| 618 |
+
guidance_scale=7.5,
|
| 619 |
+
cross_attention_kwargs={"scale": lora_scale},
|
| 620 |
+
generator=torch.manual_seed(0),
|
| 621 |
+
output_type="np",
|
| 622 |
+
).images
|
| 623 |
+
|
| 624 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 625 |
+
expected_slice_scale = np.array(
|
| 626 |
+
[0.61973065, 0.62018543, 0.62181497, 0.61933696, 0.6208608, 0.620576, 0.6200281, 0.62258327, 0.6259889]
|
| 627 |
+
)
|
| 628 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 629 |
+
assert max_diff < 1e-3
|
| 630 |
+
|
| 631 |
+
# multi-adapter inference
|
| 632 |
+
pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.5, 1.0])
|
| 633 |
+
images = pipe(
|
| 634 |
+
prompt,
|
| 635 |
+
num_inference_steps=30,
|
| 636 |
+
guidance_scale=7.5,
|
| 637 |
+
cross_attention_kwargs={"scale": 1.0},
|
| 638 |
+
generator=torch.manual_seed(0),
|
| 639 |
+
output_type="np",
|
| 640 |
+
).images
|
| 641 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 642 |
+
expected_slice_scale = np.array([0.5888, 0.5897, 0.5946, 0.5888, 0.5935, 0.5946, 0.5857, 0.5891, 0.5909])
|
| 643 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 644 |
+
assert max_diff < 1e-3
|
| 645 |
+
|
| 646 |
+
# Lora disabled
|
| 647 |
+
pipe.disable_lora()
|
| 648 |
+
images = pipe(
|
| 649 |
+
prompt,
|
| 650 |
+
num_inference_steps=30,
|
| 651 |
+
guidance_scale=7.5,
|
| 652 |
+
cross_attention_kwargs={"scale": lora_scale},
|
| 653 |
+
generator=torch.manual_seed(0),
|
| 654 |
+
output_type="np",
|
| 655 |
+
).images
|
| 656 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 657 |
+
expected_slice_scale = np.array([0.5456, 0.5466, 0.5487, 0.5458, 0.5469, 0.5454, 0.5446, 0.5479, 0.5487])
|
| 658 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 659 |
+
assert max_diff < 1e-3
|
| 660 |
+
|
| 661 |
+
@nightly
|
| 662 |
+
def test_integration_logits_for_dora_lora(self):
|
| 663 |
+
pipeline = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
|
| 664 |
+
|
| 665 |
+
logger = logging.get_logger("diffusers.loaders.lora_pipeline")
|
| 666 |
+
logger.setLevel(30)
|
| 667 |
+
with CaptureLogger(logger) as cap_logger:
|
| 668 |
+
pipeline.load_lora_weights("hf-internal-testing/dora-trained-on-kohya")
|
| 669 |
+
pipeline.enable_model_cpu_offload()
|
| 670 |
+
images = pipeline(
|
| 671 |
+
"photo of ohwx dog",
|
| 672 |
+
num_inference_steps=10,
|
| 673 |
+
generator=torch.manual_seed(0),
|
| 674 |
+
output_type="np",
|
| 675 |
+
).images
|
| 676 |
+
assert "It seems like you are using a DoRA checkpoint" in cap_logger.out
|
| 677 |
+
|
| 678 |
+
predicted_slice = images[0, -3:, -3:, -1].flatten()
|
| 679 |
+
expected_slice_scale = np.array([0.1817, 0.0697, 0.2346, 0.0900, 0.1261, 0.2279, 0.1767, 0.1991, 0.2886])
|
| 680 |
+
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
|
| 681 |
+
assert max_diff < 1e-3
|
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wan.py
ADDED
|
@@ -0,0 +1,144 @@
|
| 1 |
+
# Copyright 2025 HuggingFace Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import sys
|
| 16 |
+
import unittest
|
| 17 |
+
|
| 18 |
+
import torch
|
| 19 |
+
from transformers import AutoTokenizer, T5EncoderModel
|
| 20 |
+
|
| 21 |
+
from diffusers import (
|
| 22 |
+
AutoencoderKLWan,
|
| 23 |
+
FlowMatchEulerDiscreteScheduler,
|
| 24 |
+
WanPipeline,
|
| 25 |
+
WanTransformer3DModel,
|
| 26 |
+
)
|
| 27 |
+
|
| 28 |
+
from ..testing_utils import (
|
| 29 |
+
floats_tensor,
|
| 30 |
+
require_peft_backend,
|
| 31 |
+
skip_mps,
|
| 32 |
+
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
sys.path.append(".")
|
| 36 |
+
|
| 37 |
+
from .utils import PeftLoraLoaderMixinTests # noqa: E402
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@require_peft_backend
|
| 41 |
+
@skip_mps
|
| 42 |
+
class WanLoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
|
| 43 |
+
pipeline_class = WanPipeline
|
| 44 |
+
scheduler_cls = FlowMatchEulerDiscreteScheduler
|
| 45 |
+
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
|
| 46 |
+
scheduler_kwargs = {}
|
| 47 |
+
|
| 48 |
+
transformer_kwargs = {
|
| 49 |
+
"patch_size": (1, 2, 2),
|
| 50 |
+
"num_attention_heads": 2,
|
| 51 |
+
"attention_head_dim": 12,
|
| 52 |
+
"in_channels": 16,
|
| 53 |
+
"out_channels": 16,
|
| 54 |
+
"text_dim": 32,
|
| 55 |
+
"freq_dim": 256,
|
| 56 |
+
"ffn_dim": 32,
|
| 57 |
+
"num_layers": 2,
|
| 58 |
+
"cross_attn_norm": True,
|
| 59 |
+
"qk_norm": "rms_norm_across_heads",
|
| 60 |
+
"rope_max_seq_len": 32,
|
| 61 |
+
}
|
| 62 |
+
transformer_cls = WanTransformer3DModel
|
| 63 |
+
vae_kwargs = {
|
| 64 |
+
"base_dim": 3,
|
| 65 |
+
"z_dim": 16,
|
| 66 |
+
"dim_mult": [1, 1, 1, 1],
|
| 67 |
+
"num_res_blocks": 1,
|
| 68 |
+
"temperal_downsample": [False, True, True],
|
| 69 |
+
}
|
| 70 |
+
vae_cls = AutoencoderKLWan
|
| 71 |
+
has_two_text_encoders = True
|
| 72 |
+
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
|
| 73 |
+
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
|
| 74 |
+
|
| 75 |
+
text_encoder_target_modules = ["q", "k", "v", "o"]
|
| 76 |
+
|
| 77 |
+
@property
|
| 78 |
+
def output_shape(self):
|
| 79 |
+
return (1, 9, 32, 32, 3)
|
| 80 |
+
|
| 81 |
+
def get_dummy_inputs(self, with_generator=True):
|
| 82 |
+
batch_size = 1
|
| 83 |
+
sequence_length = 16
|
| 84 |
+
num_channels = 4
|
| 85 |
+
num_frames = 9
|
| 86 |
+
num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1
|
| 87 |
+
sizes = (4, 4)
|
| 88 |
+
|
| 89 |
+
generator = torch.manual_seed(0)
|
| 90 |
+
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
|
| 91 |
+
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
|
| 92 |
+
|
| 93 |
+
pipeline_inputs = {
|
| 94 |
+
"prompt": "",
|
| 95 |
+
"num_frames": num_frames,
|
| 96 |
+
"num_inference_steps": 1,
|
| 97 |
+
"guidance_scale": 6.0,
|
| 98 |
+
"height": 32,
|
| 99 |
+
"width": 32,
|
| 100 |
+
"max_sequence_length": sequence_length,
|
| 101 |
+
"output_type": "np",
|
| 102 |
+
}
|
| 103 |
+
if with_generator:
|
| 104 |
+
pipeline_inputs.update({"generator": generator})
|
| 105 |
+
|
| 106 |
+
return noise, input_ids, pipeline_inputs
|
| 107 |
+
|
| 108 |
+
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
|
| 109 |
+
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
|
| 110 |
+
|
| 111 |
+
def test_simple_inference_with_text_denoiser_lora_unfused(self):
|
| 112 |
+
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
|
| 113 |
+
|
| 114 |
+
@unittest.skip("Not supported in Wan.")
|
| 115 |
+
def test_simple_inference_with_text_denoiser_block_scale(self):
|
| 116 |
+
pass
|
| 117 |
+
|
| 118 |
+
@unittest.skip("Not supported in Wan.")
|
| 119 |
+
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
|
| 120 |
+
pass
|
| 121 |
+
|
| 122 |
+
@unittest.skip("Not supported in Wan.")
|
| 123 |
+
def test_modify_padding_mode(self):
|
| 124 |
+
pass
|
| 125 |
+
|
| 126 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan.")
|
| 127 |
+
def test_simple_inference_with_partial_text_lora(self):
|
| 128 |
+
pass
|
| 129 |
+
|
| 130 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan.")
|
| 131 |
+
def test_simple_inference_with_text_lora(self):
|
| 132 |
+
pass
|
| 133 |
+
|
| 134 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan.")
|
| 135 |
+
def test_simple_inference_with_text_lora_and_scale(self):
|
| 136 |
+
pass
|
| 137 |
+
|
| 138 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan.")
|
| 139 |
+
def test_simple_inference_with_text_lora_fused(self):
|
| 140 |
+
pass
|
| 141 |
+
|
| 142 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan.")
|
| 143 |
+
def test_simple_inference_with_text_lora_save_load(self):
|
| 144 |
+
pass
|
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/test_lora_layers_wanvace.py
ADDED
|
@@ -0,0 +1,217 @@
|
| 1 |
+
# Copyright 2025 HuggingFace Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
import sys
|
| 17 |
+
import tempfile
|
| 18 |
+
import unittest
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
import safetensors.torch
|
| 22 |
+
import torch
|
| 23 |
+
from PIL import Image
|
| 24 |
+
from transformers import AutoTokenizer, T5EncoderModel
|
| 25 |
+
|
| 26 |
+
from diffusers import AutoencoderKLWan, FlowMatchEulerDiscreteScheduler, WanVACEPipeline, WanVACETransformer3DModel
|
| 27 |
+
from diffusers.utils.import_utils import is_peft_available
|
| 28 |
+
|
| 29 |
+
from ..testing_utils import (
|
| 30 |
+
floats_tensor,
|
| 31 |
+
is_flaky,
|
| 32 |
+
require_peft_backend,
|
| 33 |
+
require_peft_version_greater,
|
| 34 |
+
skip_mps,
|
| 35 |
+
torch_device,
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
if is_peft_available():
|
| 40 |
+
from peft.utils import get_peft_model_state_dict
|
| 41 |
+
|
| 42 |
+
sys.path.append(".")
|
| 43 |
+
|
| 44 |
+
from .utils import PeftLoraLoaderMixinTests # noqa: E402
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@require_peft_backend
|
| 48 |
+
@skip_mps
|
| 49 |
+
@is_flaky(max_attempts=10, description="very flaky class")
|
| 50 |
+
class WanVACELoRATests(unittest.TestCase, PeftLoraLoaderMixinTests):
|
| 51 |
+
pipeline_class = WanVACEPipeline
|
| 52 |
+
scheduler_cls = FlowMatchEulerDiscreteScheduler
|
| 53 |
+
scheduler_classes = [FlowMatchEulerDiscreteScheduler]
|
| 54 |
+
scheduler_kwargs = {}
|
| 55 |
+
|
| 56 |
+
transformer_kwargs = {
|
| 57 |
+
"patch_size": (1, 2, 2),
|
| 58 |
+
"num_attention_heads": 2,
|
| 59 |
+
"attention_head_dim": 8,
|
| 60 |
+
"in_channels": 4,
|
| 61 |
+
"out_channels": 4,
|
| 62 |
+
"text_dim": 32,
|
| 63 |
+
"freq_dim": 16,
|
| 64 |
+
"ffn_dim": 16,
|
| 65 |
+
"num_layers": 2,
|
| 66 |
+
"cross_attn_norm": True,
|
| 67 |
+
"qk_norm": "rms_norm_across_heads",
|
| 68 |
+
"rope_max_seq_len": 16,
|
| 69 |
+
"vace_layers": [0],
|
| 70 |
+
"vace_in_channels": 72,
|
| 71 |
+
}
|
| 72 |
+
transformer_cls = WanVACETransformer3DModel
|
| 73 |
+
vae_kwargs = {
|
| 74 |
+
"base_dim": 3,
|
| 75 |
+
"z_dim": 4,
|
| 76 |
+
"dim_mult": [1, 1, 1, 1],
|
| 77 |
+
"latents_mean": torch.randn(4).numpy().tolist(),
|
| 78 |
+
"latents_std": torch.randn(4).numpy().tolist(),
|
| 79 |
+
"num_res_blocks": 1,
|
| 80 |
+
"temperal_downsample": [False, True, True],
|
| 81 |
+
}
|
| 82 |
+
vae_cls = AutoencoderKLWan
|
| 83 |
+
has_two_text_encoders = True
|
| 84 |
+
tokenizer_cls, tokenizer_id = AutoTokenizer, "hf-internal-testing/tiny-random-t5"
|
| 85 |
+
text_encoder_cls, text_encoder_id = T5EncoderModel, "hf-internal-testing/tiny-random-t5"
|
| 86 |
+
|
| 87 |
+
text_encoder_target_modules = ["q", "k", "v", "o"]
|
| 88 |
+
|
| 89 |
+
@property
|
| 90 |
+
def output_shape(self):
|
| 91 |
+
return (1, 9, 16, 16, 3)
|
| 92 |
+
|
| 93 |
+
def get_dummy_inputs(self, with_generator=True):
|
| 94 |
+
batch_size = 1
|
| 95 |
+
sequence_length = 16
|
| 96 |
+
num_channels = 4
|
| 97 |
+
num_frames = 9
|
| 98 |
+
num_latent_frames = 3 # (num_frames - 1) // temporal_compression_ratio + 1
|
| 99 |
+
sizes = (4, 4)
|
| 100 |
+
height, width = 16, 16
|
| 101 |
+
|
| 102 |
+
generator = torch.manual_seed(0)
|
| 103 |
+
noise = floats_tensor((batch_size, num_latent_frames, num_channels) + sizes)
|
| 104 |
+
input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)
|
| 105 |
+
video = [Image.new("RGB", (height, width))] * num_frames
|
| 106 |
+
mask = [Image.new("L", (height, width), 0)] * num_frames
|
| 107 |
+
|
| 108 |
+
pipeline_inputs = {
|
| 109 |
+
"video": video,
|
| 110 |
+
"mask": mask,
|
| 111 |
+
"prompt": "",
|
| 112 |
+
"num_frames": num_frames,
|
| 113 |
+
"num_inference_steps": 1,
|
| 114 |
+
"guidance_scale": 6.0,
|
| 115 |
+
"height": height,
|
| 116 |
+
"width": height,
|
| 117 |
+
"max_sequence_length": sequence_length,
|
| 118 |
+
"output_type": "np",
|
| 119 |
+
}
|
| 120 |
+
if with_generator:
|
| 121 |
+
pipeline_inputs.update({"generator": generator})
|
| 122 |
+
|
| 123 |
+
return noise, input_ids, pipeline_inputs
|
| 124 |
+
|
| 125 |
+
def test_simple_inference_with_text_lora_denoiser_fused_multi(self):
|
| 126 |
+
super().test_simple_inference_with_text_lora_denoiser_fused_multi(expected_atol=9e-3)
|
| 127 |
+
|
| 128 |
+
def test_simple_inference_with_text_denoiser_lora_unfused(self):
|
| 129 |
+
super().test_simple_inference_with_text_denoiser_lora_unfused(expected_atol=9e-3)
|
| 130 |
+
|
| 131 |
+
@unittest.skip("Not supported in Wan VACE.")
|
| 132 |
+
def test_simple_inference_with_text_denoiser_block_scale(self):
|
| 133 |
+
pass
|
| 134 |
+
|
| 135 |
+
@unittest.skip("Not supported in Wan VACE.")
|
| 136 |
+
def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
|
| 137 |
+
pass
|
| 138 |
+
|
| 139 |
+
@unittest.skip("Not supported in Wan VACE.")
|
| 140 |
+
def test_modify_padding_mode(self):
|
| 141 |
+
pass
|
| 142 |
+
|
| 143 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
|
| 144 |
+
def test_simple_inference_with_partial_text_lora(self):
|
| 145 |
+
pass
|
| 146 |
+
|
| 147 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
|
| 148 |
+
def test_simple_inference_with_text_lora(self):
|
| 149 |
+
pass
|
| 150 |
+
|
| 151 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
|
| 152 |
+
def test_simple_inference_with_text_lora_and_scale(self):
|
| 153 |
+
pass
|
| 154 |
+
|
| 155 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
|
| 156 |
+
def test_simple_inference_with_text_lora_fused(self):
|
| 157 |
+
pass
|
| 158 |
+
|
| 159 |
+
@unittest.skip("Text encoder LoRA is not supported in Wan VACE.")
|
| 160 |
+
def test_simple_inference_with_text_lora_save_load(self):
|
| 161 |
+
pass
|
| 162 |
+
|
| 163 |
+
def test_layerwise_casting_inference_denoiser(self):
|
| 164 |
+
super().test_layerwise_casting_inference_denoiser()
|
| 165 |
+
|
| 166 |
+
@require_peft_version_greater("0.13.2")
|
| 167 |
+
def test_lora_exclude_modules_wanvace(self):
|
| 168 |
+
scheduler_cls = self.scheduler_classes[0]
|
| 169 |
+
exclude_module_name = "vace_blocks.0.proj_out"
|
| 170 |
+
components, text_lora_config, denoiser_lora_config = self.get_dummy_components(scheduler_cls)
|
| 171 |
+
pipe = self.pipeline_class(**components).to(torch_device)
|
| 172 |
+
_, _, inputs = self.get_dummy_inputs(with_generator=False)
|
| 173 |
+
|
| 174 |
+
output_no_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
|
| 175 |
+
self.assertTrue(output_no_lora.shape == self.output_shape)
|
| 176 |
+
|
| 177 |
+
# only supported for `denoiser` now
|
| 178 |
+
denoiser_lora_config.target_modules = ["proj_out"]
|
| 179 |
+
denoiser_lora_config.exclude_modules = [exclude_module_name]
|
| 180 |
+
pipe, _ = self.add_adapters_to_pipeline(
|
| 181 |
+
pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
|
| 182 |
+
)
|
| 183 |
+
# The state dict shouldn't contain the modules to be excluded from LoRA.
|
| 184 |
+
state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default")
|
| 185 |
+
self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model))
|
| 186 |
+
self.assertTrue(any("proj_out" in k for k in state_dict_from_model))
|
| 187 |
+
output_lora_exclude_modules = pipe(**inputs, generator=torch.manual_seed(0))[0]
|
| 188 |
+
|
| 189 |
+
with tempfile.TemporaryDirectory() as tmpdir:
|
| 190 |
+
modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
|
| 191 |
+
lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
|
| 192 |
+
self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts)
|
| 193 |
+
pipe.unload_lora_weights()
|
| 194 |
+
|
| 195 |
+
# Check in the loaded state dict.
|
| 196 |
+
loaded_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
|
| 197 |
+
self.assertTrue(not any(exclude_module_name in k for k in loaded_state_dict))
|
| 198 |
+
self.assertTrue(any("proj_out" in k for k in loaded_state_dict))
|
| 199 |
+
|
| 200 |
+
# Check in the state dict obtained after loading LoRA.
|
| 201 |
+
pipe.load_lora_weights(tmpdir)
|
| 202 |
+
state_dict_from_model = get_peft_model_state_dict(pipe.transformer, adapter_name="default_0")
|
| 203 |
+
self.assertTrue(not any(exclude_module_name in k for k in state_dict_from_model))
|
| 204 |
+
self.assertTrue(any("proj_out" in k for k in state_dict_from_model))
|
| 205 |
+
|
| 206 |
+
output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]
|
| 207 |
+
self.assertTrue(
|
| 208 |
+
not np.allclose(output_no_lora, output_lora_exclude_modules, atol=1e-3, rtol=1e-3),
|
| 209 |
+
"LoRA should change outputs.",
|
| 210 |
+
)
|
| 211 |
+
self.assertTrue(
|
| 212 |
+
np.allclose(output_lora_exclude_modules, output_lora_pretrained, atol=1e-3, rtol=1e-3),
|
| 213 |
+
"Lora outputs should match.",
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
def test_simple_inference_with_text_denoiser_lora_and_scale(self):
|
| 217 |
+
super().test_simple_inference_with_text_denoiser_lora_and_scale()
|
exp_code/1_benchmark/diffusers-WanS2V/tests/lora/utils.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
exp_code/1_benchmark/diffusers-WanS2V/tests/models/__init__.py
ADDED
|
File without changes
|
exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/__init__.py
ADDED
|
File without changes
|
exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_asymmetric_autoencoder_kl.py
ADDED
|
@@ -0,0 +1,276 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025 HuggingFace Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import gc
|
| 17 |
+
import unittest
|
| 18 |
+
|
| 19 |
+
import torch
|
| 20 |
+
from parameterized import parameterized
|
| 21 |
+
|
| 22 |
+
from diffusers import AsymmetricAutoencoderKL
|
| 23 |
+
from diffusers.utils.import_utils import is_xformers_available
|
| 24 |
+
|
| 25 |
+
from ...testing_utils import (
|
| 26 |
+
Expectations,
|
| 27 |
+
backend_empty_cache,
|
| 28 |
+
enable_full_determinism,
|
| 29 |
+
floats_tensor,
|
| 30 |
+
load_hf_numpy,
|
| 31 |
+
require_torch_accelerator,
|
| 32 |
+
require_torch_gpu,
|
| 33 |
+
skip_mps,
|
| 34 |
+
slow,
|
| 35 |
+
torch_all_close,
|
| 36 |
+
torch_device,
|
| 37 |
+
)
|
| 38 |
+
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
enable_full_determinism()
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
|
| 45 |
+
model_class = AsymmetricAutoencoderKL
|
| 46 |
+
main_input_name = "sample"
|
| 47 |
+
base_precision = 1e-2
|
| 48 |
+
|
| 49 |
+
def get_asym_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None):
|
| 50 |
+
block_out_channels = block_out_channels or [2, 4]
|
| 51 |
+
norm_num_groups = norm_num_groups or 2
|
| 52 |
+
init_dict = {
|
| 53 |
+
"in_channels": 3,
|
| 54 |
+
"out_channels": 3,
|
| 55 |
+
"down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
|
| 56 |
+
"down_block_out_channels": block_out_channels,
|
| 57 |
+
"layers_per_down_block": 1,
|
| 58 |
+
"up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
|
| 59 |
+
"up_block_out_channels": block_out_channels,
|
| 60 |
+
"layers_per_up_block": 1,
|
| 61 |
+
"act_fn": "silu",
|
| 62 |
+
"latent_channels": 4,
|
| 63 |
+
"norm_num_groups": norm_num_groups,
|
| 64 |
+
"sample_size": 32,
|
| 65 |
+
"scaling_factor": 0.18215,
|
| 66 |
+
}
|
| 67 |
+
return init_dict
|
| 68 |
+
|
| 69 |
+
@property
|
| 70 |
+
def dummy_input(self):
|
| 71 |
+
batch_size = 4
|
| 72 |
+
num_channels = 3
|
| 73 |
+
sizes = (32, 32)
|
| 74 |
+
|
| 75 |
+
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
|
| 76 |
+
mask = torch.ones((batch_size, 1) + sizes).to(torch_device)
|
| 77 |
+
|
| 78 |
+
return {"sample": image, "mask": mask}
|
| 79 |
+
|
| 80 |
+
@property
|
| 81 |
+
def input_shape(self):
|
| 82 |
+
return (3, 32, 32)
|
| 83 |
+
|
| 84 |
+
@property
|
| 85 |
+
def output_shape(self):
|
| 86 |
+
return (3, 32, 32)
|
| 87 |
+
|
| 88 |
+
def prepare_init_args_and_inputs_for_common(self):
|
| 89 |
+
init_dict = self.get_asym_autoencoder_kl_config()
|
| 90 |
+
inputs_dict = self.dummy_input
|
| 91 |
+
return init_dict, inputs_dict
|
| 92 |
+
|
| 93 |
+
@unittest.skip("Unsupported test.")
|
| 94 |
+
def test_forward_with_norm_groups(self):
|
| 95 |
+
pass
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@slow
|
| 99 |
+
class AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase):
|
| 100 |
+
def get_file_format(self, seed, shape):
|
| 101 |
+
return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
|
| 102 |
+
|
| 103 |
+
def tearDown(self):
|
| 104 |
+
# clean up the VRAM after each test
|
| 105 |
+
super().tearDown()
|
| 106 |
+
gc.collect()
|
| 107 |
+
backend_empty_cache(torch_device)
|
| 108 |
+
|
| 109 |
+
def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
|
| 110 |
+
dtype = torch.float16 if fp16 else torch.float32
|
| 111 |
+
image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
|
| 112 |
+
return image
|
| 113 |
+
|
| 114 |
+
def get_sd_vae_model(self, model_id="cross-attention/asymmetric-autoencoder-kl-x-1-5", fp16=False):
|
| 115 |
+
revision = "main"
|
| 116 |
+
torch_dtype = torch.float32
|
| 117 |
+
|
| 118 |
+
model = AsymmetricAutoencoderKL.from_pretrained(
|
| 119 |
+
model_id,
|
| 120 |
+
torch_dtype=torch_dtype,
|
| 121 |
+
revision=revision,
|
| 122 |
+
)
|
| 123 |
+
model.to(torch_device).eval()
|
| 124 |
+
|
| 125 |
+
return model
|
| 126 |
+
|
| 127 |
+
def get_generator(self, seed=0):
|
| 128 |
+
generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device
|
| 129 |
+
if torch_device != "mps":
|
| 130 |
+
return torch.Generator(device=generator_device).manual_seed(seed)
|
| 131 |
+
return torch.manual_seed(seed)
|
| 132 |
+
|
| 133 |
+
@parameterized.expand(
|
| 134 |
+
[
|
| 135 |
+
# fmt: off
|
| 136 |
+
[
|
| 137 |
+
33,
|
| 138 |
+
Expectations(
|
| 139 |
+
{
|
| 140 |
+
("xpu", 3): torch.tensor([-0.0343, 0.2873, 0.1680, -0.0140, -0.3459, 0.3522, -0.1336, 0.1075]),
|
| 141 |
+
("cuda", 7): torch.tensor([-0.0336, 0.3011, 0.1764, 0.0087, -0.3401, 0.3645, -0.1247, 0.1205]),
|
| 142 |
+
("mps", None): torch.tensor(
|
| 143 |
+
[-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824]
|
| 144 |
+
),
|
| 145 |
+
}
|
| 146 |
+
),
|
| 147 |
+
],
|
| 148 |
+
[
|
| 149 |
+
47,
|
| 150 |
+
Expectations(
|
| 151 |
+
{
|
| 152 |
+
("xpu", 3): torch.tensor([0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529]),
|
| 153 |
+
("cuda", 7): torch.tensor([0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529]),
|
| 154 |
+
("mps", None): torch.tensor(
|
| 155 |
+
[-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089]
|
| 156 |
+
),
|
| 157 |
+
}
|
| 158 |
+
),
|
| 159 |
+
],
|
| 160 |
+
# fmt: on
|
| 161 |
+
]
|
| 162 |
+
)
|
| 163 |
+
def test_stable_diffusion(self, seed, expected_slices):
|
| 164 |
+
model = self.get_sd_vae_model()
|
| 165 |
+
image = self.get_sd_image(seed)
|
| 166 |
+
generator = self.get_generator(seed)
|
| 167 |
+
|
| 168 |
+
with torch.no_grad():
|
| 169 |
+
sample = model(image, generator=generator, sample_posterior=True).sample
|
| 170 |
+
|
| 171 |
+
assert sample.shape == image.shape
|
| 172 |
+
|
| 173 |
+
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
|
| 174 |
+
|
| 175 |
+
expected_slice = expected_slices.get_expectation()
|
| 176 |
+
assert torch_all_close(output_slice, expected_slice, atol=5e-3)
|
| 177 |
+
|
| 178 |
+
@parameterized.expand(
|
| 179 |
+
[
|
| 180 |
+
# fmt: off
|
| 181 |
+
[
|
| 182 |
+
33,
|
| 183 |
+
[-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097],
|
| 184 |
+
[-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078],
|
| 185 |
+
],
|
| 186 |
+
[
|
| 187 |
+
47,
|
| 188 |
+
[0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531],
|
| 189 |
+
[0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531],
|
| 190 |
+
],
|
| 191 |
+
# fmt: on
|
| 192 |
+
]
|
| 193 |
+
)
|
| 194 |
+
def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
|
| 195 |
+
model = self.get_sd_vae_model()
|
| 196 |
+
image = self.get_sd_image(seed)
|
| 197 |
+
|
| 198 |
+
with torch.no_grad():
|
| 199 |
+
sample = model(image).sample
|
| 200 |
+
|
| 201 |
+
assert sample.shape == image.shape
|
| 202 |
+
|
| 203 |
+
output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
|
| 204 |
+
expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
|
| 205 |
+
|
| 206 |
+
assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
|
| 207 |
+
|
| 208 |
+
@parameterized.expand(
|
| 209 |
+
[
|
| 210 |
+
# fmt: off
|
| 211 |
+
[13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]],
|
| 212 |
+
[37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]],
|
| 213 |
+
# fmt: on
|
| 214 |
+
]
|
| 215 |
+
)
|
| 216 |
+
@require_torch_accelerator
|
| 217 |
+
@skip_mps
|
| 218 |
+
def test_stable_diffusion_decode(self, seed, expected_slice):
|
| 219 |
+
model = self.get_sd_vae_model()
|
| 220 |
+
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
|
| 221 |
+
|
| 222 |
+
with torch.no_grad():
|
| 223 |
+
sample = model.decode(encoding).sample
|
| 224 |
+
|
| 225 |
+
assert list(sample.shape) == [3, 3, 512, 512]
|
| 226 |
+
|
| 227 |
+
output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
|
| 228 |
+
expected_output_slice = torch.tensor(expected_slice)
|
| 229 |
+
|
| 230 |
+
assert torch_all_close(output_slice, expected_output_slice, atol=2e-3)
|
| 231 |
+
|
| 232 |
+
@parameterized.expand([(13,), (16,), (37,)])
|
| 233 |
+
@require_torch_gpu
|
| 234 |
+
@unittest.skipIf(
|
| 235 |
+
not is_xformers_available(),
|
| 236 |
+
reason="xformers is not required when using PyTorch 2.0.",
|
| 237 |
+
)
|
| 238 |
+
def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
|
| 239 |
+
model = self.get_sd_vae_model()
|
| 240 |
+
encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
|
| 241 |
+
|
| 242 |
+
with torch.no_grad():
|
| 243 |
+
sample = model.decode(encoding).sample
|
| 244 |
+
|
| 245 |
+
model.enable_xformers_memory_efficient_attention()
|
| 246 |
+
with torch.no_grad():
|
| 247 |
+
sample_2 = model.decode(encoding).sample
|
| 248 |
+
|
| 249 |
+
assert list(sample.shape) == [3, 3, 512, 512]
|
| 250 |
+
|
| 251 |
+
assert torch_all_close(sample, sample_2, atol=5e-2)
|
| 252 |
+
|
| 253 |
+
@parameterized.expand(
|
| 254 |
+
[
|
| 255 |
+
# fmt: off
|
| 256 |
+
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
|
| 257 |
+
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
|
| 258 |
+
# fmt: on
|
| 259 |
+
]
|
| 260 |
+
)
|
| 261 |
+
def test_stable_diffusion_encode_sample(self, seed, expected_slice):
|
| 262 |
+
model = self.get_sd_vae_model()
|
| 263 |
+
image = self.get_sd_image(seed)
|
| 264 |
+
generator = self.get_generator(seed)
|
| 265 |
+
|
| 266 |
+
with torch.no_grad():
|
| 267 |
+
dist = model.encode(image).latent_dist
|
| 268 |
+
sample = dist.sample(generator=generator)
|
| 269 |
+
|
| 270 |
+
assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
|
| 271 |
+
|
| 272 |
+
output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
|
| 273 |
+
expected_output_slice = torch.tensor(expected_slice)
|
| 274 |
+
|
| 275 |
+
tolerance = 3e-3 if torch_device != "mps" else 1e-2
|
| 276 |
+
assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
|
exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_cosmos.py
ADDED
|
@@ -0,0 +1,86 @@
|
| 1 |
+
# Copyright 2025 HuggingFace Inc.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import unittest
|
| 16 |
+
|
| 17 |
+
from diffusers import AutoencoderKLCosmos
|
| 18 |
+
|
| 19 |
+
from ...testing_utils import enable_full_determinism, floats_tensor, torch_device
|
| 20 |
+
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
enable_full_determinism()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class AutoencoderKLCosmosTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
|
| 27 |
+
model_class = AutoencoderKLCosmos
|
| 28 |
+
main_input_name = "sample"
|
| 29 |
+
base_precision = 1e-2
|
| 30 |
+
|
| 31 |
+
def get_autoencoder_kl_cosmos_config(self):
|
| 32 |
+
return {
|
| 33 |
+
"in_channels": 3,
|
| 34 |
+
"out_channels": 3,
|
| 35 |
+
"latent_channels": 4,
|
| 36 |
+
"encoder_block_out_channels": (8, 8, 8, 8),
|
| 37 |
+
"decode_block_out_channels": (8, 8, 8, 8),
|
| 38 |
+
"attention_resolutions": (8,),
|
| 39 |
+
"resolution": 64,
|
| 40 |
+
"num_layers": 2,
|
| 41 |
+
"patch_size": 4,
|
| 42 |
+
"patch_type": "haar",
|
| 43 |
+
"scaling_factor": 1.0,
|
| 44 |
+
"spatial_compression_ratio": 4,
|
| 45 |
+
"temporal_compression_ratio": 4,
|
| 46 |
+
}
|
| 47 |
+
|
| 48 |
+
@property
|
| 49 |
+
def dummy_input(self):
|
| 50 |
+
batch_size = 2
|
| 51 |
+
num_frames = 9
|
| 52 |
+
num_channels = 3
|
| 53 |
+
height = 32
|
| 54 |
+
width = 32
|
| 55 |
+
|
| 56 |
+
image = floats_tensor((batch_size, num_channels, num_frames, height, width)).to(torch_device)
|
| 57 |
+
|
| 58 |
+
return {"sample": image}
|
| 59 |
+
|
| 60 |
+
@property
|
| 61 |
+
def input_shape(self):
|
| 62 |
+
return (3, 9, 32, 32)
|
| 63 |
+
|
| 64 |
+
@property
|
| 65 |
+
def output_shape(self):
|
| 66 |
+
return (3, 9, 32, 32)
|
| 67 |
+
|
| 68 |
+
def prepare_init_args_and_inputs_for_common(self):
|
| 69 |
+
init_dict = self.get_autoencoder_kl_cosmos_config()
|
| 70 |
+
inputs_dict = self.dummy_input
|
| 71 |
+
return init_dict, inputs_dict
|
| 72 |
+
|
| 73 |
+
def test_gradient_checkpointing_is_applied(self):
|
| 74 |
+
expected_set = {
|
| 75 |
+
"CosmosEncoder3d",
|
| 76 |
+
"CosmosDecoder3d",
|
| 77 |
+
}
|
| 78 |
+
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
|
| 79 |
+
|
| 80 |
+
@unittest.skip("Not sure why this test fails. Investigate later.")
|
| 81 |
+
def test_effective_gradient_checkpointing(self):
|
| 82 |
+
pass
|
| 83 |
+
|
| 84 |
+
@unittest.skip("Unsupported test.")
|
| 85 |
+
def test_forward_with_norm_groups(self):
|
| 86 |
+
pass
|
exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_dc.py
ADDED
|
@@ -0,0 +1,87 @@
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025 HuggingFace Inc.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
|
| 16 |
+
import unittest
|
| 17 |
+
|
| 18 |
+
from diffusers import AutoencoderDC
|
| 19 |
+
|
| 20 |
+
from ...testing_utils import (
|
| 21 |
+
enable_full_determinism,
|
| 22 |
+
floats_tensor,
|
| 23 |
+
torch_device,
|
| 24 |
+
)
|
| 25 |
+
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
enable_full_determinism()
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class AutoencoderDCTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
|
| 32 |
+
model_class = AutoencoderDC
|
| 33 |
+
main_input_name = "sample"
|
| 34 |
+
base_precision = 1e-2
|
| 35 |
+
|
| 36 |
+
def get_autoencoder_dc_config(self):
|
| 37 |
+
return {
|
| 38 |
+
"in_channels": 3,
|
| 39 |
+
"latent_channels": 4,
|
| 40 |
+
"attention_head_dim": 2,
|
| 41 |
+
"encoder_block_types": (
|
| 42 |
+
"ResBlock",
|
| 43 |
+
"EfficientViTBlock",
|
| 44 |
+
),
|
| 45 |
+
"decoder_block_types": (
|
| 46 |
+
"ResBlock",
|
| 47 |
+
"EfficientViTBlock",
|
| 48 |
+
),
|
| 49 |
+
"encoder_block_out_channels": (8, 8),
|
| 50 |
+
"decoder_block_out_channels": (8, 8),
|
| 51 |
+
"encoder_qkv_multiscales": ((), (5,)),
|
| 52 |
+
"decoder_qkv_multiscales": ((), (5,)),
|
| 53 |
+
"encoder_layers_per_block": (1, 1),
|
| 54 |
+
"decoder_layers_per_block": [1, 1],
|
| 55 |
+
"downsample_block_type": "conv",
|
| 56 |
+
"upsample_block_type": "interpolate",
|
| 57 |
+
"decoder_norm_types": "rms_norm",
|
| 58 |
+
"decoder_act_fns": "silu",
|
| 59 |
+
"scaling_factor": 0.41407,
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
@property
|
| 63 |
+
def dummy_input(self):
|
| 64 |
+
batch_size = 4
|
| 65 |
+
num_channels = 3
|
| 66 |
+
sizes = (32, 32)
|
| 67 |
+
|
| 68 |
+
image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
|
| 69 |
+
|
| 70 |
+
return {"sample": image}
|
| 71 |
+
|
| 72 |
+
@property
|
| 73 |
+
def input_shape(self):
|
| 74 |
+
return (3, 32, 32)
|
| 75 |
+
|
| 76 |
+
@property
|
| 77 |
+
def output_shape(self):
|
| 78 |
+
return (3, 32, 32)
|
| 79 |
+
|
| 80 |
+
def prepare_init_args_and_inputs_for_common(self):
|
| 81 |
+
init_dict = self.get_autoencoder_dc_config()
|
| 82 |
+
inputs_dict = self.dummy_input
|
| 83 |
+
return init_dict, inputs_dict
|
| 84 |
+
|
| 85 |
+
@unittest.skip("AutoencoderDC does not support `norm_num_groups` because it does not use GroupNorm.")
|
| 86 |
+
def test_forward_with_norm_groups(self):
|
| 87 |
+
pass
|
exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_hunyuan_video.py
ADDED
|
@@ -0,0 +1,210 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import torch

from diffusers import AutoencoderKLHunyuanVideo
from diffusers.models.autoencoders.autoencoder_kl_hunyuan_video import prepare_causal_attention_mask

from ...testing_utils import (
    enable_full_determinism,
    floats_tensor,
    torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLHunyuanVideoTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKLHunyuanVideo
    main_input_name = "sample"
    base_precision = 1e-2

    def get_autoencoder_kl_hunyuan_video_config(self):
        return {
            "in_channels": 3,
            "out_channels": 3,
            "latent_channels": 4,
            "down_block_types": (
                "HunyuanVideoDownBlock3D",
                "HunyuanVideoDownBlock3D",
                "HunyuanVideoDownBlock3D",
                "HunyuanVideoDownBlock3D",
            ),
            "up_block_types": (
                "HunyuanVideoUpBlock3D",
                "HunyuanVideoUpBlock3D",
                "HunyuanVideoUpBlock3D",
                "HunyuanVideoUpBlock3D",
            ),
            "block_out_channels": (8, 8, 8, 8),
            "layers_per_block": 1,
            "act_fn": "silu",
            "norm_num_groups": 4,
            "scaling_factor": 0.476986,
            "spatial_compression_ratio": 8,
            "temporal_compression_ratio": 4,
            "mid_block_add_attention": True,
        }

    @property
    def dummy_input(self):
        batch_size = 2
        num_frames = 9
        num_channels = 3
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 9, 16, 16)

    @property
    def output_shape(self):
        return (3, 9, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = self.get_autoencoder_kl_hunyuan_video_config()
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_enable_disable_tiling(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        torch.manual_seed(0)
        model = self.model_class(**init_dict).to(torch_device)

        inputs_dict.update({"return_dict": False})

        torch.manual_seed(0)
        output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        torch.manual_seed(0)
        model.enable_tiling()
        output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertLess(
            (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(),
            0.5,
            "VAE tiling should not affect the inference results",
        )

        torch.manual_seed(0)
        model.disable_tiling()
        output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertEqual(
            output_without_tiling.detach().cpu().numpy().all(),
            output_without_tiling_2.detach().cpu().numpy().all(),
            "Without tiling outputs should match with the outputs when tiling is manually disabled.",
        )

    def test_enable_disable_slicing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        torch.manual_seed(0)
        model = self.model_class(**init_dict).to(torch_device)

        inputs_dict.update({"return_dict": False})

        torch.manual_seed(0)
        output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        torch.manual_seed(0)
        model.enable_slicing()
        output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertLess(
            (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(),
            0.5,
            "VAE slicing should not affect the inference results",
        )

        torch.manual_seed(0)
        model.disable_slicing()
        output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertEqual(
            output_without_slicing.detach().cpu().numpy().all(),
            output_without_slicing_2.detach().cpu().numpy().all(),
            "Without slicing outputs should match with the outputs when slicing is manually disabled.",
        )

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {
            "HunyuanVideoDecoder3D",
            "HunyuanVideoDownBlock3D",
            "HunyuanVideoEncoder3D",
            "HunyuanVideoMidBlock3D",
            "HunyuanVideoUpBlock3D",
        }
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)

    # We need to overwrite this test because the base test does not account for the length of down_block_types
    def test_forward_with_norm_groups(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["norm_num_groups"] = 16
        init_dict["block_out_channels"] = (16, 16, 16, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

        if isinstance(output, dict):
            output = output.to_tuple()[0]

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    @unittest.skip("Unsupported test.")
    def test_outputs_equivalence(self):
        pass

    def test_prepare_causal_attention_mask(self):
        def prepare_causal_attention_mask_orig(
            num_frames: int, height_width: int, dtype: torch.dtype, device: torch.device, batch_size: int = None
        ) -> torch.Tensor:
            seq_len = num_frames * height_width
            mask = torch.full((seq_len, seq_len), float("-inf"), dtype=dtype, device=device)
            for i in range(seq_len):
                i_frame = i // height_width
                mask[i, : (i_frame + 1) * height_width] = 0
            if batch_size is not None:
                mask = mask.unsqueeze(0).expand(batch_size, -1, -1)
            return mask

        # test with some odd shapes
        original_mask = prepare_causal_attention_mask_orig(
            num_frames=31, height_width=111, dtype=torch.float32, device=torch_device
        )
        new_mask = prepare_causal_attention_mask(
            num_frames=31, height_width=111, dtype=torch.float32, device=torch_device
        )
        self.assertTrue(
            torch.allclose(original_mask, new_mask),
            "Causal attention mask should be the same",
        )
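The causal-mask test above checks the library helper against a straightforward reference loop. As a small standalone illustration (no diffusers import required), the same reference logic applied to a tiny case makes the block-causal structure visible: every token may attend to all tokens of its own frame and of earlier frames, and nothing later.

import torch


def reference_causal_attention_mask(num_frames, height_width, dtype=torch.float32, device="cpu"):
    # Same loop-based reference as in the test above.
    seq_len = num_frames * height_width
    mask = torch.full((seq_len, seq_len), float("-inf"), dtype=dtype, device=device)
    for i in range(seq_len):
        i_frame = i // height_width
        mask[i, : (i_frame + 1) * height_width] = 0
    return mask


# Tiny example: 2 frames, 3 spatial positions per frame -> a 6x6 block-causal mask.
mask = reference_causal_attention_mask(num_frames=2, height_width=3)
print(mask)
# Rows belonging to frame 0 allow columns 0..2 only; rows belonging to frame 1 allow all 6 columns.

The test then asserts that the vectorized prepare_causal_attention_mask from the library produces exactly this mask, including for awkward sizes such as num_frames=31 and height_width=111.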
exp_code/1_benchmark/diffusers-WanS2V/tests/models/autoencoders/test_models_autoencoder_kl.py
ADDED
|
@@ -0,0 +1,468 @@
# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import torch
from parameterized import parameterized

from diffusers import AutoencoderKL
from diffusers.utils.import_utils import is_xformers_available

from ...testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    floats_tensor,
    load_hf_numpy,
    require_torch_accelerator,
    require_torch_accelerator_with_fp16,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_all_close,
    torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


enable_full_determinism()


class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    def get_autoencoder_kl_config(self, block_out_channels=None, norm_num_groups=None):
        block_out_channels = block_out_channels or [2, 4]
        norm_num_groups = norm_num_groups or 2
        init_dict = {
            "block_out_channels": block_out_channels,
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
            "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
            "latent_channels": 4,
            "norm_num_groups": norm_num_groups,
        }
        return init_dict

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = self.get_autoencoder_kl_config()
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_enable_disable_tiling(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        torch.manual_seed(0)
        model = self.model_class(**init_dict).to(torch_device)

        inputs_dict.update({"return_dict": False})

        torch.manual_seed(0)
        output_without_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        torch.manual_seed(0)
        model.enable_tiling()
        output_with_tiling = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertLess(
            (output_without_tiling.detach().cpu().numpy() - output_with_tiling.detach().cpu().numpy()).max(),
            0.5,
            "VAE tiling should not affect the inference results",
        )

        torch.manual_seed(0)
        model.disable_tiling()
        output_without_tiling_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertEqual(
            output_without_tiling.detach().cpu().numpy().all(),
            output_without_tiling_2.detach().cpu().numpy().all(),
            "Without tiling outputs should match with the outputs when tiling is manually disabled.",
        )

    def test_enable_disable_slicing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        torch.manual_seed(0)
        model = self.model_class(**init_dict).to(torch_device)

        inputs_dict.update({"return_dict": False})

        torch.manual_seed(0)
        output_without_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        torch.manual_seed(0)
        model.enable_slicing()
        output_with_slicing = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertLess(
            (output_without_slicing.detach().cpu().numpy() - output_with_slicing.detach().cpu().numpy()).max(),
            0.5,
            "VAE slicing should not affect the inference results",
        )

        torch.manual_seed(0)
        model.disable_slicing()
        output_without_slicing_2 = model(**inputs_dict, generator=torch.manual_seed(0))[0]

        self.assertEqual(
            output_without_slicing.detach().cpu().numpy().all(),
            output_without_slicing_2.detach().cpu().numpy().all(),
            "Without slicing outputs should match with the outputs when slicing is manually disabled.",
        )

    def test_gradient_checkpointing_is_applied(self):
        expected_set = {"Decoder", "Encoder", "UNetMidBlock2D"}
        super().test_gradient_checkpointing_is_applied(expected_set=expected_set)

    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        # Keep generator on CPU for non-CUDA devices to compare outputs with CPU result tensors
        generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device
        if torch_device != "mps":
            generator = torch.Generator(device=generator_device).manual_seed(0)
        else:
            generator = torch.manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif generator_device == "cpu":
            expected_output_slice = torch.tensor(
                [
                    -0.1352,
                    0.0878,
                    0.0419,
                    -0.0818,
                    -0.1069,
                    0.0688,
                    -0.1458,
                    -0.4446,
                    -0.0026,
                ]
            )
        else:
            expected_output_slice = torch.tensor(
                [
                    -0.2421,
                    0.4642,
                    0.2507,
                    -0.0438,
                    0.0682,
                    0.3160,
                    -0.2018,
                    -0.0727,
                    0.2485,
                ]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device)

        return model

    def get_generator(self, seed=0):
        generator_device = "cpu" if not torch_device.startswith(torch_device) else torch_device
        if torch_device != "mps":
            return torch.Generator(device=generator_device).manual_seed(seed)
        return torch.manual_seed(seed)

    @parameterized.expand(
        [
            # fmt: off
            [
                33,
                [-0.1556, 0.9848, -0.0410, -0.0642, -0.2685, 0.8381, -0.2004, -0.0700],
                [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824],
            ],
            [
                47,
                [-0.2376, 0.1200, 0.1337, -0.4830, -0.2504, -0.0759, -0.0486, -0.4077],
                [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131],
            ],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [
                33,
                [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814],
                [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824],
            ],
            [
                47,
                [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085],
                [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131],
            ],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_accelerator
    @skip_mps
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(
        not is_xformers_available(),
        reason="xformers is not required when using PyTorch 2.0.",
    )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(
        not is_xformers_available(),
        reason="xformers is not required when using PyTorch 2.0.",
    )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
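As a quick reference for the fast tests above, a minimal sketch (not part of the diff) of an encode/decode round trip with the same tiny AutoencoderKL configuration used by get_autoencoder_kl_config; it assumes the standard diffusers AutoencoderKL API, i.e. encode(...).latent_dist and decode(...).sample.

import torch

from diffusers import AutoencoderKL

# Tiny configuration mirroring get_autoencoder_kl_config above.
model = AutoencoderKL(
    block_out_channels=[2, 4],
    in_channels=3,
    out_channels=3,
    down_block_types=["DownEncoderBlock2D"] * 2,
    up_block_types=["UpDecoderBlock2D"] * 2,
    latent_channels=4,
    norm_num_groups=2,
).eval()

image = torch.randn(1, 3, 32, 32)  # same spatial size as the test's dummy_input

with torch.no_grad():
    posterior = model.encode(image).latent_dist          # diagonal Gaussian posterior over latents
    latents = posterior.sample(generator=torch.manual_seed(0))
    reconstruction = model.decode(latents).sample        # decode back to image space

print(latents.shape, reconstruction.shape)  # latents are spatially downsampled; the reconstruction matches the input shape

The integration tests above follow the same pattern against the pretrained Stable Diffusion VAE, comparing small output slices to reference values within a tolerance appropriate for the device and dtype.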