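"""Gen2 AnimateDiff nodes: evolved sampling, motion model loading/application, and AD keyframes."""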
from pathlib import Path
from typing import Union
import torch
import comfy.sample as comfy_sample
from comfy.model_patcher import ModelPatcher
from .ad_settings import AnimateDiffSettings
from .context import ContextOptions, ContextOptionsGroup, ContextSchedules
from .logger import logger
from .utils_model import BIGMAX, BetaSchedules, get_available_motion_loras, get_available_motion_models, get_motion_lora_path
from .utils_motion import ADKeyframeGroup, ADKeyframe
from .motion_lora import MotionLoraInfo, MotionLoraList
from .model_injection import (InjectionParams, ModelPatcherAndInjector, MotionModelGroup, MotionModelPatcher, create_fresh_motion_module,
load_motion_module_gen1, load_motion_module_gen2, load_motion_lora_as_patches, validate_model_compatibility_gen2)
from .sample_settings import SampleSettings, SeedNoiseGeneration
from .sampling import motion_sample_factory

class UseEvolvedSamplingNode:
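    """Wrap the base MODEL in a ModelPatcherAndInjector, attach motion models, context options,
    sample settings, and injection params, and patch model_sampling from either the sample
    settings' sigma_schedule or the selected (or autoselected) beta schedule."""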
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"beta_schedule": (BetaSchedules.ALIAS_LIST, {"default": BetaSchedules.AUTOSELECT}),
},
"optional": {
"m_models": ("M_MODELS",),
"context_options": ("CONTEXT_OPTIONS",),
"sample_settings": ("SAMPLE_SETTINGS",),
#"beta_schedule_override": ("BETA_SCHEDULE",),
}
}
RETURN_TYPES = ("MODEL",)
CATEGORY = "Animate Diff ππ
π
/β‘ Gen2 nodes β‘"
FUNCTION = "use_evolved_sampling"
def use_evolved_sampling(self, model: ModelPatcher, beta_schedule: str, m_models: MotionModelGroup=None, context_options: ContextOptionsGroup=None,
sample_settings: SampleSettings=None, beta_schedule_override=None):
if m_models is not None:
m_models = m_models.clone()
# for each motion model, confirm that it is compatible with SD model
for motion_model in m_models.models:
validate_model_compatibility_gen2(model=model, motion_model=motion_model)
# create injection params
model_name_list = [motion_model.model.mm_info.mm_name for motion_model in m_models.models]
model_names = ",".join(model_name_list)
# TODO: check if any apply_v2_properly is set to False
params = InjectionParams(unlimited_area_hack=False, model_name=model_names)
else:
params = InjectionParams()
# apply context options
if context_options:
params.set_context(context_options)
# need to use a ModelPatcher that supports injection of motion modules into unet
model = ModelPatcherAndInjector(model)
model.motion_models = m_models
model.sample_settings = sample_settings if sample_settings is not None else SampleSettings()
model.motion_injection_params = params
if model.sample_settings.custom_cfg is not None:
logger.info("[Sample Settings] custom_cfg is set; will override any KSampler cfg values or patches.")
if model.sample_settings.sigma_schedule is not None:
logger.info("[Sample Settings] sigma_schedule is set; will override beta_schedule.")
model.add_object_patch("model_sampling", model.sample_settings.sigma_schedule.clone().model_sampling)
else:
# save model_sampling from BetaSchedule as object patch
# if autoselect, get suggested beta_schedule from motion model
            # guard against motion_models being None so autoselect does not raise when no motion models are attached
            if beta_schedule == BetaSchedules.AUTOSELECT and model.motion_models is not None and not model.motion_models.is_empty():
beta_schedule = model.motion_models[0].model.get_best_beta_schedule(log=True)
new_model_sampling = BetaSchedules.to_model_sampling(beta_schedule, model)
if new_model_sampling is not None:
model.add_object_patch("model_sampling", new_model_sampling)
del m_models
return (model,)

class ApplyAnimateDiffModelNode:
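    """Clone the motion model (creating a fresh internal module if it is already present in the
    group), apply motion LoRAs, scale/effect multivals, keyframes, and the timestep percent range,
    then add it to the start of the M_MODELS group."""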
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"motion_model": ("MOTION_MODEL_ADE",),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
},
"optional": {
"motion_lora": ("MOTION_LORA",),
"scale_multival": ("MULTIVAL",),
"effect_multival": ("MULTIVAL",),
"ad_keyframes": ("AD_KEYFRAMES",),
"prev_m_models": ("M_MODELS",),
}
}
RETURN_TYPES = ("M_MODELS",)
CATEGORY = "Animate Diff ππ
π
/β‘ Gen2 nodes β‘"
FUNCTION = "apply_motion_model"
def apply_motion_model(self, motion_model: MotionModelPatcher, start_percent: float=0.0, end_percent: float=1.0,
motion_lora: MotionLoraList=None, ad_keyframes: ADKeyframeGroup=None,
scale_multival=None, effect_multival=None,
prev_m_models: MotionModelGroup=None,):
# set up motion models list
if prev_m_models is None:
prev_m_models = MotionModelGroup()
prev_m_models = prev_m_models.clone()
motion_model = motion_model.clone()
# check if internal motion model already present in previous model - create new if so
for prev_model in prev_m_models.models:
if motion_model.model is prev_model.model:
# need to create new internal model based on same state_dict
motion_model = create_fresh_motion_module(motion_model)
# apply motion model to loaded_mm
if motion_lora is not None:
for lora in motion_lora.loras:
load_motion_lora_as_patches(motion_model, lora)
motion_model.scale_multival = scale_multival
motion_model.effect_multival = effect_multival
motion_model.keyframes = ad_keyframes.clone() if ad_keyframes else ADKeyframeGroup()
motion_model.timestep_percent_range = (start_percent, end_percent)
# add to beginning, so that after injection, it will be the earliest of prev_m_models to be run
prev_m_models.add_to_start(mm=motion_model)
return (prev_m_models,)

class ApplyAnimateDiffModelBasicNode:
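    """Simplified variant of ApplyAnimateDiffModelNode that exposes only a subset of its inputs
    and delegates to its apply_motion_model implementation."""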
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"motion_model": ("MOTION_MODEL_ADE",),
},
"optional": {
"motion_lora": ("MOTION_LORA",),
"scale_multival": ("MULTIVAL",),
"effect_multival": ("MULTIVAL",),
"ad_keyframes": ("AD_KEYFRAMES",),
}
}
RETURN_TYPES = ("M_MODELS",)
CATEGORY = "Animate Diff ππ
π
/β‘ Gen2 nodes β‘"
FUNCTION = "apply_motion_model"
def apply_motion_model(self,
motion_model: MotionModelPatcher, motion_lora: MotionLoraList=None,
scale_multival=None, effect_multival=None, ad_keyframes=None):
# just a subset of normal ApplyAnimateDiffModelNode inputs
return ApplyAnimateDiffModelNode.apply_motion_model(self, motion_model, motion_lora=motion_lora,
scale_multival=scale_multival, effect_multival=effect_multival,
ad_keyframes=ad_keyframes)

class LoadAnimateDiffModelNode:
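    """Load a motion model by name via load_motion_module_gen2, optionally applying AnimateDiffSettings."""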
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model_name": (get_available_motion_models(),),
},
"optional": {
"ad_settings": ("AD_SETTINGS",),
}
}
RETURN_TYPES = ("MOTION_MODEL_ADE",)
RETURN_NAMES = ("MOTION_MODEL",)
CATEGORY = "Animate Diff ππ
π
/β‘ Gen2 nodes β‘"
FUNCTION = "load_motion_model"
def load_motion_model(self, model_name: str, ad_settings: AnimateDiffSettings=None):
# load motion module and motion settings, if included
motion_model = load_motion_module_gen2(model_name=model_name, motion_model_settings=ad_settings)
return (motion_model,)

class ADKeyframeNode:
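    """Create an ADKeyframe from start_percent, multivals, inherit_missing, and guarantee_steps,
    and append it to a cloned AD_KEYFRAMES group."""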
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_ad_keyframes": ("AD_KEYFRAMES", ),
"scale_multival": ("MULTIVAL",),
"effect_multival": ("MULTIVAL",),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_steps": ("INT", {"default": 1, "min": 0, "max": BIGMAX}),
}
}
RETURN_TYPES = ("AD_KEYFRAMES", )
FUNCTION = "load_keyframe"
CATEGORY = "Animate Diff ππ
π
"
def load_keyframe(self,
start_percent: float, prev_ad_keyframes=None,
                      scale_multival: Union[float, torch.Tensor]=None, effect_multival: Union[float, torch.Tensor]=None,
inherit_missing: bool=True, guarantee_steps: int=1):
if not prev_ad_keyframes:
prev_ad_keyframes = ADKeyframeGroup()
prev_ad_keyframes = prev_ad_keyframes.clone()
keyframe = ADKeyframe(start_percent=start_percent, scale_multival=scale_multival, effect_multival=effect_multival,
inherit_missing=inherit_missing, guarantee_steps=guarantee_steps)
prev_ad_keyframes.add(keyframe)
return (prev_ad_keyframes,)
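
# --- Illustrative registration sketch (assumption, not part of the original file) ---
# ComfyUI discovers custom nodes through a NODE_CLASS_MAPPINGS dict (optionally paired with
# NODE_DISPLAY_NAME_MAPPINGS); the key names below are hypothetical examples only, and the
# actual registration for these classes is expected to live elsewhere in the package.
#
# NODE_CLASS_MAPPINGS = {
#     "ADE_UseEvolvedSampling": UseEvolvedSamplingNode,
#     "ADE_ApplyAnimateDiffModel": ApplyAnimateDiffModelNode,
#     "ADE_ApplyAnimateDiffModelSimple": ApplyAnimateDiffModelBasicNode,
#     "ADE_LoadAnimateDiffModel": LoadAnimateDiffModelNode,
#     "ADE_AnimateDiffKeyframe": ADKeyframeNode,
# }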