import torch

from modules import devices

# the single big model component currently resident on the GPU; everything
# else tracked by the hooks below is kept on the CPU until it is needed
module_in_gpu = None
cpu = torch.device("cpu")


def send_everything_to_cpu():
    global module_in_gpu

    if module_in_gpu is not None:
        module_in_gpu.to(cpu)

    module_in_gpu = None
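

# usage sketch: external code can call this to reclaim VRAM before running
# something unrelated (the import path is assumed here for illustration):
#
#   from modules import lowvram
#   lowvram.send_everything_to_cpu()  # evict whatever module is on the GPU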


def setup_for_low_vram(sd_model, use_medvram):
    # maps a hooked module to the parent that should be moved to the GPU as a
    # whole (used for the cond stage model, whose transformer gets the hook)
    parents = {}

    def send_me_to_gpu(module, _):
        """send this module to GPU; send whatever tracked module was previously in GPU to CPU;
        we add this as forward_pre_hook to a lot of modules, and this way all but one of them
        will be in CPU
        """
        global module_in_gpu

        # the hook may fire on a child module; move its registered parent instead
        module = parents.get(module, module)

        if module_in_gpu is module:
            return

        if module_in_gpu is not None:
            module_in_gpu.to(cpu)

        module.to(devices.device)
        module_in_gpu = module
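
    # How the swap works: register_forward_pre_hook(fn) makes torch call
    # fn(module, input) right before that module's forward(), so each big
    # component is pulled onto the GPU just in time, evicting the previous
    # one. A toy sketch of the pattern (illustrative names only, not part of
    # this module):
    #
    #   net_a = torch.nn.Linear(4, 4)
    #   net_b = torch.nn.Linear(4, 4)
    #   net_a.register_forward_pre_hook(send_me_to_gpu)
    #   net_b.register_forward_pre_hook(send_me_to_gpu)
    #   x = torch.randn(2, 4)
    #   net_a(x)  # net_a moves to the GPU
    #   net_b(x)  # net_a is evicted to the CPU, net_b moves to the GPU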
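
    # first_stage_model is not called through forward(); callers use its
    # encode() and decode() methods directly, so a forward_pre_hook alone
    # would never fire for it -- wrap both methods so that using them moves
    # the module to the GPU the same way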
    first_stage_model = sd_model.first_stage_model
    first_stage_model_encode = sd_model.first_stage_model.encode
    first_stage_model_decode = sd_model.first_stage_model.decode

    def first_stage_model_encode_wrap(x):
        send_me_to_gpu(first_stage_model, None)
        return first_stage_model_encode(x)

    def first_stage_model_decode_wrap(z):
        send_me_to_gpu(first_stage_model, None)
        return first_stage_model_decode(z)
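
    # e.g. once setup is done, sd_model.first_stage_model.decode(z) first runs
    # send_me_to_gpu(first_stage_model, None), so the first stage model is
    # already on the GPU when the original decode() body executes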
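
    # for SD1, cond_stage_model is CLIP and its network is in the transformer
    # field; for SD2, it's open clip and the network is in the model field --
    # alias it to transformer so both are handled uniformly below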
    if hasattr(sd_model.cond_stage_model, 'model'):
        sd_model.cond_stage_model.transformer = sd_model.cond_stage_model.model
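
    # detach the four big modules (cond stage transformer, first stage model,
    # depth model if present, and the unet), send what's left of sd_model to
    # the GPU, then reattach them -- the big modules end up on the CPU while
    # the rest of the model sits on the GPU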
    stored = sd_model.cond_stage_model.transformer, sd_model.first_stage_model, getattr(sd_model, 'depth_model', None), sd_model.model
    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = None, None, None, None
    sd_model.to(devices.device)
    sd_model.cond_stage_model.transformer, sd_model.first_stage_model, sd_model.depth_model, sd_model.model = stored
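
    # register hooks for the first three big modules, and swap in the wrapped
    # encode/decode for the first stage model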
    sd_model.cond_stage_model.transformer.register_forward_pre_hook(send_me_to_gpu)
    sd_model.first_stage_model.register_forward_pre_hook(send_me_to_gpu)
    sd_model.first_stage_model.encode = first_stage_model_encode_wrap
    sd_model.first_stage_model.decode = first_stage_model_decode_wrap
    if sd_model.depth_model:
        sd_model.depth_model.register_forward_pre_hook(send_me_to_gpu)
    parents[sd_model.cond_stage_model.transformer] = sd_model.cond_stage_model

    # undo the transformer alias created above for SD2-style models
    if hasattr(sd_model.cond_stage_model, 'model'):
        sd_model.cond_stage_model.model = sd_model.cond_stage_model.transformer
        del sd_model.cond_stage_model.transformer
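
    # medvram keeps the unet as one swappable unit; lowvram (the else branch)
    # splits it further so only one of its submodules is on the GPU at a time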
    if use_medvram:
        sd_model.model.register_forward_pre_hook(send_me_to_gpu)
    else:
        diff_model = sd_model.model.diffusion_model
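
        # the unet is still too big to keep on the GPU whole, so apply the
        # same detach/move/reattach trick to its submodules, leaving them on
        # the CPU while the rest of sd_model.model moves over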
        stored = diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed
        diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = None, None, None, None
        sd_model.model.to(devices.device)
        diff_model.input_blocks, diff_model.middle_block, diff_model.output_blocks, diff_model.time_embed = stored
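
        # install hooks on the unet's pieces so each is fetched on demand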
        diff_model.time_embed.register_forward_pre_hook(send_me_to_gpu)
        for block in diff_model.input_blocks:
            block.register_forward_pre_hook(send_me_to_gpu)
        diff_model.middle_block.register_forward_pre_hook(send_me_to_gpu)
        for block in diff_model.output_blocks:
            block.register_forward_pre_hook(send_me_to_gpu)