from omegaconf import OmegaConf
import torch

from lib.smplfusion import DDIM, share, scheduler

from .common import *

DOWNLOAD_URL = 'https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt?download=true'
MODEL_PATH = f'{MODEL_FOLDER}/sd-1-5-inpainting/sd-v1-5-inpainting.ckpt'

# Pre-download the checkpoint at import time so the first call to load_model() does not block on the download.
download_file(DOWNLOAD_URL, MODEL_PATH)


def load_model(dtype=torch.float16):
    # No-op if the checkpoint is already present on disk.
    download_file(DOWNLOAD_URL, MODEL_PATH)
    state_dict = torch.load(MODEL_PATH)['state_dict']
    config = OmegaConf.load(f'{CONFIG_FOLDER}/ddpm/v1.yaml')

    print("Loading model: Stable-Inpainting 1.5")

    # Instantiate the sub-modules from their config files.
    unet = load_obj(f'{CONFIG_FOLDER}/unet/inpainting/v1.yaml').eval().cuda()
    vae = load_obj(f'{CONFIG_FOLDER}/vae.yaml').eval().cuda()
    encoder = load_obj(f'{CONFIG_FOLDER}/encoders/clip.yaml').eval().cuda()

    # Split the monolithic checkpoint into per-module state dicts by key prefix.
    extract = lambda state_dict, model: {x[len(model) + 1:]: y for x, y in state_dict.items() if model in x}
    unet_state = extract(state_dict, 'model.diffusion_model')
    encoder_state = extract(state_dict, 'cond_stage_model')
    vae_state = extract(state_dict, 'first_stage_model')

    unet.load_state_dict(unet_state)
    encoder.load_state_dict(encoder_state)
    vae.load_state_dict(vae_state)

    if dtype == torch.float16:
        unet.convert_to_fp16()
        vae.to(dtype)
        encoder.to(dtype)

    # Inference only: freeze all weights.
    unet = unet.requires_grad_(False)
    encoder = encoder.requires_grad_(False)
    vae = vae.requires_grad_(False)

    ddim = DDIM(config, vae, encoder, unet)
    share.schedule = scheduler.linear(config.timesteps, config.linear_start, config.linear_end)
    return ddim
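

# Usage sketch (an assumption, not part of this module: it presumes the package's
# MODEL_FOLDER / CONFIG_FOLDER paths resolve via `.common` and a CUDA device is available):
#
#     ddim = load_model(dtype=torch.float16)
#     # `ddim` bundles the frozen VAE, CLIP text encoder, and inpainting UNet;
#     # downstream code passes it an image, mask, and prompt through the DDIM
#     # sampling interface defined in lib.smplfusion.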