#!/usr/bin/env python3
import torch
from diffusers import ConsistencyModelPipeline
device = "cpu"

# Load the cd_bedroom256_lpips checkpoint.
model_id_or_path = "openai/diffusers-cd_bedroom256_lpips"
pipe = ConsistencyModelPipeline.from_pretrained(model_id_or_path)
pipe.to(device)
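# Note (an optional variation, not part of the original example): on a CUDA
# machine you could instead set device = "cuda" and pass
# torch_dtype=torch.float16 to from_pretrained() for faster, lower-memory
# inference; that is also what the torch import above is for.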

# Multistep sampling
# Timesteps can be specified explicitly; the particular timesteps below come from the original GitHub repo:
# https://github.com/openai/consistency_models/blob/main/scripts/launch.sh#L83
for _ in range(10):
    image = pipe(timesteps=[17, 0]).images[0]
    image.show()
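
# Onestep sampling (an illustrative sketch, not part of the launch script above):
# the same pipeline can also generate in a single distilled step by passing
# num_inference_steps=1 instead of explicit timesteps.
image = pipe(num_inference_steps=1).images[0]
image.show()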