import gradio as gr
import torch
from clip2latent import models
from PIL import Image

device = "cuda"

model_choices = {
    "faces": {
        "checkpoint": "https://huggingface.co/lambdalabs/clip2latent/resolve/main/ffhq-sg2-510.ckpt",
        "config": "https://huggingface.co/lambdalabs/clip2latent/resolve/main/ffhq-sg2-510.yaml",
    },
    "landscape": {
        "checkpoint": "https://huggingface.co/lambdalabs/clip2latent/resolve/main/lhq-sg3-410.ckpt",
        "config": "https://huggingface.co/lambdalabs/clip2latent/resolve/main/lhq-sg3-410.yaml",
    },
}

model_cache = {}
for k, v in model_choices.items():
    checkpoint = v["checkpoint"]
    cfg_file = v["config"]
    # Moving to the CPU seems to break the model, so keep everything on the GPU.
    model_cache[k] = models.Clip2StyleGAN(cfg_file, device, checkpoint)


@torch.no_grad()
def infer(prompt, model_select, n_samples, scale):
    model = model_cache[model_select]
    images, _ = model(prompt, n_samples_per_txt=n_samples, cond_scale=scale, skips=250, clip_sort=True)
    images = images.cpu()
    # Map model output from [-1, 1] to uint8 HWC arrays for PIL.
    make_im = lambda x: (255 * x.clamp(-1, 1) / 2 + 127.5).to(torch.uint8).permute(1, 2, 0).numpy()
    images = [Image.fromarray(make_im(x)) for x in images]
    return images


css = """
a {
    color: inherit;
    text-decoration: underline;
}
.gradio-container {
    font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
    color: white;
    border-color: #9d66e5;
    background: #9d66e5;
}
input[type='range'] {
    accent-color: #9d66e5;
}
.dark input[type='range'] {
    accent-color: #dfdfdf;
}
.container {
    max-width: 730px;
    margin: auto;
    padding-top: 1.5rem;
}
#gallery {
    min-height: 22rem;
    margin-bottom: 15px;
    margin-left: auto;
    margin-right: auto;
    border-bottom-right-radius: .5rem !important;
    border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
    min-height: 20rem;
}
.details:hover {
    text-decoration: underline;
}
.gr-button {
    white-space: nowrap;
}
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-options {
    margin-bottom: 20px;
}
.footer {
    margin-bottom: 45px;
    margin-top: 35px;
    text-align: center;
    border-bottom: 1px solid #e5e5e5;
}
.footer>p {
    font-size: .8rem;
    display: inline-block;
    padding: 0 10px;
    transform: translateY(10px);
    background: white;
}
.dark .logo {
    filter: invert(1);
}
.dark .footer {
    border-color: #303030;
}
.dark .footer>p {
    background: #0b0f19;
}
.acknowledgments h4 {
    margin: 1.25em 0 .25em 0;
    font-weight: bold;
    font-size: 115%;
}
"""

examples = [
    ['a photograph of a happy person wearing sunglasses by the sea', 'faces', 2, 2],
    ['a photograph of Captain Jean Luc Picard', 'faces', 2, 2],
    ['a mountain in the middle of the sea', 'landscape', 2, 2],
    ['The sun setting over the sea', 'landscape', 2, 2],
]


def main():
    block = gr.Blocks(css=css)
    with block:
        gr.HTML(
            """
            <div>
                <p>Official demo for <b>clip2latent: Text driven sampling of a pre-trained StyleGAN using denoising diffusion and CLIP</b>, accepted to BMVC 2022.</p>
                <p>Get the code on GitHub, see the paper on arXiv.</p>
                <p>Justin N. M. Pinkney and Chuan Li @ Lambda Inc.</p>
                <p><b>Abstract:</b>
                We introduce a new method to efficiently create text-to-image models from a pre-trained CLIP and StyleGAN.
                It enables text-driven sampling with an existing generative model without any external data or fine-tuning.
                This is achieved by training a diffusion model, conditioned on CLIP embeddings, to sample latent vectors of a pre-trained StyleGAN, which we call clip2latent.
                We leverage the alignment between CLIP's image and text embeddings to avoid the need for any text-labelled data for training the conditional diffusion model.
                We demonstrate that clip2latent allows us to generate high-resolution (1024x1024 pixels) images based on text prompts with fast sampling, high image quality, and low training compute and data requirements.
                We also show that the use of the well-studied StyleGAN architecture, without further fine-tuning, allows us to directly apply existing methods to control and modify the generated images, adding a further layer of control to our text-to-image pipeline.</p>
                <p>Trained using Lambda GPU Cloud.</p>
            </div>
            """
        )
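
        # Minimal sketch of the remaining UI wiring, inferred from infer()'s
        # signature and the `examples` list above. The widget choices, labels,
        # and slider ranges are assumptions, not the original layout.
        with gr.Group():
            with gr.Row():
                text = gr.Textbox(label="Prompt", placeholder="Enter a text prompt")
                btn = gr.Button("Generate image")
            gallery = gr.Gallery(label="Generated images", elem_id="gallery")
        with gr.Row(elem_id="advanced-options"):
            model_select = gr.Dropdown(list(model_choices), value="faces", label="Model")
            n_samples = gr.Slider(label="Samples", minimum=1, maximum=4, step=1, value=2)
            scale = gr.Slider(label="Guidance scale", minimum=0, maximum=10, step=0.5, value=2)
        gr.Examples(examples=examples, inputs=[text, model_select, n_samples, scale], outputs=gallery, fn=infer)
        btn.click(infer, inputs=[text, model_select, n_samples, scale], outputs=gallery)

    block.launch()


if __name__ == "__main__":
    main()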