import gradio as gr
import os
import torch
from diffusers import AutoencoderTiny
from torchvision.transforms.functional import to_pil_image, to_tensor
device = 'cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu'
model_id = "madebyollin/taesd"
# Load the tiny autoencoder (TAESD); prefer the safetensors weights.
vae = AutoencoderTiny.from_pretrained(model_id, use_safetensors=True).to(device)
@torch.no_grad()
def decode(image):
    # The input is a 64x64 RGBA image whose four channels store scaled TAESD latents.
    t = to_tensor(image).unsqueeze(0).to(device)          # 1x4x64x64, values in [0, 1]
    unscaled_t = vae.unscale_latents(t)                   # map [0, 1] back to the raw latent range
    reconstructed = vae.decoder(unscaled_t).clamp(0, 1)   # 1x3x512x512 RGB
    return to_pil_image(reconstructed[0])
# Default example: a pre-encoded 64x64 latent image bundled with the app.
astronaut = os.path.join(os.path.dirname(__file__), "images/21.encoded.png")
def app():
    # Input: a 64x64 RGBA latent image; output: the 512x512 decoded RGB image.
    return gr.Interface(decode,
                        gr.Image(type="pil",
                                 image_mode="RGBA",
                                 mirror_webcam=False,
                                 label='64x64',
                                 value=astronaut),
                        gr.Image(type="pil",
                                 image_mode="RGB",
                                 label='512x512',
                                 show_share_button=True,
                                 height=512,
                                 width=512
                                 ),
                        css=".upload-container {width: 256px !important; height: 256px !important}",
                        examples=[
                            astronaut,
                            os.path.join(os.path.dirname(__file__), "images/18.encoded.png"),
                            os.path.join(os.path.dirname(__file__), "images/20.encoded.png")
                        ], allow_flagging='never', title='Latents Decoder')
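
# --- Sketch (not part of the original file): producing an "*.encoded.png" input ---
# A minimal sketch of the inverse step, assuming the example images were made by
# encoding a 512x512 RGB image with the same TAESD model, mapping the raw 4-channel
# latents into [0, 1] with scale_latents, and saving the result as a 64x64 RGBA PNG.
# The helper name and the file paths below are illustrative assumptions.
from PIL import Image

@torch.no_grad()
def encode_to_png(src_path, dst_path):
    image = Image.open(src_path).convert("RGB").resize((512, 512))
    t = to_tensor(image).unsqueeze(0).to(device)        # 1x3x512x512, values in [0, 1]
    latents = vae.encoder(t)                            # 1x4x64x64 raw latents
    scaled = vae.scale_latents(latents).clamp(0, 1)     # latents mapped into [0, 1]
    to_pil_image(scaled[0].cpu()).save(dst_path)        # four channels -> RGBA PNG

# Example usage (hypothetical paths):
# encode_to_png("astronaut.png", "images/21.encoded.png")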
if __name__ == "__main__":
    app().launch()