Spaces:
Runtime error
Runtime error
File size: 1,543 Bytes
3cb13f6 6e27413 3cb13f6 6e27413 3cb13f6 6e27413 02dbbde 6e27413 02dbbde 6e27413 bd4432f 6e27413 3cb13f6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
import gradio as gr
import torch
import diffusers
from diffusers import DiffusionPipeline
from zero123 import Zero123Pipeline
# Monkey-patch the locally-defined Zero123Pipeline onto the diffusers module —
# presumably so the `trust_remote_code` pipeline loading below resolves this
# class instead of a remote implementation. NOTE(review): confirm this is the
# intended resolution mechanism for "ashawkey/stable-zero123-diffusers".
diffusers.Zero123Pipeline = Zero123Pipeline
# Lazily-initialized pipeline singleton: loading the model and moving it to the
# GPU is expensive, so it must happen once, not on every generation request.
_pipeline = None


def _get_pipeline():
    """Load the Stable-Zero123 pipeline on first use and cache it."""
    global _pipeline
    if _pipeline is None:
        _pipeline = DiffusionPipeline.from_pretrained(
            "ashawkey/stable-zero123-diffusers",
            torch_dtype=torch.float16, trust_remote_code=True)
        _pipeline.to('cuda:0')
    return _pipeline


def generate_view(source_img, elevation, azimuth, camera_distance, num_inference_steps):
    """Render a novel view of *source_img* from the requested camera pose.

    Args:
        source_img: PIL image of the object; resized to 256x256 RGB before use.
        elevation: camera elevation angle (degrees, per the model's convention
            — TODO confirm units against the Zero123 pipeline docs).
        azimuth: camera azimuth angle.
        camera_distance: distance of the camera from the object.
        num_inference_steps: number of diffusion denoising steps (cast to int).

    Returns:
        The first generated PIL image.
    """
    pipeline = _get_pipeline()

    # The pipeline expects a 256x256 RGB image.
    image = source_img.resize((256, 256)).convert("RGB")

    # Pose parameters are passed as fp16 tensors on the same device as the model.
    device = 'cuda:0'
    images = pipeline(
        [image],
        torch.tensor([elevation], dtype=torch.float16).to(device),
        torch.tensor([azimuth], dtype=torch.float16).to(device),
        torch.tensor([camera_distance], dtype=torch.float16).to(device),
        num_inference_steps=int(num_inference_steps)).images
    return images[0]
# Gradio UI: one image input plus the four camera/sampling controls, mapped
# positionally onto generate_view's parameters. (Fixed: the original file had a
# stray trailing "|" after iface.launch(), which is a SyntaxError.)
iface = gr.Interface(
    fn=generate_view,
    inputs=[
        gr.Image(type="pil", value="images/bottle.png"),
        gr.Number(label="elevation", value=0.),
        gr.Number(label="azimuth", value=45.),
        gr.Number(label="camera_distance", value=1.2),
        gr.Number(label="num_inference_steps", value=20),
    ],
    outputs=gr.Image())
iface.launch()