import gradio as gr
from diffusers import DiffusionPipeline
import torch
pipeline = DiffusionPipeline.from_pretrained("dreamlike-art/dreamlike-photoreal-2.0")
# move to GPU if available
if torch.cuda.is_available():
    pipeline = pipeline.to("cuda")
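# Optional (an assumption, not part of the original script): loading the weights in
# half precision can reduce GPU memory use, e.g.
# pipeline = DiffusionPipeline.from_pretrained(
#     "dreamlike-art/dreamlike-photoreal-2.0", torch_dtype=torch.float16
# ).to("cuda")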
def generate(prompts):
    # With batch=True, Gradio passes a list of prompts; the pipeline returns one image per prompt.
    images = pipeline(list(prompts)).images
    # Batched functions return one list of results per output component.
    return [images]
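# Note (an assumption, not in the original file): per-call generation settings such as
# num_inference_steps or guidance_scale could be passed to the pipeline here, e.g.
# images = pipeline(list(prompts), num_inference_steps=25, guidance_scale=7.5).images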
demo = gr.Interface(
    generate,
    "textbox",
    "image",
    batch=True,
    max_batch_size=4,  # Set the batch size based on your CPU/GPU memory
).queue()
if __name__ == "__main__":
    demo.launch()