import gradio as gr
from accelerate import Accelerator
from optimum.onnxruntime import ORTStableDiffusionPipeline

# Load the ONNX-exported Stable Diffusion pipeline and prepare it for CPU-only execution.
accelerator = Accelerator(cpu=True)
pipe = accelerator.prepare(ORTStableDiffusionPipeline.from_pretrained("JoPmt/AbsoluteReality-onnx"))
# Keep inference on the CPU execution provider.
pipe.to("cpu")

def plex(prompt, neg_prompt, stips, scaly):
    # Run the pipeline and collect the generated PIL images for the gallery output.
    apol = []
    result = pipe(prompt=prompt, negative_prompt=neg_prompt, num_inference_steps=stips, guidance_scale=scaly)
    for imze in result.images:
        apol.append(imze)
    return apol

iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Negative prompt", value="low quality, bad quality"),
        gr.Slider(label="Number of inference steps", minimum=1, step=1, maximum=30, value=25),
        gr.Slider(label="Guidance scale", minimum=1, step=1, maximum=10, value=7),
    ],
    outputs=gr.Gallery(label="Generated Output Image", columns=1),
    description="Running on CPU, very slow! by JoPmt.",
)
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)