"""Launch "PicassoBot": a Gradio demo for Stable Diffusion image generation.

Builds the UI directly from the hosted ``stabilityai/stable-diffusion-2-1``
model via ``gr.load`` (Hugging Face inference API), so no local pipeline
weights are required. Running this module starts the web server.
"""

import gradio as gr

# Shut down any demos left over from a previous run so the default port is free.
gr.close_all()

# gr.load constructs an Interface from the hosted model's inference endpoint;
# the "models/" prefix tells Gradio to resolve the name on the Hugging Face Hub.
demo = gr.load(
    name="models/stabilityai/stable-diffusion-2-1",
    title="PicassoBot",
    description="Because paint splatters are so last century",
)

demo.launch()