import gradio as gr
from authtoken import auth_token  # Hugging Face access token defined in authtoken.py
import torch
import torch.cuda.amp as amp
from diffusers import StableDiffusionPipeline

modelid = "CompVis/stable-diffusion-v1-4"

# Default to CPU; switch to GPU when one is available.
device = torch.device("cpu")
if torch.cuda.is_available():
    device = torch.device("cuda")

pipe = StableDiffusionPipeline.from_pretrained(modelid, use_auth_token=auth_token)
pipe.to(device)


def generate(prompt):
    # Mixed precision only helps on CUDA; keep autocast disabled on CPU.
    with torch.no_grad(), amp.autocast(enabled=device.type == "cuda"):
        image = pipe(prompt, guidance_scale=8.5).images[0]
    image.save("generatedimage.png")
    return image


def predict_text(prompt):
    image = generate(prompt)
    return image


def predict_image(input_image):
    # Not wired into the interface below: saves the uploaded image and
    # reads the prompt from the server console rather than the UI.
    input_image.save("input_image.png")
    prompt = input("Enter your prompt: ")
    image = generate(prompt)
    return image


iface = gr.Interface(
    fn=predict_text,
    inputs="text",
    outputs="image",
)
iface.launch(share=True)