import gradio as gr
from PIL import Image
from authtoken import auth_token
import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler


model_id = "stabilityai/stable-diffusion-2-1"

# Prefer the GPU when one is available; half precision only makes sense on CUDA.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32

# Load the pipeline (auth_token authenticates against the Hugging Face Hub)
# and use the DPMSolverMultistepScheduler (DPM-Solver++) scheduler instead
# of the default.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id, torch_dtype=dtype, use_auth_token=auth_token
)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(device)

def generate(prompt):
    # Inference only, so disable gradient tracking.
    with torch.no_grad():
        image = pipe(prompt, guidance_scale=8.5).images[0]

    image.save('generatedimage.png')
    return image

def predict_text(prompt):
    image = generate(prompt)
    return image

def predict_image(input_image):
    # Note: this handler is not wired into the interface below, and input()
    # blocks on the terminal, so it only works when run interactively.
    input_image.save('input_image.png')
    prompt = input("Enter your prompt: ")
    image = generate(prompt)
    return image

iface = gr.Interface(
    fn=predict_text,
    inputs="text",
    outputs="image",
)
iface.launch()