import random  # only used by the optional seeding snippet below

import torch
from diffusers import StableDiffusionPipeline

import gradio

# Load the fine-tuned Stable Diffusion checkpoint on the CPU in full precision.
pipe = StableDiffusionPipeline.from_pretrained(
    "taltaf9133/finetuned-stable-diffusion-log", torch_dtype=torch.float32
)  # .to('cuda')
# pipe.enable_xformers_memory_efficient_attention()
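
# A minimal GPU variant (a sketch, assuming a CUDA device is available): load the
# same checkpoint in half precision and move it to the GPU for faster sampling.
#   pipe = StableDiffusionPipeline.from_pretrained(
#       "taltaf9133/finetuned-stable-diffusion-log", torch_dtype=torch.float16
#   ).to("cuda")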

prompt = "tv with sofa, realistic, hd, vivid"
negative_prompt = "bad anatomy, ugly, deformed, disfigured, distorted, blurry, low quality, low definition, lowres, out of frame, out of image, cropped, cut off, signature, watermark"
num_samples = 1
guidance_scale = 7.5
num_inference_steps = 5
height = 512
width = 512

# Optional: fix a seed for reproducible results (intended for the GPU setup).
# seed = random.randint(0, 2147483647)
# print("Seed: {}".format(str(seed)))
# generator = torch.Generator(device='cuda').manual_seed(seed)

def predict(prompt, negative_prompt):
    """Generate a single image for the given prompt and negative prompt."""
    # with torch.autocast("cuda"), torch.inference_mode():
    img = pipe(
            prompt,
            negative_prompt=negative_prompt,
            height=height, width=width,
            num_images_per_prompt=num_samples,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            # generator=generator
        ).images[0]
    return img
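
# Optional local smoke test (a sketch, not part of the Gradio app; "sample.png" is
# a hypothetical output path): uncomment to generate one image with the defaults
# above and save it to disk before launching the UI.
#   predict(prompt, negative_prompt).save("sample.png")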

title = "Stable Diffusion Demo"
description = "Generate images from a text prompt with a fine-tuned Stable Diffusion model."


# Input from the user
neg_p = "bad anatomy, ugly, deformed, disfigured, distorted, blurry, low quality, low definition, lowres, out of frame, out of image, cropped, cut off, signature, watermark"
in_prompt = gradio.Textbox(lines=5, value="ldg with scn style", label='Enter prompt')
in_neg_prompt = gradio.Textbox(lines=5, value=neg_p, label='Enter negative prompt')

# Output response
out_response = gradio.Image(label="Generated image")

# Create the Gradio demo
demo = gradio.Interface(fn=predict,  # mapping function from inputs to the generated image
                        inputs=[in_prompt, in_neg_prompt],
                        outputs=out_response,
                        title=title,
                        description=description)

# Launch the demo!
demo.launch(debug=True)