File size: 2,009 Bytes
9d29870
 
 
 
83ca111
9d29870
 
 
 
 
 
 
 
 
 
bbea880
 
9d29870
 
bbea880
 
 
 
 
9d29870
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
05745ef
 
9d29870
 
 
 
 
 
 
 
f605d64
9d29870
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image, make_image_grid
from PIL import Image
import numpy as np
import gradio as gr
import torch
import cv2
import random

def launch(image_input, prompt, negative_prompt, sd_models_dropdown):
    """Gradio entry point: forward the UI inputs straight to predict()."""
    result = predict(image_input, prompt, negative_prompt, sd_models_dropdown)
    return result

def predict(image_input, prompt, negative_prompt, sd_models_dropdown):
    """Run ControlNet-guided Stable Diffusion on a Canny edge map of the input.

    Args:
        image_input: filepath to the uploaded image (the gr.Image input uses
            type="filepath", so this is a path string, not an array).
        prompt: positive text prompt from the UI.
        negative_prompt: negative text prompt from the UI.
        sd_models_dropdown: Hugging Face model id selected in the dropdown.

    Returns:
        A PIL.Image produced by the pipeline.
    """
    # load_image accepts a local path (or URL) and returns a PIL image;
    # np.array on the raw filepath string would give a 0-d string array
    # and crash cv2.Canny.
    image = np.array(load_image(image_input))

    # Canny edge map used as the ControlNet conditioning image.
    t_lower = 100
    t_upper = 200
    edges = cv2.Canny(image, t_lower, t_upper)
    # ControlNet canny checkpoints expect a 3-channel image, so stack the
    # single-channel edge map.
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    canny_image = Image.fromarray(edges)

    # Build the ControlNet-conditioned pipeline: the plain
    # StableDiffusionPipeline was never imported, and ControlNet requires
    # an explicit controlnet model.
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny",
        torch_dtype=torch.float16,
    )
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        sd_models_dropdown,
        controlnet=controlnet,
        torch_dtype=torch.float16,
    )
    # UniPC converges in fewer steps than the default scheduler.
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")

    # Fresh random seed per request so repeated calls vary.
    generator = torch.manual_seed(random.randint(0, 1000))

    # NOTE: the user's prompt is used as-is (the previous version overwrote
    # it with a hard-coded test prompt). `strength` was dropped: it is an
    # img2img parameter not accepted by the ControlNet text2img pipeline.
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=65,
        generator=generator,
        image=canny_image,
        guidance_scale=15.5,
    ).images[0]

    return result

# SYSTEM INPUTS

# type="filepath" means the handler receives a path string, not an array.
image_input = gr.Image(label="Upload image candidate", type="filepath")
prompt = gr.Textbox(type="text", label="Prompt")
negative_prompt = gr.Textbox(type="text", label="Negative prompt")

# Base Stable Diffusion checkpoints selectable by the user.
sd_models_dropdown = gr.Dropdown(
    [
        "runwayml/stable-diffusion-v1-5",
    ],
    label="Stable Diffusion Models")

# SYSTEM OUTPUTS
output_image = gr.Image()

# Wire the inputs/outputs to the launch() handler.
demo = gr.Interface(
    launch,
    inputs=[image_input, prompt, negative_prompt, sd_models_dropdown],
    outputs=output_image,
    title="Stable Diffusion with ControlNet",
)

if __name__ == "__main__":
    demo.launch()