groggy84 committed on
Commit
523e494
1 Parent(s): fe14c40
__pycache__/app.cpython-38.pyc ADDED
Binary file (2.1 kB). View file
 
__pycache__/app.cpython-39.pyc CHANGED
Binary files a/__pycache__/app.cpython-39.pyc and b/__pycache__/app.cpython-39.pyc differ
 
app.py CHANGED
@@ -1,4 +1,10 @@
1
  import gradio as gr
 
 
 
 
 
 
2
 
3
  is_show_controlnet = True
4
  prompts = ""
@@ -10,8 +16,38 @@ def change_radio(input):
10
  def output_radio(output):
11
  print(output)
12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  with gr.Blocks() as demo:
14
- gr.Markdown("# Greetings from Gradio!")
15
 
16
  with gr.Row():
17
  with gr.Column() as controlnet:
@@ -19,19 +55,21 @@ with gr.Blocks() as demo:
19
  canny_image = gr.Image(label="cannyimage", visible=is_show_controlnet , shape=(512,512), interactive=True)
20
 
21
  controlnet_radio = gr.Radio([True, False], label="Use ControlNet")
22
- lt = gr.Slider(1, 50, 3, step=1, label="Low threshold")
23
- ht = gr.Slider(1, 50, 3, step=1, label="High threshold")
24
 
25
  with gr.Column():
26
  out_image = gr.Image()
 
 
 
27
 
28
- prompt = gr.Textbox(placeholder="prompts", label="prompt")
29
- neg_prompt = gr.Textbox(placeholder="negative prompts", label="negative prompt")
30
-
31
- ins = gr.Slider(1, 50, 3, label="inference steps")
32
- gs = gr.Slider(1, 50, 3, label="guidance scale")
33
 
 
34
  btn1 = gr.Button("실행")
 
35
 
36
  if __name__ == "__main__":
37
  demo.launch()
 
1
  import gradio as gr
2
+ from diffusers import StableDiffusionPipeline, ControlNetModel, StableDiffusionControlNetPipeline
3
+ from diffusers.utils import load_image
4
+ import torch
5
+ import cv2
6
+ import numpy as np
7
+ from PIL import Image
8
 
9
  is_show_controlnet = True
10
  prompts = ""
 
16
  def output_radio(output):
17
  print(output)
18
 
19
+ def predict(canny, lt, ht, prompt, neg_prompt, ins, gs, seed):
20
+ print(canny, lt, ht, prompt, neg_prompt, ins, gs)
21
+ '''
22
+ np_image = np.array(canny)
23
+
24
+ low_threshold = lt
25
+ high_threshold = ht
26
+
27
+ np_image = cv2.Canny(np_image, low_threshold, high_threshold)
28
+ np_image = np_image[:, :, None]
29
+ np_image = np.concatenate([np_image, np_image, np_image], axis=2)
30
+ canny_image = Image.fromarray(np_image)
31
+ controlnet_repo_id = "calihyper/kor-portrait-controlnet"
32
+ controlnet = ControlNetModel.from_pretrained(controlnet_repo_id, torch_dtype=torch.float16)
33
+ '''
34
+ repo_id = "calihyper/trad-kor-landscape-black"
35
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
36
+ repo_id, torch_dtype=torch.float16
37
+ )
38
+ generator = torch.manual_seed(seed)
39
+
40
+ output = pipe(
41
+ prompt,
42
+ negative_prompt=neg_prompt,
43
+ generator=generator,
44
+ num_inference_steps=ins,
45
+ guidance_scale=gs
46
+ )
47
+ return output.images[0]
48
+
49
  with gr.Blocks() as demo:
50
+ gr.Markdown("# Aiffelthon Choosa Project")
51
 
52
  with gr.Row():
53
  with gr.Column() as controlnet:
 
55
  canny_image = gr.Image(label="cannyimage", visible=is_show_controlnet , shape=(512,512), interactive=True)
56
 
57
  controlnet_radio = gr.Radio([True, False], label="Use ControlNet")
58
+ lt = gr.Slider(50, 300, 120, step=1, label="Low threshold")
59
+ ht = gr.Slider(50, 300, 120, step=1, label="High threshold")
60
 
61
  with gr.Column():
62
  out_image = gr.Image()
63
+ with gr.Column() as diff:
64
+ prompt = gr.Textbox(placeholder="prompts", label="prompt")
65
+ neg_prompt = gr.Textbox(placeholder="negative prompts", value=neg_prompt, label="negative prompt")
66
 
67
+ ins = gr.Slider(1, 60, 30, label="inference steps")
68
+ gs = gr.Slider(1, 10, 2.5, step=1, label="guidance scale")
 
 
 
69
 
70
+ seed = gr.Slider(0, 10, 2, step=1, label="seed")
71
  btn1 = gr.Button("실행")
72
+ btn1.click(predict, [canny_image, lt, ht, prompt, neg_prompt, ins, gs, seed], out_image)
73
 
74
  if __name__ == "__main__":
75
  demo.launch()