groggy84 committed on
Commit
79118df
1 Parent(s): fe1bb47

add application file

Browse files
Files changed (2) hide show
  1. app.py +78 -0
  2. requirements.txt +14 -0
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from diffusers import StableDiffusionPipeline, ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image
import torch
import cv2
import numpy as np
from PIL import Image

# UI / prompt defaults.
is_show_controlnet = True
prompts = ""
neg_prompt = "chinese letter"

# Pretrained checkpoints: a ControlNet conditioned on Canny edges and a
# Stable Diffusion model fine-tuned on traditional Korean painting styles.
controlnet_repo_id = "calihyper/trad-kor-controlnet"
repo_id = "calihyper/trad-kor-landscape-black"
controlnet = ControlNetModel.from_pretrained(controlnet_repo_id)
# BUG FIX: requirements.txt pins the CPU-only torch wheel index, so an
# unconditional .to("cuda") crashes on hosts without a GPU. Pick the
# device at runtime instead.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet).to(device)
17
+
18
+
19
def change_radio(input):
    """Identity passthrough used as a Gradio event handler: echo the value back."""
    selected = input
    return selected
21
+
22
def output_radio(output):
    """Debug helper: write the selected radio value to stdout; returns None."""
    print(output)
    return None
24
+
25
def predict(canny, lt, ht, prompt, style_prompt, neg_prompt, ins, gs, seed):
    """Generate an image with Canny-edge ControlNet conditioning.

    Args:
        canny: input image (PIL image or array-like) to extract edges from.
        lt: low Canny threshold.
        ht: high Canny threshold.
        prompt: positive prompt text; concatenated with style_prompt.
        style_prompt: style token appended directly to the prompt.
        neg_prompt: negative prompt text.
        ins: number of denoising inference steps (coerced to int).
        gs: classifier-free guidance scale.
        seed: RNG seed for reproducible generation (coerced to int).

    Returns:
        The first generated PIL image.
    """
    # Canny yields a single-channel edge map; replicate it to 3 channels
    # because the ControlNet conditioning image is expected to be RGB.
    np_image = cv2.Canny(np.array(canny), lt, ht)
    np_image = np_image[:, :, None]
    np_image = np.concatenate([np_image, np_image, np_image], axis=2)
    canny_image = Image.fromarray(np_image)

    # BUG FIX: Gradio sliders may deliver floats, but torch.manual_seed
    # and the scheduler's num_inference_steps both require ints.
    generator = torch.manual_seed(int(seed))

    output = pipe(
        prompt + style_prompt,
        canny_image,
        negative_prompt=neg_prompt,
        generator=generator,
        num_inference_steps=int(ins),
        guidance_scale=gs,
    )
    return output.images[0]
47
+
48
# Build the Gradio UI: left column = ControlNet edge inputs, middle = output
# image, right = prompt / sampling controls.
with gr.Blocks() as demo:
    gr.Markdown("# Aiffelthon Choosa Project")

    with gr.Row():
        # BUG FIX: the original bound this Column to the name `controlnet`,
        # clobbering the module-level ControlNetModel object; renamed.
        with gr.Column() as controlnet_col:
            canny_image = gr.Image(label="input_image", visible=is_show_controlnet, shape=(512, 512), interactive=True)

            controlnet_radio = gr.Radio([True, False], label="Use ControlNet")
            lt = gr.Slider(50, 300, 120, step=1, label="Low threshold")
            ht = gr.Slider(50, 300, 120, step=1, label="High threshold")

        with gr.Column():
            out_image = gr.Image()
        with gr.Column() as diff:
            prompt = gr.Textbox(placeholder="prompts", label="prompt")
            style_prompt = gr.Textbox(placeholder="style prompts", label="style prompt")
            examples = gr.Examples(
                examples=[
                    "<trad-kor-landscape-black>",
                    "<trad-kor-landscape-ink-wash-painting>",
                    "<trad-kor-landscape-thick-brush-strokes>",
                    "<trad-kor-plants-black>",
                    "<trad-kor-plants-color>",
                ],
                inputs=style_prompt,
                label="style examples",
            )

            # BUG FIX: the original rebound the module-level `neg_prompt`
            # string to this Textbox; renamed so the default stays intact.
            neg_prompt_tb = gr.Textbox(placeholder="negative prompts", value=neg_prompt, label="negative prompt")

            ins = gr.Slider(1, 60, 30, step=1, label="inference steps")
            # BUG FIX: the default 2.5 was unreachable with step=1; use 0.5.
            gs = gr.Slider(1, 10, 2.5, step=0.5, label="guidance scale")

            seed = gr.Slider(0, 10, 2, step=1, label="seed")
            btn1 = gr.Button("실행")
            btn1.click(predict, [canny_image, lt, ht, prompt, style_prompt, neg_prompt_tb, ins, gs, seed], out_image)

if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -f https://download.pytorch.org/whl/cpu/torch_stable.html
2
+ -f https://data.pyg.org/whl/torch-2.0.0+cpu.html
3
+ torch
4
+ torchvision
5
+ diffusers
6
+ gradio
7
+ Pillow
8
+ numpy
9
+ transformers
10
+ # (duplicate "torchvision" entry removed — already listed above)
11
+ ftfy
12
+ altair
13
+ opencv-python
14
+ accelerate