AisingioroHao0 committed
Commit 7a4c31b
Parent: c74d280
Files changed (2):
  1. README.md +1 -1
  2. app.py +152 -154
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: ✏️
 colorFrom: yellow
 colorTo: gray
 sdk: gradio
-sdk_version: 4.0.2
+sdk_version: 3.48.0
 app_file: app.py
 pinned: true
 license: apache-2.0
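
The SDK pin is the entire README change. A likely motivation, not stated in the commit itself: app.py builds its image widgets with gr.Image(shape=(512, 512)), and Gradio 4.x removed the shape argument from gr.Image, so the Space pins the SDK back to a 3.x release. A minimal sketch of the incompatibility, under that assumption:

import gradio as gr

# Under gradio 3.x (e.g. 3.48.0) this constructs an image component that
# resizes uploads to 512x512. Under gradio 4.x the `shape` keyword was
# removed from gr.Image, so the same call raises a TypeError.
# (Assumption: this is the reason for pinning sdk_version to 3.48.0.)
prompt_input = gr.Image(shape=(512, 512), label="prompt")
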
app.py CHANGED
@@ -11,174 +11,172 @@ import cv2
 import numpy as np
 import os
 import torch
-
-if __name__ == "__main__":
-
-    print(f"Is CUDA available: {torch.cuda.is_available()}")
-    if torch.cuda.is_available():
-        device = "cuda"
-    else:
-        device = "cpu"
-
-    automatic_coloring_pipeline = StableDiffusionReferenceOnlyPipeline.from_pretrained(
-        "AisingioroHao0/stable-diffusion-reference-only-automatic-coloring-0.1.2"
-    ).to(device)
-    automatic_coloring_pipeline.scheduler = UniPCMultistepScheduler.from_config(
-        automatic_coloring_pipeline.scheduler.config
-    )
-
-    segment_model = get_anime_segmentation_model(
-        model_path=huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.ckpt")
-    ).to(device)
-
-    def character_segment(img):
-        if img is None:
-            return None
-        img = anime_character_segment(segment_model, img)
-        img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
-        return img
-
-    def color_inversion(img):
-        if img is None:
-            return None
-        return 255 - img
-
-
-    def get_line_art(img):
-        if img is None:
-            return None
-        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-        img = cv2.adaptiveThreshold(
-            img,
-            255,
-            cv2.ADAPTIVE_THRESH_MEAN_C,
-            cv2.THRESH_BINARY,
-            blockSize=5,
-            C=7,
-        )
-        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
-        return img
-
-
-    def inference(prompt, blueprint, num_inference_steps):
-        if prompt is None or blueprint is None:
-            return None
-        return np.array(
-            automatic_coloring_pipeline(
-                prompt=Image.fromarray(prompt),
-                blueprint=Image.fromarray(blueprint),
-                num_inference_steps=num_inference_steps,
-            ).images[0]
-        )
-
-
-    def automatic_coloring(prompt, blueprint, num_inference_steps):
-        if prompt is None or blueprint is None:
-            return None
-        blueprint = color_inversion(blueprint)
-        return inference(prompt, blueprint, num_inference_steps)
-
-
-    def style_transfer(prompt, blueprint, num_inference_steps):
-        if prompt is None or blueprint is None:
-            return None
-        prompt = character_segment(prompt)
-        blueprint = character_segment(blueprint)
-        blueprint = get_line_art(blueprint)
-        blueprint = color_inversion(blueprint)
-        return inference(prompt, blueprint, num_inference_steps)
-    with gr.Blocks() as demo:
-        gr.Markdown(
-            """
-            # Stable Diffusion Reference Only Automatic Coloring 0.1.2\n\n
-            demo for [https://github.com/aihao2000/stable-diffusion-reference-only](https://github.com/aihao2000/stable-diffusion-reference-only)
-            """
-        )
-        with gr.Row():
-            with gr.Column():
-                prompt_input_compoent = gr.Image(shape=(512, 512), label="prompt")
-                prompt_character_segment_button = gr.Button(
-                    "character segment",
-                )
-                prompt_character_segment_button.click(
-                    character_segment,
-                    inputs=prompt_input_compoent,
-                    outputs=prompt_input_compoent,
-                )
-            with gr.Column():
-                blueprint_input_compoent = gr.Image(shape=(512, 512), label="blueprint")
-                blueprint_character_segment_button = gr.Button("character segment")
-                blueprint_character_segment_button.click(
-                    character_segment,
-                    inputs=blueprint_input_compoent,
-                    outputs=blueprint_input_compoent,
-                )
-                get_line_art_button = gr.Button(
-                    "get line art",
-                )
-                get_line_art_button.click(
-                    get_line_art,
-                    inputs=blueprint_input_compoent,
-                    outputs=blueprint_input_compoent,
-                )
-                color_inversion_button = gr.Button(
-                    "color inversion",
-                )
-                color_inversion_button.click(
-                    color_inversion,
-                    inputs=blueprint_input_compoent,
-                    outputs=blueprint_input_compoent,
-                )
-            with gr.Column():
-                result_output_component = gr.Image(shape=(512, 512), label="result")
-                num_inference_steps_input_component = gr.Number(
-                    20, label="num inference steps", minimum=1, maximum=1000, step=1
-                )
-                inference_button = gr.Button("inference")
-                inference_button.click(
-                    inference,
-                    inputs=[
-                        prompt_input_compoent,
-                        blueprint_input_compoent,
-                        num_inference_steps_input_component,
-                    ],
-                    outputs=result_output_component,
-                )
-                automatic_coloring_button = gr.Button("automatic coloring")
-                automatic_coloring_button.click(
-                    automatic_coloring,
-                    inputs=[
-                        prompt_input_compoent,
-                        blueprint_input_compoent,
-                        num_inference_steps_input_component,
-                    ],
-                    outputs=result_output_component,
-                )
-                style_transfer_button = gr.Button("style transfer")
-                style_transfer_button.click(
-                    style_transfer,
-                    inputs=[
-                        prompt_input_compoent,
-                        blueprint_input_compoent,
-                        num_inference_steps_input_component,
-                    ],
-                    outputs=result_output_component,
-                )
-        with gr.Row():
-            gr.Examples(
-                examples=[
-                    [
-                        os.path.join(
-                            os.path.dirname(__file__), "README.assets", "3x9_prompt.png"
-                        ),
-                        os.path.join(
-                            os.path.dirname(__file__), "README.assets", "3x9_blueprint.png"
-                        ),
-                    ],
-                ],
-                inputs=[prompt_input_compoent, blueprint_input_compoent],
-                outputs=result_output_component,
-                fn=lambda x, y: None,
-                cache_examples=True,
-            )
+print(f"Is CUDA available: {torch.cuda.is_available()}")
+if torch.cuda.is_available():
+    device = "cuda"
+else:
+    device = "cpu"
+
+automatic_coloring_pipeline = StableDiffusionReferenceOnlyPipeline.from_pretrained(
+    "AisingioroHao0/stable-diffusion-reference-only-automatic-coloring-0.1.2"
+).to(device)
+automatic_coloring_pipeline.scheduler = UniPCMultistepScheduler.from_config(
+    automatic_coloring_pipeline.scheduler.config
+)
+
+segment_model = get_anime_segmentation_model(
+    model_path=huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.ckpt")
+).to(device)
+
+def character_segment(img):
+    if img is None:
+        return None
+    img = anime_character_segment(segment_model, img)
+    img = cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)
+    return img
+
+def color_inversion(img):
+    if img is None:
+        return None
+    return 255 - img
+
+
+def get_line_art(img):
+    if img is None:
+        return None
+    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+    img = cv2.adaptiveThreshold(
+        img,
+        255,
+        cv2.ADAPTIVE_THRESH_MEAN_C,
+        cv2.THRESH_BINARY,
+        blockSize=5,
+        C=7,
+    )
+    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
+    return img
+
+
+def inference(prompt, blueprint, num_inference_steps):
+    if prompt is None or blueprint is None:
+        return None
+    return np.array(
+        automatic_coloring_pipeline(
+            prompt=Image.fromarray(prompt),
+            blueprint=Image.fromarray(blueprint),
+            num_inference_steps=num_inference_steps,
+        ).images[0]
+    )
+
+
+def automatic_coloring(prompt, blueprint, num_inference_steps):
+    if prompt is None or blueprint is None:
+        return None
+    blueprint = color_inversion(blueprint)
+    return inference(prompt, blueprint, num_inference_steps)
+
+
+def style_transfer(prompt, blueprint, num_inference_steps):
+    if prompt is None or blueprint is None:
+        return None
+    prompt = character_segment(prompt)
+    blueprint = character_segment(blueprint)
+    blueprint = get_line_art(blueprint)
+    blueprint = color_inversion(blueprint)
+    return inference(prompt, blueprint, num_inference_steps)
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+# Stable Diffusion Reference Only Automatic Coloring 0.1.2\n\n
+demo for [https://github.com/aihao2000/stable-diffusion-reference-only](https://github.com/aihao2000/stable-diffusion-reference-only)
+"""
+    )
+    with gr.Row():
+        with gr.Column():
+            prompt_input_compoent = gr.Image(shape=(512, 512), label="prompt")
+            prompt_character_segment_button = gr.Button(
+                "character segment",
+            )
+            prompt_character_segment_button.click(
+                character_segment,
+                inputs=prompt_input_compoent,
+                outputs=prompt_input_compoent,
+            )
+        with gr.Column():
+            blueprint_input_compoent = gr.Image(shape=(512, 512), label="blueprint")
+            blueprint_character_segment_button = gr.Button("character segment")
+            blueprint_character_segment_button.click(
+                character_segment,
+                inputs=blueprint_input_compoent,
+                outputs=blueprint_input_compoent,
+            )
+            get_line_art_button = gr.Button(
+                "get line art",
+            )
+            get_line_art_button.click(
+                get_line_art,
+                inputs=blueprint_input_compoent,
+                outputs=blueprint_input_compoent,
+            )
+            color_inversion_button = gr.Button(
+                "color inversion",
+            )
+            color_inversion_button.click(
+                color_inversion,
+                inputs=blueprint_input_compoent,
+                outputs=blueprint_input_compoent,
+            )
+        with gr.Column():
+            result_output_component = gr.Image(shape=(512, 512), label="result")
+            num_inference_steps_input_component = gr.Number(
+                20, label="num inference steps", minimum=1, maximum=1000, step=1
+            )
+            inference_button = gr.Button("inference")
+            inference_button.click(
+                inference,
+                inputs=[
+                    prompt_input_compoent,
+                    blueprint_input_compoent,
+                    num_inference_steps_input_component,
+                ],
+                outputs=result_output_component,
+            )
+            automatic_coloring_button = gr.Button("automatic coloring")
+            automatic_coloring_button.click(
+                automatic_coloring,
+                inputs=[
+                    prompt_input_compoent,
+                    blueprint_input_compoent,
+                    num_inference_steps_input_component,
+                ],
+                outputs=result_output_component,
+            )
+            style_transfer_button = gr.Button("style transfer")
+            style_transfer_button.click(
+                style_transfer,
+                inputs=[
+                    prompt_input_compoent,
+                    blueprint_input_compoent,
+                    num_inference_steps_input_component,
+                ],
+                outputs=result_output_component,
+            )
+    with gr.Row():
+        gr.Examples(
+            examples=[
+                [
+                    os.path.join(
+                        os.path.dirname(__file__), "README.assets", "3x9_prompt.png"
+                    ),
+                    os.path.join(
+                        os.path.dirname(__file__), "README.assets", "3x9_blueprint.png"
+                    ),
+                ],
+            ],
+            inputs=[prompt_input_compoent, blueprint_input_compoent],
+            outputs=result_output_component,
+            fn=lambda x, y: None,
+            cache_examples=True,
+        )
+if __name__ == "__main__":
     demo.queue(max_size=10).launch()
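
The app.py change is purely structural: the pipeline setup, the helper functions, and the Blocks UI move out of the if __name__ == "__main__": guard to module level, and only the launch call stays guarded. A plausible reading, again not stated in the commit: a Gradio-SDK Space imports app.py rather than running it as a script, so the module-level demo object must exist at import time. A minimal sketch of the resulting layout:

import gradio as gr

# Built at import time, so a host that does `import app` (as a Gradio
# Space does) finds a module-level `demo` to serve.
with gr.Blocks() as demo:
    gr.Markdown("minimal placeholder UI")

# Only direct execution (`python app.py`) starts a local server.
if __name__ == "__main__":
    demo.queue(max_size=10).launch()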