Commit d459b6a by Fabrice-TIERCELIN
Parent(s): 752f198

Add logs
Files changed: gradio_demo.py (+20 -17)

gradio_demo.py CHANGED
@@ -69,40 +69,38 @@ if torch.cuda.device_count() > 0:
 
 @spaces.GPU(duration=120)
 def stage1_process(input_image, gamma_correction):
+    print('Start stage1_process')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return None
-
+    if input_image is None:
+        gr.Warning('Please provide an image to restore.')
+        return None
     torch.cuda.set_device(SUPIR_device)
-    print ("stage1_process 2")
     LQ = HWC3(input_image)
-    print ("stage1_process 3")
     LQ = fix_resize(LQ, 512)
-    print ("stage1_process 4")
     # stage1
     LQ = np.array(LQ) / 255 * 2 - 1
-    print ("stage1_process 5")
     LQ = torch.tensor(LQ, dtype=torch.float32).permute(2, 0, 1).unsqueeze(0).to(SUPIR_device)[:, :3, :, :]
-    print ("stage1_process 6")
     LQ = model.batchify_denoise(LQ, is_stage1=True)
-    print ("stage1_process 7")
     LQ = (LQ[0].permute(1, 2, 0) * 127.5 + 127.5).cpu().numpy().round().clip(0, 255).astype(np.uint8)
     # gamma correction
     LQ = LQ / 255.0
-    print ("stage1_process 8")
     LQ = np.power(LQ, gamma_correction)
-    print ("stage1_process 9")
     LQ *= 255.0
-    print ("stage1_process 10")
     LQ = LQ.round().clip(0, 255).astype(np.uint8)
-    print
+    print('End stage1_process')
     return LQ
 
 @spaces.GPU(duration=120)
 def llave_process(input_image, temperature, top_p, qs=None):
+    print('Start llave_process')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return 'Set this space to GPU config to make it work.'
+    if input_image is None:
+        gr.Warning('Please provide an image to restore.')
+        return 'Please provide an image to restore.'
     torch.cuda.set_device(LLaVA_device)
     if use_llava:
         LQ = HWC3(input_image)
@@ -110,15 +108,20 @@ def llave_process(input_image, temperature, top_p, qs=None):
         captions = llava_agent.gen_image_caption([LQ], temperature=temperature, top_p=top_p, qs=qs)
     else:
         captions = ['LLaVA is not available. Please add text manually.']
+    print('End llave_process')
     return captions[0]
 
 @spaces.GPU(duration=120)
 def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale, edm_steps, s_stage1, s_stage2,
                    s_cfg, seed, s_churn, s_noise, color_fix_type, diff_dtype, ae_dtype, gamma_correction,
                    linear_CFG, linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2, model_select):
+    print('Start stage2_process')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return None, None, None, None
+    if input_image is None:
+        gr.Warning('Please provide an image to restore.')
+        return None, None, None, None
     torch.cuda.set_device(SUPIR_device)
     event_id = str(time.time_ns())
     event_dict = {'event_id': event_id, 'localtime': time.ctime(), 'prompt': prompt, 'a_prompt': a_prompt,
@@ -130,14 +133,12 @@ def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale
                   'model_select': model_select}
 
     if model_select != model.current_model:
+        print('load ' + model_select)
         if model_select == 'v0-Q':
-            print('load v0-Q')
             model.load_state_dict(ckpt_Q, strict=False)
-            model.current_model = 'v0-Q'
         elif model_select == 'v0-F':
-            print('load v0-F')
             model.load_state_dict(ckpt_F, strict=False)
-            model.current_model = 'v0-F'
+        model.current_model = model_select
     input_image = HWC3(input_image)
     input_image = upscale_image(input_image, upscale, unit_resolution=32,
                                 min_size=1024)
@@ -174,10 +175,11 @@ def stage2_process(input_image, prompt, a_prompt, n_prompt, num_samples, upscale
     Image.fromarray(input_image).save(f'./history/{event_id[:5]}/{event_id[5:]}/LQ.png')
     for i, result in enumerate(results):
         Image.fromarray(result).save(f'./history/{event_id[:5]}/{event_id[5:]}/HQ_{i}.png')
+    print('End stage2_process')
     return [input_image] + results, event_id, 3, ''
 
-@spaces.GPU(duration=120)
 def load_and_reset(param_setting):
+    print('Start load_and_reset')
     if torch.cuda.device_count() == 0:
         gr.Warning('Set this space to GPU config to make it work.')
         return None, None, None, None, None, None, None, None, None, None, None, None, None
@@ -204,6 +206,7 @@ def load_and_reset(param_setting):
         spt_linear_CFG = default_setting.spt_linear_CFG_Fidelity
     else:
         raise NotImplementedError
+    print('End load_and_reset')
     return edm_steps, s_cfg, s_stage2, s_stage1, s_churn, s_noise, a_prompt, n_prompt, color_fix_type, linear_CFG, \
            linear_s_stage2, spt_linear_CFG, spt_linear_s_stage2
 
@@ -237,7 +240,7 @@ else:
 ⚠️SUPIR is still a research project under tested and is not yet a stable commercial product.
 
 <a href="https://arxiv.org/abs/2401.13627">Paper</a>   <a href="http://supir.xpixel.group/">Project Page</a>   <a href="https://github.com/Fanghua-Yu/SUPIR/blob/master/assets/DemoGuide.png">How to play</a>   <a href="https://huggingface.co/blog/MonsterMMORPG/supir-sota-image-upscale-better-than-magnific-ai">Local Install Guide</a>
-<p style="background-color: blue;">For now, only the
+<p style="background-color: blue;">For now, only the restoring is working (the most important one). The pre-denoising and LLaVa description are failing. LLaVa is disabled.</p>
 """
 
 
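Every handler touched by this commit gets the same treatment: a start log on entry, an early return with a user-facing warning when no image was uploaded, and an end log just before returning. Below is a minimal, standalone sketch of that guard-and-log pattern; `warn_user` and the toy gamma step are hypothetical stand-ins for `gr.Warning` and the real SUPIR pipeline, so only the control flow mirrors the diff.

```python
import numpy as np

def warn_user(message: str) -> None:
    # Hypothetical stand-in for gr.Warning: surface a message to the user.
    print(f'[warning] {message}')

def stage1_process_sketch(input_image, gamma_correction=1.0):
    print('Start stage1_process')  # entry log, as added in this commit
    if input_image is None:        # guard against a missing upload
        warn_user('Please provide an image to restore.')
        return None                # early return skips the heavy GPU path
    # Toy gamma correction standing in for the real denoise/restore steps.
    result = np.power(np.asarray(input_image, dtype=np.float64) / 255.0, gamma_correction)
    result = (result * 255.0).round().clip(0, 255).astype(np.uint8)
    print('End stage1_process')    # exit log, as added in this commit
    return result

# Usage: a missing image only warns and returns None; a real image is processed.
print(stage1_process_sketch(None))
print(stage1_process_sketch(np.full((2, 2, 3), 128, dtype=np.uint8), gamma_correction=0.5).shape)
```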