Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-
 import cv2
 import gradio as gr
 import torch
@@ -8,107 +7,108 @@ from gfpgan.utils import GFPGANer
 from realesrgan.utils import RealESRGANer
 import spaces

 os.system("pip freeze")
-
-if not
-
-
-
-
-
-
-
-
-
-os.
-
-
-#
 model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
 model_path = 'realesr-general-x4v3.pth'
-half =
-

 os.makedirs('output', exist_ok=True)

-
-# def inference(img, version, scale, weight):
 @spaces.GPU(enable_queue=True)
 def inference(img, version, scale):
     print(img, version, scale)
     if scale > 4:
         scale = 4
     try:
         extension = os.path.splitext(os.path.basename(str(img)))[1]
         img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
         if len(img.shape) == 3 and img.shape[2] == 4:
             img_mode = 'RGBA'
         elif len(img.shape) == 2:
-            img_mode = None
             img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
         else:
             img_mode = None

-        h, w = img.shape[
         if h > 3500 or w > 3500:
             print('too large size')
             return None, None
-
         if h < 300:
             img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)

         if version == 'v1.2':
-            face_enhancer = GFPGANer(
-                model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
         elif version == 'v1.3':
-            face_enhancer = GFPGANer(
-                model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
         elif version == 'v1.4':
-            face_enhancer = GFPGANer(
-                model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
         elif version == 'RestoreFormer':
-            face_enhancer = GFPGANer(
-
-
-
-
-
-
-
-
-
-                h, w = img.shape[0:2]
-                output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
-        except Exception as error:
-            print('wrong scale input.', error)
-        if img_mode == 'RGBA':
-            extension = 'png'
-        else:
-            extension = 'jpg'
         save_path = f'output/out.{extension}'
         cv2.imwrite(save_path, output)

         output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
         return output, save_path
     except Exception as error:
         print('global exception', error)
         return None, None

-
 description = "⚠ Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."

 demo = gr.Interface(
-    inference,
         gr.Image(type="filepath", label="Input"),
         gr.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'], type="value", value='v1.4', label='version'),
-        gr.Number(label="Rescaling factor", value=2)
-
-
         gr.Image(type="numpy", label="Output (The whole image)"),
         gr.File(label="Download the output image")
     ],
     description=description,
-

 demo.queue(max_size=50).launch()
The resulting app.py:

import os
import cv2
import gradio as gr
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from realesrgan.utils import RealESRGANer
import spaces

# List packages (optional debug)
os.system("pip freeze")

# Download weights if not already present
weights = {
    'realesr-general-x4v3.pth': 'https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth',
    'GFPGANv1.2.pth': 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth',
    'GFPGANv1.3.pth': 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth',
    'GFPGANv1.4.pth': 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
    'RestoreFormer.pth': 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth',
    'CodeFormer.pth': 'https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth'
}

for file_name, url in weights.items():
    if not os.path.exists(file_name):
        os.system(f"wget {url} -P .")

# Load Real-ESRGAN for background upscaling
model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
model_path = 'realesr-general-x4v3.pth'
half = torch.cuda.is_available()

upsampler = RealESRGANer(
    scale=4,
    model_path=model_path,
    model=model,
    tile=0,
    tile_pad=10,
    pre_pad=0,
    half=half
)

os.makedirs('output', exist_ok=True)

@spaces.GPU(enable_queue=True)
def inference(img, version, scale):
    print(img, version, scale)
    if scale > 4:
        scale = 4

    try:
        extension = os.path.splitext(os.path.basename(str(img)))[1]
        img = cv2.imread(img, cv2.IMREAD_UNCHANGED)

        if len(img.shape) == 3 and img.shape[2] == 4:
            img_mode = 'RGBA'
        elif len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            img_mode = None
        else:
            img_mode = None

        h, w = img.shape[:2]
        if h > 3500 or w > 3500:
            print('too large size')
            return None, None

        if h < 300:
            img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)

        if version == 'v1.2':
            face_enhancer = GFPGANer(model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'v1.3':
            face_enhancer = GFPGANer(model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'v1.4':
            face_enhancer = GFPGANer(model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
        elif version == 'RestoreFormer':
            face_enhancer = GFPGANer(model_path='RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler)

        _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)

        if scale != 2:
            interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
            h, w = img.shape[:2]
            output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)

        extension = 'png' if img_mode == 'RGBA' else 'jpg'
        save_path = f'output/out.{extension}'
        cv2.imwrite(save_path, output)

        output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
        return output, save_path

    except Exception as error:
        print('global exception', error)
        return None, None

description = "⚠ Sorry for the inconvenience. The Space is currently running on the CPU, which might affect performance. We appreciate your understanding."

demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(type="filepath", label="Input"),
        gr.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'], type="value", value='v1.4', label='version'),
        gr.Number(label="Rescaling factor", value=2)
    ],
    outputs=[
        gr.Image(type="numpy", label="Output (The whole image)"),
        gr.File(label="Download the output image")
    ],
    description=description,
    theme="Yntec/HaleyCH_Theme_Orange"
)

demo.queue(max_size=50).launch()
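For reference, a minimal sketch of exercising the updated inference function directly, e.g. from a Python session after running everything in app.py up to (but not including) the final demo.queue(max_size=50).launch() call. The input path face.jpg is a placeholder rather than a file shipped with the Space, and outside a ZeroGPU Space the @spaces.GPU decorator is expected to act as a pass-through.

# Hypothetical local smoke test; 'face.jpg' is a placeholder input path.
restored, saved_path = inference('face.jpg', 'v1.4', 2)
if restored is not None:
    print('Restored image saved to', saved_path)  # e.g. output/out.jpg
else:
    print('Enhancement failed; see the exception printed above.')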