Update app.py
app.py CHANGED
@@ -105,7 +105,7 @@ def patch2img(outs, idxes, sr_size, scale=4, crop_size=512):
     return (preds / count_mt).to(outs.device)
 
 
-def load_img (filename, norm=True,):
+def load_img (filename, norm=True):
     img = np.array(Image.open(filename).convert("RGB"))
     h, w = img.shape[:2]
 
@@ -116,20 +116,21 @@ def load_img (filename, norm=True,):
 
 
 def inference(image, upscale, large_input_flag, color_fix):
+    model = set_safmn(upscale)
+
+    img = np.array(image)
+    img = img.astype(np.float32) / 255.
+
+    # img2tensor
+    y = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
+    y = y.unsqueeze(0).to(device)
+
     upscale = int(upscale) # convert type to int
     if upscale > 4:
         upscale = 4
     if 0 < upscale < 3:
         upscale = 2
 
-    model = set_safmn(upscale)
-    img = np.array(image)
-    img = img / 255.
-    img = img.astype(np.float32)
-
-    # img2tensor
-    y = torch.tensor(img).permute(2,0,1).unsqueeze(0).to(device)
-
     # inference
     if large_input_flag:
         patches, idx, size = img2patch(y, scale=upscale)
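For reference, a minimal standalone sketch of the image-to-tensor conversion that the added lines perform, assuming the NumPy/PyTorch stack already used in app.py; the to_tensor helper, the device fallback, and the dummy input below are illustrative and not part of this commit. The fancy indexing img[:, :, [2, 1, 0]] flips RGB to BGR (a common convention when the model was trained on OpenCV-loaded images), np.transpose reorders HWC to CHW, and unsqueeze(0) adds the batch dimension before moving the tensor to the device.

import numpy as np
import torch

# Assumed here for a self-contained example; app.py defines its own `device`.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

def to_tensor(img_rgb):
    """HxWx3 uint8 RGB array -> 1x3xHxW float tensor in [0, 1], channels flipped to BGR."""
    img = img_rgb.astype(np.float32) / 255.
    # RGB -> BGR via channel indexing, then HWC -> CHW via transpose
    y = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
    return y.unsqueeze(0).to(device)

# Usage with a random 64x64 RGB image (illustrative input only)
dummy = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
print(to_tensor(dummy).shape)  # torch.Size([1, 3, 64, 64])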