Commit 577ec0e
Parent(s): 667b581

Add ghost pixel thresholding support with thresholding flip button

Files changed:
- handler.py +127 -12
- test_api_endpoint.ipynb +1 -1
- test_data/0cd7ea5b-a6fc-4cbb-a536-57d9dbcc12b4.png +3 -0
- test_data/1f0d03a1-9f4b-4927-ad48-de84abc09eaf.png +3 -0
- test_hander.py +0 -0
- test_handler.ipynb +53 -23
- test_handler_thresholding.ipynb +0 -0
handler.py
CHANGED
@@ -110,6 +110,7 @@ class EndpointHandler:
110 |       - 'inputs': A dictionary with the following keys:
111 |           - 'image_url' (str): The URL of the image to be upscaled.
112 |           - 'outscale' (float): The scaling factor for the upscaling process.
113 | +         - 'apply_thresholding' (bool): Whether to apply thresholding to the upscaled image.
114 |
115 |   Returns:
116 |       Dict[str, List[float]]: A dictionary containing the results of the upscaling process, which includes:
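For reference, a minimal client-side sketch of the request contract documented above; the endpoint URL, auth header, and the success-response keys are assumptions (only the 'inputs' fields and the error shape appear in this diff):

import requests

# Placeholders -- the real endpoint URL and token are not part of this commit.
API_URL = "https://your-inference-endpoint.example.com"
headers = {"Authorization": "Bearer <HF_TOKEN>", "Content-Type": "application/json"}

payload = {
    "inputs": {
        "image_url": "https://example.com/sprite.png",  # hypothetical test image
        "outscale": 3,
        "apply_thresholding": True,  # new flag in this commit; defaults to False
    }
}

response = requests.post(API_URL, headers=headers, json=payload)
result = response.json()
# On failure the handler returns {"image_url": None, "image_key": None, "error": "..."};
# on success it is assumed to return the uploaded S3 image_url and image_key.
print(result)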

@@ -121,11 +122,13 @@ class EndpointHandler:
122 |   ############################################################
123 |   # get inputs and download image
124 |   ############################################################
    | - self.logger.info(">>> 1/
125 | + self.logger.info(">>> 1/8: GETTING INPUTS....")
126 |   try:
127 |       inputs = data.pop("inputs", data)
128 |       outscale = float(inputs.pop("outscale", 3))
129 | +     apply_thresholding = inputs.pop("apply_thresholding", False)
130 |       self.logger.info(f"outscale: {outscale}")
131 | +     self.logger.info(f"apply_thresholding: {apply_thresholding}")
132 |       image_url = inputs["image_url"]
133 |   except Exception as e:
134 |       self.logger.error(f"Error getting inputs: {e}")

@@ -143,7 +146,7 @@ class EndpointHandler:
146 |   ############################################################
147 |   # run assertions
148 |   ############################################################
    | - self.logger.info(">>> 2/
149 | + self.logger.info(">>> 2/8: RUNNING ASSERTIONS ON IMAGE....")
150 |
151 |   # get image size and mode
152 |   in_size, in_mode = image.size, image.mode

@@ -151,7 +154,7 @@ class EndpointHandler:
154 |
155 |   # check image size and mode and return dict
156 |   try:
    | - assert in_mode in
157 | +     assert in_mode in Image.MODES, f"Unsupported image mode: {in_mode}"
158 |       if self.tiling_size == 0:
159 |           assert in_size[0] * in_size[1] < self.max_image_size, f"Image is too large: {in_size}: {in_size[0] * in_size[1]} is greater than {self.max_image_size}"
160 |       assert outscale > 1 and outscale <= 10, f"Outscale must be between 1 and 10: {outscale}"

@@ -163,7 +166,7 @@ class EndpointHandler:
166 |   ############################################################
167 |   # Convert RGB to BGR (PIL uses RGB, OpenCV expects BGR)
168 |   ############################################################
    | - self.logger.info(f">>> 3/
169 | + self.logger.info(f">>> 3/8: CONVERTING IMAGE TO OPENCV BGR/BGRA FORMAT....")
170 |   try:
171 |       opencv_image = np.array(image)
172 |   except Exception as e:

@@ -188,7 +191,7 @@ class EndpointHandler:
191 |   ############################################################
192 |   # upscale image
193 |   ############################################################
    | - self.logger.info(f">>> 4/
194 | + self.logger.info(f">>> 4/8: UPSCALING IMAGE....")
195 |
196 |   try:
197 |       output, _ = self.model.enhance(opencv_image, outscale=outscale)

@@ -202,7 +205,7 @@ class EndpointHandler:
205 |   ############################################################
206 |   # convert to RGB/RGBA format
207 |   ############################################################
    | - self.logger.info(f">>> 5/
208 | + self.logger.info(f">>> 5/8: CONVERTING IMAGE TO RGB/RGBA FORMAT....")
209 |   out_shape = output.shape
210 |   if len(out_shape) == 3:
211 |       if out_shape[2] == 3:

@@ -213,22 +216,39 @@ class EndpointHandler:
216 |   output = cv2.cvtColor(output, cv2.COLOR_GRAY2RGB)
217 |
218 |
219 | + ###########################################################
220 |   # convert to PIL image
221 |   ############################################################
    | - self.logger.info(f">>> 6/
222 | + self.logger.info(f">>> 6/8: CONVERTING IMAGE TO PIL....")
    | - try
    | - img_byte_arr = BytesIO()
223 | + try:#
224 |       output = Image.fromarray(output)
225 |   except Exception as e:
226 |       self.logger.error(f"Error converting upscaled image to PIL: {e}")
227 |       return {"image_url": None, "image_key": None, "error": f"Failed to convert upscaled image to PIL: {e}"}
228 |
229 |
230 | + ############################################################
231 | + # apply thresholding
232 | + ############################################################
233 | + self.logger.info(f">>> 7/8: APPLYING THRESHOLDING....")
234 | + if apply_thresholding:
235 | +     try:
236 | +         if self.has_alpha_channel(image):
237 | +             self.logger.info(f"input image mode: {image.mode}, it has alpha channel, applying thresholding")
238 | +             output = self.apply_thresholding(image, output)
239 | +         else:
240 | +             self.logger.info(f"input image mode: {image.mode}, it does not have alpha channel, skipping thresholding")
241 | +     except Exception as e:
242 | +         self.logger.error(f"Error applying thresholding: {e}")
243 | +         return {"image_url": None, "image_key": None, "error": f"Failed to apply thresholding: {e}"}
244 | + else:
245 | +     self.logger.info(f"thresholding is not enabled, skipping thresholding")
246 | +
247 | +
248 |   ############################################################
249 |   # upload to s3
250 |   ############################################################
    | - self.logger.info(f">>>
251 | + self.logger.info(f">>> 8/8: UPLOADING IMAGE TO S3....")
252 |   try:
253 |       image_url, key = self.upload_to_s3(output)
254 |       self.logger.info(f"image uploaded to s3: {image_url}")

@@ -280,4 +300,99 @@ class EndpointHandler:
300 |   """
301 |   response = requests.get(image_url)
302 |   image = Image.open(BytesIO(response.content))
    | - return image
303 | + return image
304 | +
305 | + def has_alpha_channel(self, image: Image.Image) -> bool:
306 | +     """
307 | +     Check if the image has an alpha channel.
308 | +     """
309 | +     return image.mode in ("RGBA", "LA") or (image.mode == "P" and "transparency" in image.info)
310 | +
311 | + def replace_mask_with_edge_aware_clamping(
312 | +     self,
313 | +     original_img: Image.Image,
314 | +     esrgan_img: Image.Image,
315 | +     edge_buffer: int = 2,
316 | +     smooth_buffer: int = 10,
317 | +     default_lower: int = 10,
318 | +     default_upper: int = 245
319 | + ) -> Image.Image:
320 | +     """
321 | +     Apply edge-aware alpha channel clamping to merge original and ESRGAN images.
322 | +
323 | +     Args:
324 | +         original_img: Original image as PIL Image
325 | +         esrgan_img: ESRGAN upscaled image as PIL Image
326 | +         edge_buffer: Buffer for edge regions
327 | +         smooth_buffer: Buffer for smooth regions
328 | +         default_lower: Default lower threshold
329 | +         default_upper: Default upper threshold
330 | +
331 | +     Returns:
332 | +         PIL Image with merged alpha channel
333 | +     """
334 | +     # Convert images to RGBA if needed
335 | +     original_img = original_img.convert("RGBA")
336 | +     esrgan_img = esrgan_img.convert("RGBA")
337 | +     esr_w, esr_h = esrgan_img.size
338 | +
339 | +     # Upscale original alpha with bicubic resampling
340 | +     orig_alpha = original_img.getchannel("A")
341 | +     upscaled_alpha = orig_alpha.resize((esr_w, esr_h), resample=Image.Resampling.BICUBIC)
342 | +     alpha_arr = np.array(upscaled_alpha, dtype=np.uint8)
343 | +
344 | +     # Edge detection on original alpha (for alignment)
345 | +     edge_mask = cv2.Canny(alpha_arr, threshold1=50, threshold2=150)
346 | +     edge_mask = (edge_mask > 0).astype(np.uint8) * 255
347 | +
348 | +     # Determine dynamic cutoffs for edge vs. smooth regions
349 | +     semi_transparent_mask = (alpha_arr > 0) & (alpha_arr < 255)
350 | +     if np.any(semi_transparent_mask):
351 | +         min_alpha = int(np.min(alpha_arr[semi_transparent_mask]))
352 | +         max_alpha = int(np.max(alpha_arr[semi_transparent_mask]))
353 | +         lower_edge = max(0, min_alpha - edge_buffer)
354 | +         upper_edge = min(255, max_alpha + edge_buffer)
355 | +         lower_smooth = max(0, min_alpha - smooth_buffer)
356 | +         upper_smooth = min(255, max_alpha + smooth_buffer)
357 | +     else:
358 | +         lower_edge = lower_smooth = default_lower
359 | +         upper_edge = upper_smooth = default_upper
360 | +
361 | +     # Apply selective clamping based on edge regions
362 | +     clamped_alpha = alpha_arr.copy()
363 | +     clamped_alpha[edge_mask == 255] = np.clip(clamped_alpha[edge_mask == 255], lower_edge, upper_edge)
364 | +     clamped_alpha[edge_mask == 0] = np.clip(clamped_alpha[edge_mask == 0], lower_smooth, upper_smooth)
365 | +
366 | +     # Blend with ESRGAN's alpha (50% weight to original in edges)
367 | +     esrgan_alpha = np.array(esrgan_img.getchannel("A"), dtype=np.uint8)
368 | +     blended_alpha = np.where(edge_mask == 255,
369 | +                              (clamped_alpha * 0.5 + esrgan_alpha * 0.5).astype(np.uint8),
370 | +                              clamped_alpha)
371 | +
372 | +     # Replace and return
373 | +     esrgan_array = np.array(esrgan_img, dtype=np.uint8)
374 | +     esrgan_array[..., 3] = blended_alpha
375 | +     return Image.fromarray(esrgan_array, mode="RGBA")
376 | +
377 | + def apply_thresholding(self, input_image: Image.Image, esrgan_image: Image.Image):
378 | +     """
379 | +     Apply thresholding to the image to account for ghost pixels
380 | +
381 | +     Args:
382 | +         input_image (Image.Image): The input image.
383 | +         esrgan_image (Image.Image): The ESRGAN image.
384 | +     """
385 | +
386 | +     self.logger.info(f"Applying edge-aware clamping")
387 | +
388 | +     # Using method 3: Edge-aware clamping
389 | +     self.logger.info(f"Input image size: {input_image.size}")
390 | +     edge_aware_image = self.replace_mask_with_edge_aware_clamping(
391 | +         input_image,
392 | +         esrgan_image,
393 | +         edge_buffer=2,
394 | +         smooth_buffer=10
395 | +     )
396 | +     self.logger.info(f"Edge-aware clamping applied, image size: {edge_aware_image.size}")
397 | +
398 | +     return edge_aware_image
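For reference, a hedged sketch of exercising the two new helpers directly, outside the endpoint flow. The EndpointHandler constructor argument and the file names are hypothetical; only has_alpha_channel and apply_thresholding come from this commit:

from PIL import Image
from handler import EndpointHandler

handler = EndpointHandler(path=".")  # assumed constructor, per the usual custom-handler convention

original = Image.open("test_data/original.png")          # hypothetical RGBA input
upscaled = Image.open("test_data/original_esrgan.png")   # hypothetical Real-ESRGAN output

if handler.has_alpha_channel(original):
    # Merge the bicubically upscaled original alpha into the ESRGAN alpha,
    # clamping ghost pixels near edges (replace_mask_with_edge_aware_clamping above).
    merged = handler.apply_thresholding(original, upscaled)
    merged.save("test_data/original_thresholded.png")
else:
    print(f"{original.mode} has no alpha channel; thresholding would be skipped")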
test_api_endpoint.ipynb
CHANGED
@@ -55,7 +55,7 @@
55 |   "out_scales = [3, 3, 2.49]\n",
56 |   "for img_url, outscale in zip(img_urls, out_scales):\n",
57 |   "\t# create payload\n",
58 | - "\tpayload = {\"inputs\": {\"image_url\": img_url, \"outscale\": outscale}}\n",
58 | + "\tpayload = {\"inputs\": {\"image_url\": img_url, \"outscale\": outscale, \"apply_thresholding\": True}}\n",
59 |   "\t\t\n",
60 |   "\tresponse = requests.post(API_URL, headers=headers, json=payload)\n",
61 |   "\toutput_payload = response.json()\t\n",
test_data/0cd7ea5b-a6fc-4cbb-a536-57d9dbcc12b4.png
ADDED
Binary image added via Git LFS.
test_data/1f0d03a1-9f4b-4927-ad48-de84abc09eaf.png
ADDED
Binary image added via Git LFS.
test_hander.py
DELETED
File without changes
test_handler.ipynb
CHANGED
@@ -9,7 +9,7 @@
9   |   "name": "stderr",
10  |   "output_type": "stream",
11  |   "text": [
    | - "/
12  | + "/workspace/miniconda3/envs/real-esrgan-hf/lib/python3.10/site-packages/torchvision/transforms/functional_tensor.py:5: UserWarning: The torchvision.transforms.functional_tensor module is deprecated in 0.15 and will be **removed in 0.17**. Please don't rely on it. You probably just need to use APIs in torchvision.transforms.functional or in torchvision.transforms.v2.functional.\n",
13  |   " warnings.warn(\n"
14  |   ]
15  |   }

@@ -48,7 +48,7 @@
48  |   "INFO - model_path: /workspace/real-esrgan/weights/Real-ESRGAN-x4plus.pth\n",
49  |   "INFO - TILING_SIZE is 0, not using tiling\n",
50  |   "INFO - initializing model\n",
    | - "INFO - model initialized in
51  | + "INFO - model initialized in 90.91657543182373 seconds\n"
52  |   ]
53  |   }
54  |   ],

@@ -59,49 +59,79 @@
59  |   },
60  |   {
61  |   "cell_type": "code",
    | - "execution_count":
62  | + "execution_count": 6,
63  |   "metadata": {},
64  |   "outputs": [
65  |   {
66  |   "name": "stderr",
67  |   "output_type": "stream",
68  |   "text": [
    | - "INFO - >>> 1/
    | - "INFO - outscale:
    | - "INFO - >>> 5/7: CONVERTING IMAGE TO RGB/RGBA FORMAT....\n",
    | - "INFO - >>> 6/7: CONVERTING IMAGE TO PIL....\n",
    | - "INFO - >>> 7/7: UPLOADING IMAGE TO S3....\n",
    | - "INFO - image uploaded to s3: https://jiffy-staging-upscaled-images.s3.amazonaws.com/25b91e15-b785-47ca-81a6-ad5fbdf8b92a.png\n"
69  | + "INFO - >>> 1/8: GETTING INPUTS....\n",
70  | + "INFO - outscale: 2.3\n",
71  | + "INFO - apply_thresholding: True\n",
72  | + "INFO - downloading image from URL: https://jiffy-staging-transfers.imgix.net/2/ai-research/test-ghost-pixels/10816322.png\n",
73  | + "INFO - >>> 2/8: RUNNING ASSERTIONS ON IMAGE....\n",
74  | + "INFO - image.size: (1080, 1080), image.mode: RGBA\n",
75  | + "INFO - >>> 3/8: CONVERTING IMAGE TO OPENCV BGR/BGRA FORMAT....\n",
76  | + "INFO - converting RGBA image to BGRA\n",
77  | + "INFO - >>> 4/8: UPSCALING IMAGE....\n"
78  |   ]
79  |   },
80  |   {
81  |   "name": "stdout",
82  |   "output_type": "stream",
83  |   "text": [
    | - "https://jiffy-staging-
84  | + "https://jiffy-staging-transfers.imgix.net/2/ai-research/test-ghost-pixels/10816322.png 2.3\n"
85  | + ]
86  | + },
87  | + {
88  | + "name": "stderr",
89  | + "output_type": "stream",
90  | + "text": [
91  | + "INFO - output.shape: (2484, 2484, 4)\n",
92  | + "INFO - >>> 5/8: CONVERTING IMAGE TO RGB/RGBA FORMAT....\n",
93  | + "INFO - >>> 6/8: CONVERTING IMAGE TO PIL....\n",
94  | + "INFO - >>> 7/8: APPLYING THRESHOLDING....\n",
95  | + "INFO - input image mode: RGBA, it has alpha channel, applying thresholding\n",
96  | + "INFO - Applying edge-aware clamping\n",
97  | + "INFO - Input image size: (1080, 1080)\n",
98  | + "INFO - Edge-aware clamping applied, image size: (2484, 2484)\n",
99  | + "INFO - >>> 8/8: UPLOADING IMAGE TO S3....\n",
100 | + "INFO - image uploaded to s3: https://jiffy-staging-upscaled-images.s3.amazonaws.com/0cd7ea5b-a6fc-4cbb-a536-57d9dbcc12b4.png\n"
101 | + ]
102 | + },
103 | + {
104 | + "name": "stdout",
105 | + "output_type": "stream",
106 | + "text": [
107 | + "https://jiffy-staging-upscaled-images.s3.amazonaws.com/0cd7ea5b-a6fc-4cbb-a536-57d9dbcc12b4.png 0cd7ea5b-a6fc-4cbb-a536-57d9dbcc12b4.png\n"
108 |   ]
109 |   }
110 |   ],
111 |   "source": [
112 |   "img_dir = \"test_data/\"\n",
    | - "img_urls = [
113 | + "img_urls = [#\"https://jiffy-transfers.imgix.net/2/attachments/r267odvvfmkp6c5lccj1y6f9trb0\",\n",
114 | + "# \"https://jiffy-staging-transfers.imgix.net/2/development/attachments/zo31eau0ykhbwoddrjtlbyz6w9mp\", # larger than > 1.96M pixels\n",
115 | + "# \"https://jiffy-staging-transfers.imgix.net/2/development/attachments/b8ecchms9rr9wk3g71kfpfprqg1v\", # larger than > 1.96M pixels,\n",
116 | + "# \"https://jiffy-staging-transfers.imgix.net/2/ai-research/test-ghost-pixels/9B500B09.png\", # thresholding example 1\n",
117 | + " \"https://jiffy-staging-transfers.imgix.net/2/ai-research/test-ghost-pixels/10816322.png\" # thresholding example 2\n",
118 | + "\n",
119 |   " ]\n",
120 |   "\n",
    | - "out_scales = [4
121 | + "out_scales = [#4, \n",
122 | + " #3, \n",
123 | + " #2, \n",
124 | + " #2,\n",
125 | + " 2.3\n",
126 | + "]\n",
127 | + "\n",
128 |   "for img_url, outscale in zip(img_urls, out_scales):\n",
129 | + " print(img_url, outscale)\n",
130 |   " # create payload\n",
131 |   " payload = {\n",
132 |   " \"inputs\": {\"image_url\": img_url, \n",
    | - " \"outscale\": outscale
133 | + " \"outscale\": outscale,\n",
134 | + " \"apply_thresholding\": True,\n",
135 |   " }\n",
136 |   " }\n",
137 |   " \n",

@@ -134,7 +164,7 @@
164 |   "name": "python",
165 |   "nbconvert_exporter": "python",
166 |   "pygments_lexer": "ipython3",
    | - "version": "3.10.
167 | + "version": "3.10.16"
168 |   }
169 |   },
170 |   "nbformat": 4,
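For completeness, a sketch of the local call the notebook cell above appears to make. The handler construction and its callable interface are assumptions (the instantiation cell is not shown in this diff), while the image URL, outscale, and flag come from the cell itself:

from handler import EndpointHandler

my_handler = EndpointHandler(path=".")  # assumed constructor

payload = {
    "inputs": {
        "image_url": "https://jiffy-staging-transfers.imgix.net/2/ai-research/test-ghost-pixels/10816322.png",
        "outscale": 2.3,
        "apply_thresholding": True,
    }
}

result = my_handler(payload)  # assumes the handler is invoked as a callable
print(result.get("image_url"), result.get("image_key"))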
test_handler_thresholding.ipynb
ADDED
The diff for this file is too large to render.