English
Inference Endpoints
garg-aayush committed on
Commit
aa93695
·
1 Parent(s): 6efd64a

Update the codebase to log intermediate steps and to add try-except blocks

Browse files
Files changed (2) hide show
  1. handler.py +89 -37
  2. test_handler.ipynb +19 -11
handler.py CHANGED
@@ -14,6 +14,7 @@ import uuid, io
14
  import torch
15
  import base64
16
  import requests
 
17
 
18
 
19
  class EndpointHandler:
@@ -48,48 +49,96 @@ class EndpointHandler:
48
  # Get the S3 bucket name from environment variables
49
  self.bucket_name = os.environ["S3_BUCKET_NAME"]
50
 
 
 
 
 
51
 
52
  def __call__(self, data: Any) -> Dict[str, List[float]]:
53
 
54
  try:
55
-
56
- # get inputs
 
 
57
  inputs = data.pop("inputs", data)
58
 
59
  # get outscale
60
  outscale = float(inputs.pop("outscale", 3))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
- # decode base64 image to PIL
63
- image = self.download_image_url(inputs['image_url'])
64
  in_size, in_mode = image.size, image.mode
 
65
 
66
  # check image size and mode and return dict
67
- assert in_mode in ["RGB", "RGBA", "L"], f"Unsupported image mode: {in_mode}"
68
- if self.tiling_size == 0:
69
- assert in_size[0] * in_size[1] < 1400*1400, f"Image is too large: {in_size}: {in_size[0] * in_size[1]} is greater than {self.tiling_size*self.tiling_size}"
70
- assert outscale > 1 and outscale <=10, f"Outscale must be between 1 and 10: {outscale}"
 
 
 
 
71
 
72
- # debug
73
- print(f"image.size: {in_size}, image.mode: {in_mode}, outscale: {outscale}")
74
 
 
75
  # Convert RGB to BGR (PIL uses RGB, OpenCV expects BGR)
76
- opencv_image = np.array(image)
 
 
 
 
 
 
 
 
77
  if in_mode == "RGB":
 
78
  opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_RGB2BGR)
79
  elif in_mode == "RGBA":
 
80
  opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_RGBA2BGRA)
81
  elif in_mode == "L":
 
82
  opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_GRAY2RGB)
83
  else:
84
- raise ValueError(f"Unsupported image mode: {in_mode}")
 
 
85
 
86
- # enhance image
87
- output, _ = self.model.enhance(opencv_image, outscale=outscale)
 
 
88
 
 
 
 
 
 
89
  # debug
90
- print(f"output.shape: {output.shape}")
 
91
 
 
92
  # convert to RGB/RGBA format
 
 
93
  out_shape = output.shape
94
  if len(out_shape) == 3:
95
  if out_shape[2] == 3:
@@ -99,37 +148,40 @@ class EndpointHandler:
99
  else:
100
  output = cv2.cvtColor(output, cv2.COLOR_GRAY2RGB)
101
 
 
 
102
  # convert to PIL image
103
- img_byte_arr = BytesIO()
104
- output = Image.fromarray(output)
 
 
 
 
 
 
 
105
 
106
- # # save to BytesIO
107
- # output.save(img_byte_arr, format='PNG')
108
- # img_str = base64.b64encode(img_byte_arr.getvalue())
109
- # img_str = img_str.decode()
110
- image_url, key = self.upload_to_s3(output)
 
 
 
 
 
111
 
112
  return {"image_url": image_url,
113
  "image_key": key,
114
  "error": None
115
  }
116
 
117
- # handle errors
118
- except AssertionError as e:
119
- print(f"AssertionError: {e}")
120
- return {"out_image": None, "error": str(e)}
121
- except KeyError as e:
122
- print(f"KeyError: {e}")
123
- return {"out_image": None, "error": f"Missing key: {e}"}
124
- except ValueError as e:
125
- print(f"ValueError: {e}")
126
- return {"out_image": None, "error": str(e)}
127
- except PIL.UnidentifiedImageError as e:
128
- print(f"PIL.UnidentifiedImageError: {e}")
129
- return {"out_image": None, "error": "Invalid image format"}
130
  except Exception as e:
131
- print(f"Exception: {e}")
132
- return {"out_image": None, "error": "An unexpected error occurred"}
 
133
 
134
  def upload_to_s3(self, image):
135
  "Upload the image to s3 and return the url."
 
14
  import torch
15
  import base64
16
  import requests
17
+ import logging
18
 
19
 
20
  class EndpointHandler:
 
49
  # Get the S3 bucket name from environment variables
50
  self.bucket_name = os.environ["S3_BUCKET_NAME"]
51
 
52
+ # get the logging level from environment variables
53
+ logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')
54
+ self.logger = logging.getLogger(__name__)
55
+
56
 
57
  def __call__(self, data: Any) -> Dict[str, List[float]]:
58
 
59
  try:
60
+ ############################################################
61
+ # get inputs and download image
62
+ ############################################################
63
+ self.logger.info(">>> 1/7: GETTING INPUTS....")
64
  inputs = data.pop("inputs", data)
65
 
66
  # get outscale
67
  outscale = float(inputs.pop("outscale", 3))
68
+ self.logger.info(f"outscale: {outscale}")
69
+
70
+ # download image
71
+ try:
72
+ self.logger.info(f"downloading image from URL: {inputs['image_url']}")
73
+ image = self.download_image_url(inputs['image_url'])
74
+ except Exception as e:
75
+ logging.error(f"Error downloading image from URL: {inputs['image_url']}. Exception: {e}")
76
+ return {"out_image": None, "error": f"Failed to download image: {e}"}
77
+
78
+
79
+ ############################################################
80
+ # run assertions
81
+ ############################################################
82
+ self.logger.info(">>> 2/7: RUNNING ASSERTIONS ON IMAGE....")
83
 
84
+ # get image size and mode
 
85
  in_size, in_mode = image.size, image.mode
86
+ self.logger.info(f"image.size: {image.size}, image.mode: {image.mode}")
87
 
88
  # check image size and mode and return dict
89
+ try:
90
+ assert in_mode in ["RGB", "RGBA", "L"], f"Unsupported image mode: {in_mode}"
91
+ if self.tiling_size == 0:
92
+ assert in_size[0] * in_size[1] < 1400*1400, f"Image is too large: {in_size}: {in_size[0] * in_size[1]} is greater than {self.tiling_size*self.tiling_size}"
93
+ assert outscale > 1 and outscale <= 10, f"Outscale must be between 1 and 10: {outscale}"
94
+ except AssertionError as e:
95
+ self.logger.error(f"Assertion error: {e}")
96
+ return {"out_image": None, "error": str(e)}
97
 
 
 
98
 
99
+ ############################################################
100
  # Convert RGB to BGR (PIL uses RGB, OpenCV expects BGR)
101
+ ############################################################
102
+ self.logger.info(f">>> 3/7: CONVERTING IMAGE TO OPENCV BGR/BGRA FORMAT....")
103
+ try:
104
+ opencv_image = np.array(image)
105
+ except Exception as e:
106
+ self.logger.error(f"Error converting image to opencv format: {e}")
107
+ return {"out_image": None, "error": f"Failed to convert image to opencv format: {e}"}
108
+
109
+ # convert image to BGR
110
  if in_mode == "RGB":
111
+ self.logger.info(f"converting RGB image to BGR")
112
  opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_RGB2BGR)
113
  elif in_mode == "RGBA":
114
+ self.logger.info(f"converting RGBA image to BGRA")
115
  opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_RGBA2BGRA)
116
  elif in_mode == "L":
117
+ self.logger.info(f"converting grayscale image to BGR")
118
  opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_GRAY2RGB)
119
  else:
120
+ self.logger.error(f"Unsupported image mode: {in_mode}")
121
+ return {"out_image": None, "error": f"Unsupported image mode: {in_mode}"}
122
+
123
 
124
+ ############################################################
125
+ # upscale image
126
+ ############################################################
127
+ self.logger.info(f">>> 4/7: UPSCALING IMAGE....")
128
 
129
+ try:
130
+ output, _ = self.model.enhance(opencv_image, outscale=outscale)
131
+ except Exception as e:
132
+ self.logger.error(f"Error enhancing image: {e}")
133
+ return {"out_image": None, "error": "Image enhancement failed."}
134
  # debug
135
+ self.logger.info(f"output.shape: {output.shape}")
136
+
137
 
138
+ ############################################################
139
  # convert to RGB/RGBA format
140
+ ############################################################
141
+ self.logger.info(f">>> 5/7: CONVERTING IMAGE TO RGB/RGBA FORMAT....")
142
  out_shape = output.shape
143
  if len(out_shape) == 3:
144
  if out_shape[2] == 3:
 
148
  else:
149
  output = cv2.cvtColor(output, cv2.COLOR_GRAY2RGB)
150
 
151
+
152
+ ############################################################
153
  # convert to PIL image
154
+ ############################################################
155
+ self.logger.info(f">>> 6/7: CONVERTING IMAGE TO PIL....")
156
+ try:
157
+ img_byte_arr = BytesIO()
158
+ output = Image.fromarray(output)
159
+ except Exception as e:
160
+ self.logger.error(f"Error converting upscaled image to PIL: {e}")
161
+ return {"out_image": None, "error": f"Failed to convert upscaled image to PIL: {e}"}
162
+
163
 
164
+ ############################################################
165
+ # upload to s3
166
+ ############################################################
167
+ self.logger.info(f">>> 7/7: UPLOADING IMAGE TO S3....")
168
+ try:
169
+ image_url, key = self.upload_to_s3(output)
170
+ self.logger.info(f"image uploaded to s3: {image_url}")
171
+ except Exception as e:
172
+ self.logger.error(f"Error uploading image to s3: {e}")
173
+ return {"out_image": None, "error": f"Failed to upload image to s3: {e}"}
174
 
175
  return {"image_url": image_url,
176
  "image_key": key,
177
  "error": None
178
  }
179
 
180
+ # handle unexpected errors
 
 
 
 
 
 
 
 
 
 
 
 
181
  except Exception as e:
182
+ self.logger.error(f"An unexpected error occurred: {e}")
183
+ return {"out_image": None, "error": f"An unexpected error occurred: {e}"}
184
+
185
 
186
  def upload_to_s3(self, image):
187
  "Upload the image to s3 and return the url."
test_handler.ipynb CHANGED
@@ -33,7 +33,7 @@
33
  "os.environ[\"AWS_ACCESS_KEY_ID\"] = \"\"\n",
34
  "os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"\"\n",
35
  "os.environ[\"S3_BUCKET_NAME\"] = \"\"\n",
36
- "os.environ[\"TILING_SIZE\"] = \"1000\""
37
  ]
38
  },
39
  {
@@ -52,22 +52,29 @@
52
  "metadata": {},
53
  "outputs": [
54
  {
55
- "name": "stdout",
56
  "output_type": "stream",
57
  "text": [
58
- "image.size: (1024, 1024), image.mode: RGB, outscale: 4.0\n"
 
 
 
 
 
 
 
 
 
 
 
 
59
  ]
60
  },
61
  {
62
  "name": "stdout",
63
  "output_type": "stream",
64
  "text": [
65
- "\tTile 1/4\n",
66
- "\tTile 2/4\n",
67
- "\tTile 3/4\n",
68
- "\tTile 4/4\n",
69
- "output.shape: (4096, 4096, 3)\n",
70
- "https://jiffy-staging-upscaled-images.s3.amazonaws.com/d91323cb-0801-45b7-8109-9739212037ed.png d91323cb-0801-45b7-8109-9739212037ed.png\n"
71
  ]
72
  }
73
  ],
@@ -78,7 +85,7 @@
78
  " # \"https://jiffy-staging-transfers.imgix.net/2/development/attachments/b8ecchms9rr9wk3g71kfpfprqg1v?ixlib=rb-0.3.5\" # larger than > 1.96M pixels\n",
79
  " ]\n",
80
  "\n",
81
- "out_scales = [4, 3, 2]\n",
82
  "for img_url, outscale in zip(img_urls, out_scales):\n",
83
  " # create payload\n",
84
  " payload = {\n",
@@ -88,7 +95,8 @@
88
  " }\n",
89
  " \n",
90
  " output_payload = my_handler(payload)\n",
91
- " print(output_payload[\"image_url\"], output_payload[\"image_key\"])\n"
 
92
  ]
93
  },
94
  {
 
33
  "os.environ[\"AWS_ACCESS_KEY_ID\"] = \"\"\n",
34
  "os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"\"\n",
35
  "os.environ[\"S3_BUCKET_NAME\"] = \"\"\n",
36
+ "os.environ[\"TILING_SIZE\"] = \"0\""
37
  ]
38
  },
39
  {
 
52
  "metadata": {},
53
  "outputs": [
54
  {
55
+ "name": "stderr",
56
  "output_type": "stream",
57
  "text": [
58
+ "INFO - >>> 1/7: GETTING INPUTS....\n",
59
+ "INFO - outscale: 4.0\n",
60
+ "INFO - downloading image from URL: https://jiffy-transfers.imgix.net/2/attachments/r267odvvfmkp6c5lccj1y6f9trb0?ixlib=rb-0.3.5\n",
61
+ "INFO - >>> 2/7: RUNNING ASSERTIONS ON IMAGE....\n",
62
+ "INFO - image.size: (1024, 1024), image.mode: RGB\n",
63
+ "INFO - >>> 3/7: CONVERTING IMAGE TO OPENCV BGR/BGRA FORMAT....\n",
64
+ "INFO - converting RGB image to BGR\n",
65
+ "INFO - >>> 4/7: UPSCALING IMAGE....\n",
66
+ "INFO - output.shape: (4096, 4096, 3)\n",
67
+ "INFO - >>> 5/7: CONVERTING IMAGE TO RGB/RGBA FORMAT....\n",
68
+ "INFO - >>> 6/7: CONVERTING IMAGE TO PIL....\n",
69
+ "INFO - >>> 7/7: UPLOADING IMAGE TO S3....\n",
70
+ "INFO - image uploaded to s3: https://upscale-process-results.s3.amazonaws.com/fe21c683-ee2d-4e1d-9fbc-af56823c664c.png\n"
71
  ]
72
  },
73
  {
74
  "name": "stdout",
75
  "output_type": "stream",
76
  "text": [
77
+ "https://upscale-process-results.s3.amazonaws.com/fe21c683-ee2d-4e1d-9fbc-af56823c664c.png fe21c683-ee2d-4e1d-9fbc-af56823c664c.png\n"
 
 
 
 
 
78
  ]
79
  }
80
  ],
 
85
  " # \"https://jiffy-staging-transfers.imgix.net/2/development/attachments/b8ecchms9rr9wk3g71kfpfprqg1v?ixlib=rb-0.3.5\" # larger than > 1.96M pixels\n",
86
  " ]\n",
87
  "\n",
88
+ "out_scales = [4]#, 3, 2]\n",
89
  "for img_url, outscale in zip(img_urls, out_scales):\n",
90
  " # create payload\n",
91
  " payload = {\n",
 
95
  " }\n",
96
  " \n",
97
  " output_payload = my_handler(payload)\n",
98
+ " print(output_payload[\"image_url\"], output_payload[\"image_key\"])\n",
99
+ " \n"
100
  ]
101
  },
102
  {