Siromanec committed on
Commit ad705df
1 Parent(s): e3b8726

entries become suddenly empty

Files changed (2)
  1. handcrafted_solution.py +17 -24
  2. test_solution.ipynb +204 -0
handcrafted_solution.py CHANGED
@@ -110,13 +110,12 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
 
     vertices = np.concatenate([apex_centroids, eave_end_point_centroids])
 
-
     scale = 1
     vertex_size = np.zeros(vertices.shape[0])
     for i, coords in enumerate(vertices):
         # coords = np.round(coords).astype(np.uint32)
-        radius = 25#np.clip(int(max_depth//2 + depth_np[coords[1], coords[0]]), 10, 30)#int(np.clip(max_depth - depth_np[coords[1], coords[0]], 10, 20))
-        vertex_size[i] = (scale*radius) ** 2 # because we are using squared distances
+        radius = 25  # np.clip(int(max_depth//2 + depth_np[coords[1], coords[0]]), 10, 30)#int(np.clip(max_depth - depth_np[coords[1], coords[0]], 10, 20))
+        vertex_size[i] = (scale * radius) ** 2  # because we are using squared distances
 
     for edge_class in ['eave', 'ridge', 'rake', 'valley', 'flashing', 'step_flashing']:
         if len(vertices) < 2:
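Note: `vertex_size` stores the *squared* capture radius per vertex, so it can later be compared directly against the squared distances returned by `cdist(..., metric="sqeuclidean")` without taking square roots. A minimal sketch of that trick, with made-up coordinates and the same illustrative radius of 25:

import numpy as np
from scipy.spatial.distance import cdist

vertices = np.array([[10.0, 10.0], [50.0, 50.0]])
endpoints = np.array([[12.0, 11.0], [80.0, 80.0]])  # hypothetical segment endpoints
radius = 25
vertex_size = np.full(len(vertices), radius ** 2)   # squared, like (scale * radius) ** 2

sq_dists = cdist(vertices, endpoints, metric="sqeuclidean")
in_range = sq_dists < vertex_size[:, np.newaxis]    # True where an endpoint falls inside a vertex's radius
print(in_range)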
@@ -124,18 +123,13 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
         edge_color = np.array(gestalt_color_mapping[edge_class])
 
         mask = cv2.inRange(gest_seg_np,
-                           edge_color-color_range,
-                           edge_color+color_range)
+                           edge_color - color_range,
+                           edge_color + color_range)
         mask = cv2.morphologyEx(mask,
                                 cv2.MORPH_DILATE, np.ones((3, 3)), iterations=1)
 
-
-
         if np.any(mask):
 
-
-
-
             rho = 1  # distance resolution in pixels of the Hough grid
             theta = np.pi / 180  # angular resolution in radians of the Hough grid
             threshold = 20  # minimum number of votes (intersections in Hough grid cell)
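Note: the reformatted `cv2.inRange` call keeps pixels within `color_range` of the class color, and the dilation closes one-pixel gaps so the Hough step sees contiguous segments. A self-contained sketch, assuming `gest_seg_np` is an HxWx3 uint8 image and `color_range` a small per-channel tolerance (the color and tolerance below are invented, not from the repo):

import cv2
import numpy as np

gest_seg_np = np.zeros((100, 100, 3), dtype=np.uint8)
gest_seg_np[40:60, 20:80] = (119, 11, 32)   # stand-in for one edge-class color
edge_color = np.array([119, 11, 32])
color_range = 4                             # assumed tolerance

mask = cv2.inRange(gest_seg_np, edge_color - color_range, edge_color + color_range)
mask = cv2.morphologyEx(mask, cv2.MORPH_DILATE, np.ones((3, 3)), iterations=1)
print(mask.dtype, mask.max())               # uint8, 255 inside the matched band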
@@ -144,11 +138,10 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
 
             # Run Hough on edge detected image
             # Output "lines" is an array containing endpoints of detected line segments
-            cv2.GaussianBlur(mask, (11,11), 0, mask)
+            cv2.GaussianBlur(mask, (11, 11), 0, mask)
             lines = cv2.HoughLinesP(mask, rho, theta, threshold, np.array([]),
                                     min_line_length, max_line_gap)
 
-
             edges = []
 
             if lines is None:
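Note: `cv2.GaussianBlur(mask, (11, 11), 0, mask)` passes `mask` as the `dst` argument, so the blur is applied in place before the probabilistic Hough transform; `cv2.HoughLinesP` returns an (N, 1, 4) array of endpoint pairs, or None when nothing is found. A runnable sketch on a synthetic mask (the `rho`/`theta`/`threshold` values mirror the diff; the min-length/max-gap values are illustrative):

import cv2
import numpy as np

mask = np.zeros((200, 200), dtype=np.uint8)
cv2.line(mask, (10, 10), (180, 120), 255, 3)   # synthetic segment to detect

cv2.GaussianBlur(mask, (11, 11), 0, mask)      # in-place blur, as in the diff
lines = cv2.HoughLinesP(mask, 1, np.pi / 180, 20, np.array([]), 30, 40)
if lines is not None:
    for x1, y1, x2, y2 in lines[:, 0]:         # squeeze the middle axis of (N, 1, 4)
        print((x1, y1), "->", (x2, y2))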
@@ -156,7 +149,7 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
 
             line_directions = np.zeros((len(lines), 2))
             for line_idx, line in enumerate(lines):
-                for x1,y1,x2,y2 in line:
+                for x1, y1, x2, y2 in line:
                     extend = 35
                     if x1 < x2:
                         x1, y1, x2, y2 = x2, y2, x1, y1
@@ -166,16 +159,13 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
 
                     direction = extend * direction
 
-                    x1,y1 = (-direction + (x1, y1)).astype(np.int32)
-                    x2,y2 = (+ direction + (x2, y2)).astype(np.int32)
+                    x1, y1 = (-direction + (x1, y1)).astype(np.int32)
+                    x2, y2 = (+ direction + (x2, y2)).astype(np.int32)
 
                     edges.append((x1, y1, x2, y2))
 
-
-
-
             edges = np.array(edges)
-            if len(edges) <1:
+            if len(edges) < 1:
                 continue
             # calculate the distances between the vertices and the edge ends
             begin_distances = cdist(vertices, edges[:, :2], metric="sqeuclidean")
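Note: the two fixed assignments stretch every detected segment by `extend` pixels at both ends along its unit direction, so Hough detections that fall slightly short can still reach a vertex's capture radius. The arithmetic in isolation (coordinates invented; `direction` is assumed to be the normalized endpoint difference, as in the surrounding code):

import numpy as np

x1, y1, x2, y2 = 100, 100, 40, 60                  # invented endpoints, x1 > x2 as after the swap
direction = np.array([x2 - x1, y2 - y1], dtype=np.float64)
direction /= np.linalg.norm(direction)             # unit direction of the segment

extend = 35
direction = extend * direction
x1, y1 = (-direction + (x1, y1)).astype(np.int32)  # push the first endpoint backwards
x2, y2 = (+direction + (x2, y2)).astype(np.int32)  # push the second endpoint forwards
print((x1, y1), (x2, y2))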
@@ -187,7 +177,6 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
             in_range_connected_mask = np.logical_and(np.any(begin_in_range_mask, axis=0),
                                                      np.any(end_in_range_mask, axis=0))
 
-
             # where both ends are in range
             begin_in_range_mask = np.logical_and(begin_in_range_mask, in_range_connected_mask)
             end_in_range_mask = np.logical_and(end_in_range_mask, in_range_connected_mask)
@@ -201,7 +190,6 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
             begin_candidates = begin_candidates[:, sorted_begin_indices]
             end_candidates = end_candidates[:, sorted_end_indices]
 
-
             # create all possible connections between begin and end candidates that correspond to a line
             grouped_begins = np.split(begin_candidates[0], np.unique(begin_candidates[1], return_index=True)[1][1:])
             grouped_ends = np.split(end_candidates[0], np.unique(end_candidates[1], return_index=True)[1][1:])
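Note: the `np.unique(..., return_index=True)[1][1:]` / `np.split` idiom above partitions the candidate vertex indices (row 0) by the edge they belong to (row 1, sorted by the preceding argsort). A small example with invented data:

import numpy as np

candidates = np.array([[7, 3, 3, 9, 2],    # vertex indices
                       [0, 0, 1, 1, 2]])   # edge indices, sorted
split_points = np.unique(candidates[1], return_index=True)[1][1:]
groups = np.split(candidates[0], split_points)
print(groups)   # [array([7, 3]), array([3, 9]), array([2])]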
@@ -231,12 +219,13 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
 
             # precalculate the possible direction vectors
             possible_direction_vectors = vertices[possible_connections[0]] - vertices[possible_connections[1]]
-            possible_direction_vectors = possible_direction_vectors / np.linalg.norm(possible_direction_vectors, axis=1)[:, np.newaxis]
+            possible_direction_vectors = possible_direction_vectors / np.linalg.norm(possible_direction_vectors,
+                                                                                     axis=1)[:, np.newaxis]
 
             owned_lines_per_possible_connections = [list() for i in range(possible_connections.shape[1])]
 
             # assign lines to possible connections
-            for line_idx, i,j in zip(line_idx_list, begin_vertex_list, end_vertex_list):
+            for line_idx, i, j in zip(line_idx_list, begin_vertex_list, end_vertex_list):
                 if i == j:
                     continue
                 i, j = min(i, j), max(i, j)
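Note: the wrapped expression is ordinary row-wise normalization; `[:, np.newaxis]` turns the (N,) vector of norms into an (N, 1) column so each direction vector is divided by its own length. For example:

import numpy as np

v = np.array([[3.0, 4.0], [0.0, 2.0]])
unit = v / np.linalg.norm(v, axis=1)[:, np.newaxis]
print(unit)   # [[0.6 0.8], [0. 1.]]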
@@ -247,7 +236,8 @@ def get_vertices_and_edges_from_segmentation(gest_seg_np, edge_th=50.0):
 
             # check if the lines are in the same direction as the possible connection
             for fitted_line_idx, owned_lines_per_possible_connection in enumerate(owned_lines_per_possible_connections):
-                line_deviations = np.abs(np.dot(line_directions[owned_lines_per_possible_connection], possible_direction_vectors[fitted_line_idx]))
+                line_deviations = np.abs(np.dot(line_directions[owned_lines_per_possible_connection],
+                                                possible_direction_vectors[fitted_line_idx]))
                 if np.any(line_deviations > deviation_threshold):
                     connections.append(possible_connections[:, fitted_line_idx])
 
@@ -350,6 +340,9 @@ def prune_not_connected(all_3d_vertices, connections_3d):
 
 def predict(entry, visualize=False) -> Tuple[np.ndarray, List[int]]:
     good_entry = convert_entry_to_human_readable(entry)
+    if 'gestalt' not in good_entry or 'depthcm' not in good_entry or 'K' not in good_entry or 'R' not in good_entry or 't' not in good_entry:
+        print('Missing required fields in the entry')
+        return (good_entry['__key__'], *empty_solution())
     vert_edge_per_image = {}
     for i, (gest, depth, K, R, t) in enumerate(zip(good_entry['gestalt'],
                                                    good_entry['depthcm'],
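Note: the new guard is what makes `predict` return an empty solution instead of raising on malformed samples, which fits the commit message about entries suddenly coming back empty. A more compact equivalent, sketched here with a set difference instead of the chained `not in` tests (not what the diff does), which would also report which keys are absent:

REQUIRED_KEYS = {'gestalt', 'depthcm', 'K', 'R', 't'}

def has_required_fields(entry: dict) -> bool:
    # dict.keys() is set-like, so plain set difference works here
    missing = REQUIRED_KEYS - entry.keys()
    if missing:
        print(f'Missing required fields in the entry: {sorted(missing)}')
        return False
    return True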
 
test_solution.ipynb ADDED
@@ -0,0 +1,204 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "id": "initial_id",
+ "metadata": {
+ "collapsed": true,
+ "ExecuteTime": {
+ "end_time": "2024-05-26T20:37:02.290189Z",
+ "start_time": "2024-05-26T20:36:59.535549Z"
+ }
+ },
+ "source": [
+ "from pprint import pprint\n",
+ "\n",
+ "\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "import numpy as np\n",
+ "\n",
+ "\n",
+ "import hoho\n",
+ "from hoho import compute_WED\n",
+ "from hoho import vis\n",
+ "from hoho.vis import line\n",
+ "\n",
+ "from scipy.spatial.distance import cdist\n",
+ "import cv2\n",
+ "hoho.LOCAL_DATADIR = hoho.setup(\"../data\")\n",
+ "from handcrafted_solution import predict"
+ ],
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Using ..\\data as the data directory (we are running locally)\n"
+ ]
+ }
+ ],
+ "execution_count": 1
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": "# from ..s23dr-hoho-competition import handcrafted_solution",
+ "id": "4e9860e9763942ac"
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": "",
+ "id": "679a2b4f8e46a308"
+ },
+ {
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-05-26T20:29:23.028196Z",
+ "start_time": "2024-05-26T20:29:23.024659Z"
+ }
+ },
+ "cell_type": "code",
+ "source": "",
+ "id": "1146933b5aabff1c",
+ "outputs": [],
+ "execution_count": 2
+ },
+ {
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-05-26T20:37:02.297233Z",
+ "start_time": "2024-05-26T20:37:02.292201Z"
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "split = \"val\"\n",
+ "\n",
+ "dataset = hoho.get_dataset(decode=None, split=split)"
+ ],
+ "id": "55c712587c694e96",
+ "outputs": [],
+ "execution_count": 2
+ },
+ {
+ "metadata": {},
+ "cell_type": "code",
+ "outputs": [],
+ "execution_count": null,
+ "source": "",
+ "id": "7daaea23230c7bb6"
+ },
+ {
+ "metadata": {
+ "ExecuteTime": {
+ "end_time": "2024-05-26T20:40:18.110964Z",
+ "start_time": "2024-05-26T20:39:24.449965Z"
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "from tqdm import tqdm\n",
+ "\n",
+ "for i, sample in tqdm(enumerate(dataset)):\n",
+ "    # if i > 170:\n",
+ "    predict(sample, visualize=False)"
+ ],
+ "id": "f36ee7b8f0427f72",
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "2it [00:02, 1.26s/it]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Not enough vertices or connections in image 1\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "10it [00:15, 1.45s/it]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Not enough vertices or connections in image 0\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "29it [00:47, 1.22s/it]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Not enough vertices or connections in image 4\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "33it [00:52, 1.60s/it]\n"
+ ]
+ },
+ {
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
+ "\u001B[1;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
+ "Cell \u001B[1;32mIn[4], line 5\u001B[0m\n\u001B[0;32m 1\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtqdm\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m tqdm\n\u001B[0;32m 3\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m i, sample \u001B[38;5;129;01min\u001B[39;00m tqdm(\u001B[38;5;28menumerate\u001B[39m(dataset)):\n\u001B[0;32m 4\u001B[0m \u001B[38;5;66;03m# if i > 170:\u001B[39;00m\n\u001B[1;32m----> 5\u001B[0m predict(sample, visualize\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m)\n",
+ "File \u001B[1;32m~\\edu\\sem6\\cv\\huggingface\\s23dr-hoho-competition\\handcrafted_solution.py:342\u001B[0m, in \u001B[0;36mpredict\u001B[1;34m(entry, visualize)\u001B[0m\n\u001B[0;32m 341\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mpredict\u001B[39m(entry, visualize\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m Tuple[np\u001B[38;5;241m.\u001B[39mndarray, List[\u001B[38;5;28mint\u001B[39m]]:\n\u001B[1;32m--> 342\u001B[0m good_entry \u001B[38;5;241m=\u001B[39m convert_entry_to_human_readable(entry)\n\u001B[0;32m 343\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mgestalt\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m good_entry \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdepthcm\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m good_entry \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mK\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m good_entry \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mR\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m good_entry \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mt\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m good_entry:\n\u001B[0;32m 344\u001B[0m \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mMissing required fields in the entry\u001B[39m\u001B[38;5;124m'\u001B[39m)\n",
+ "File \u001B[1;32m~\\edu\\sem6\\cv\\huggingface\\s23dr-hoho-competition\\handcrafted_solution.py:95\u001B[0m, in \u001B[0;36mconvert_entry_to_human_readable\u001B[1;34m(entry)\u001B[0m\n\u001B[0;32m 85\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m v\n\u001B[0;32m 86\u001B[0m \u001B[38;5;28;01mcontinue\u001B[39;00m\n\u001B[0;32m 87\u001B[0m \u001B[38;5;28;01mmatch\u001B[39;00m k:\n\u001B[0;32m 88\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mpoints3d\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 89\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m read_points3D_binary(fid\u001B[38;5;241m=\u001B[39mio\u001B[38;5;241m.\u001B[39mBytesIO(v))\n\u001B[0;32m 90\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mcameras\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 91\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m read_cameras_binary(fid\u001B[38;5;241m=\u001B[39mio\u001B[38;5;241m.\u001B[39mBytesIO(v))\n\u001B[0;32m 92\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mimages\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 93\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m read_images_binary(fid\u001B[38;5;241m=\u001B[39mio\u001B[38;5;241m.\u001B[39mBytesIO(v))\n\u001B[0;32m 94\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124made20k\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;241m|\u001B[39m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mgestalt\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[1;32m---> 95\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m [PImage\u001B[38;5;241m.\u001B[39mopen(io\u001B[38;5;241m.\u001B[39mBytesIO(x))\u001B[38;5;241m.\u001B[39mconvert(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mRGB\u001B[39m\u001B[38;5;124m'\u001B[39m) \u001B[38;5;28;01mfor\u001B[39;00m x \u001B[38;5;129;01min\u001B[39;00m v]\n\u001B[0;32m 96\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdepthcm\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 97\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m [PImage\u001B[38;5;241m.\u001B[39mopen(io\u001B[38;5;241m.\u001B[39mBytesIO(x)) \u001B[38;5;28;01mfor\u001B[39;00m x \u001B[38;5;129;01min\u001B[39;00m entry[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdepthcm\u001B[39m\u001B[38;5;124m'\u001B[39m]]\n\u001B[0;32m 98\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m out\n",
+ "File \u001B[1;32m~\\edu\\sem6\\cv\\huggingface\\s23dr-hoho-competition\\handcrafted_solution.py:95\u001B[0m, in \u001B[0;36m<listcomp>\u001B[1;34m(.0)\u001B[0m\n\u001B[0;32m 85\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m v\n\u001B[0;32m 86\u001B[0m \u001B[38;5;28;01mcontinue\u001B[39;00m\n\u001B[0;32m 87\u001B[0m \u001B[38;5;28;01mmatch\u001B[39;00m k:\n\u001B[0;32m 88\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mpoints3d\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 89\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m read_points3D_binary(fid\u001B[38;5;241m=\u001B[39mio\u001B[38;5;241m.\u001B[39mBytesIO(v))\n\u001B[0;32m 90\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mcameras\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 91\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m read_cameras_binary(fid\u001B[38;5;241m=\u001B[39mio\u001B[38;5;241m.\u001B[39mBytesIO(v))\n\u001B[0;32m 92\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mimages\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 93\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m read_images_binary(fid\u001B[38;5;241m=\u001B[39mio\u001B[38;5;241m.\u001B[39mBytesIO(v))\n\u001B[0;32m 94\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124made20k\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;241m|\u001B[39m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mgestalt\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[1;32m---> 95\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m [PImage\u001B[38;5;241m.\u001B[39mopen(io\u001B[38;5;241m.\u001B[39mBytesIO(x))\u001B[38;5;241m.\u001B[39mconvert(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mRGB\u001B[39m\u001B[38;5;124m'\u001B[39m) \u001B[38;5;28;01mfor\u001B[39;00m x \u001B[38;5;129;01min\u001B[39;00m v]\n\u001B[0;32m 96\u001B[0m \u001B[38;5;28;01mcase\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdepthcm\u001B[39m\u001B[38;5;124m'\u001B[39m:\n\u001B[0;32m 97\u001B[0m out[k] \u001B[38;5;241m=\u001B[39m [PImage\u001B[38;5;241m.\u001B[39mopen(io\u001B[38;5;241m.\u001B[39mBytesIO(x)) \u001B[38;5;28;01mfor\u001B[39;00m x \u001B[38;5;129;01min\u001B[39;00m entry[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdepthcm\u001B[39m\u001B[38;5;124m'\u001B[39m]]\n\u001B[0;32m 98\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m out\n",
+ "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\PIL\\Image.py:922\u001B[0m, in \u001B[0;36mImage.convert\u001B[1;34m(self, mode, matrix, dither, palette, colors)\u001B[0m\n\u001B[0;32m 874\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mconvert\u001B[39m(\n\u001B[0;32m 875\u001B[0m \u001B[38;5;28mself\u001B[39m, mode\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, matrix\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, dither\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, palette\u001B[38;5;241m=\u001B[39mPalette\u001B[38;5;241m.\u001B[39mWEB, colors\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m256\u001B[39m\n\u001B[0;32m 876\u001B[0m ):\n\u001B[0;32m 877\u001B[0m \u001B[38;5;250m \u001B[39m\u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m 878\u001B[0m \u001B[38;5;124;03m Returns a converted copy of this image. For the \"P\" mode, this\u001B[39;00m\n\u001B[0;32m 879\u001B[0m \u001B[38;5;124;03m method translates pixels through the palette. If mode is\u001B[39;00m\n\u001B[1;32m (...)\u001B[0m\n\u001B[0;32m 919\u001B[0m \u001B[38;5;124;03m :returns: An :py:class:`~PIL.Image.Image` object.\u001B[39;00m\n\u001B[0;32m 920\u001B[0m \u001B[38;5;124;03m \"\"\"\u001B[39;00m\n\u001B[1;32m--> 922\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mload()\n\u001B[0;32m 924\u001B[0m has_transparency \u001B[38;5;241m=\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mtransparency\u001B[39m\u001B[38;5;124m\"\u001B[39m \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39minfo\n\u001B[0;32m 925\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m mode \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mmode \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mP\u001B[39m\u001B[38;5;124m\"\u001B[39m:\n\u001B[0;32m 926\u001B[0m \u001B[38;5;66;03m# determine default mode\u001B[39;00m\n",
+ "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\PIL\\WebPImagePlugin.py:168\u001B[0m, in \u001B[0;36mWebPImageFile.load\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m 166\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfp \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_exclusive_fp:\n\u001B[0;32m 167\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfp\u001B[38;5;241m.\u001B[39mclose()\n\u001B[1;32m--> 168\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mfp \u001B[38;5;241m=\u001B[39m BytesIO(data)\n\u001B[0;32m 169\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mtile \u001B[38;5;241m=\u001B[39m [(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mraw\u001B[39m\u001B[38;5;124m\"\u001B[39m, (\u001B[38;5;241m0\u001B[39m, \u001B[38;5;241m0\u001B[39m) \u001B[38;5;241m+\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msize, \u001B[38;5;241m0\u001B[39m, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mrawmode)]\n\u001B[0;32m 171\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28msuper\u001B[39m()\u001B[38;5;241m.\u001B[39mload()\n",
+ "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
+ ]
+ }
+ ],
+ "execution_count": 4
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 2
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython2",
+ "version": "2.7.6"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }