gatesla committed
Commit a206cd8
1 Parent(s): 0971307

Moved functions and comments around

Files changed (2)
  1. interpretter_notes.py +128 -0
  2. understand.py +0 -136
interpretter_notes.py ADDED
@@ -0,0 +1,128 @@
+
+
+ """
+ # How to get ID
+ >>> model.config.id2label
+ {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter',
+ 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',
+ 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket',
+ 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza',
+ 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone',
+ 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush', 80: 'banner', 81: 'blanket',
+ 82: 'bridge', 83: 'cardboard', 84: 'counter', 85: 'curtain', 86: 'door-stuff', 87: 'floor-wood', 88: 'flower', 89: 'fruit', 90: 'gravel', 91: 'house', 92: 'light', 93: 'mirror-stuff', 94: 'net', 95: 'pillow',
+ 96: 'platform', 97: 'playingfield', 98: 'railroad', 99: 'river', 100: 'road', 101: 'roof', 102: 'sand', 103: 'sea', 104: 'shelf', 105: 'snow', 106: 'stairs', 107: 'tent', 108: 'towel', 109: 'wall-brick',
+ 110: 'wall-stone', 111: 'wall-tile', 112: 'wall-wood', 113: 'water-other', 114: 'window-blind', 115: 'window-other', 116: 'tree-merged', 117: 'fence-merged', 118: 'ceiling-merged', 119: 'sky-other-merged',
+ 120: 'cabinet-merged', 121: 'table-merged', 122: 'floor-other-merged', 123: 'pavement-merged', 124: 'mountain-merged', 125: 'grass-merged', 126: 'dirt-merged', 127: 'paper-merged', 128: 'food-other-merged',
+ 129: 'building-other-merged', 130: 'rock-merged', 131: 'wall-other-merged', 132: 'rug-merged'}
+ >>> model.config.id2label[123]
+ 'pavement-merged'
+ >>> results["segments_info"][1]
+ {'id': 2, 'label_id': 123, 'was_fused': False, 'score': 0.995813}
+ """
+ # Above labels don't correspond to anything ... https://github.com/nightrome/cocostuff/blob/master/labels.md
+ # This one was closest to helping: https://github.com/NielsRogge/Transformers-Tutorials/blob/master/MaskFormer/Inference/Inference_with_MaskFormer_for_semantic_%2B_panoptic_segmentation.ipynb
+
+ """
+ >>> Image.fromarray((mask * 255).cpu().numpy().astype(np.uint8))
+ <PIL.Image.Image image mode=L size=2000x1500 at 0x7F07773691C0>
+ >>> temp = Image.fromarray((mask * 255).cpu().numpy().astype(np.uint8))
+ """
+
+ """
+ >>> mask = (results["segmentation"].cpu().numpy == 4)
+ >>> mask = (results["segmentation"].cpu().numpy() == 4)
+ >>> mask
+ array([[False, False, False, ..., False, False, False],
+ [False, False, False, ..., False, False, False],
+ [False, False, False, ..., False, False, False],
+ ...,
+ [False, False, False, ..., False, False, False],
+ [False, False, False, ..., False, False, False],
+ [False, False, False, ..., False, False, False]])
+ >>> visual_mask = (mask * 255).astype(np.uint8)
+ >>> visual_mask = Image.fromarray(visual_mask)
+ >>> plt.imshow(visual_mask)
+ <matplotlib.image.AxesImage object at 0x7f0761e78040>
+ >>> plt.show()
+ """
+
+ """
+ >>> mask = (results["segmentation"].cpu().numpy() == 1)
+ >>> visual_mask = (mask*255).astype(np.uint8)
+ >>> visual_mask = Image.fromarray(visual_mask)
+ >>> plt.imshow(visual_mask)
+ <matplotlib.image.AxesImage object at 0x7f0760298550>
+ >>> plt.show()
+ >>> results["segments_info"][0]
+ {'id': 1, 'label_id': 25, 'was_fused': False, 'score': 0.998022}
+ >>>
+ """
+
+ """
+ >>> np.where(mask==True)
+ (array([300, 300, 300, ..., 392, 392, 392]), array([452, 453, 454, ..., 473, 474, 475]))
+ >>> max(np.where(mask==True)[0])
+ 392
+ >>> min(np.where(mask==True)[0])
+ 300
+ >>> max(np.where(mask==True)[1])
+ 538
+ >>> min(np.where(mask==True)[1])
+ 399
+ """
+
+
+ """
+ >>> mask = (results["segmentation"].cpu().numpy() == 1)
+ >>> visual_mask = (mask* 255).astype(np.uint8)
+ >>> import cv2 as cv
+ >>> contours, hierarchy = cv.findContours(visual_mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
+ >>> contours.shape
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ AttributeError: 'tuple' object has no attribute 'shape'
+ >>> contours[0].shape
+ (7, 1, 2)
+ >>> shrunk = contours[0][:, 0, :]
+ >>> shrunk
+ array([[400, 340],
+ [399, 341],
+ [400, 342],
+ [401, 342],
+ [402, 341],
+ [403, 341],
+ [402, 340]], dtype=int32)
+ >>> get_coordinates_for_bb_simple(results["segmentation"], 1)
+ ((300, 399), (392, 538))
+ >>> shrunk = contours[1][:, 0, :]
+ >>> max(shrunk[:, 0])
+ 538
+ >>> min(shrunk[:, 0])
+ 409
+ >>> min(shrunk[:, 1])
+ 300
+ >>> max(shrunk[:, 1])
+ 392
+ >>>
+ """
+
+
+
+ """
+ import cv2 as cv
+ contours, hierarchy = cv.findContours(visual_mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
+ shrunk = contours[0][:, 0, :]
+
+ >>> shrunk[0, :]
+ array([1907, 887], dtype=int32)
+ >>> shrunk[:, 0]
+ array([1907, 1907, 1908, 1908, 1908], dtype=int32)
+ >>> shrunk[:, 1]
+ array([887, 888, 889, 890, 888], dtype=int32)
+ >>> shrunk
+ array([[1907, 887],
+ [1907, 888],
+ [1908, 889],
+ [1908, 890],
+ [1908, 888]], dtype=int32)
+ """
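A note on the bounding-box helper: the transcripts above call get_coordinates_for_bb_simple, but its definition is not part of this commit. Judging from the np.where session, the box for a segment is just the smallest and largest row and column of its boolean mask, and the transcript value ((300, 399), (392, 538)) suggests a ((min_row, min_col), (max_row, max_col)) return convention. A minimal sketch along those lines, assuming results comes from the processor's panoptic post-processing as in the notes; the body below is a hypothetical reconstruction, not the repository's actual implementation:

import numpy as np

def get_coordinates_for_bb_simple(segmentation_map, segment_id):
    # Hypothetical reconstruction of the helper referenced in the notes above.
    # segmentation_map is the tensor from results["segmentation"]; each pixel
    # holds the id of the segment it belongs to.
    mask = (segmentation_map.cpu().numpy() == segment_id)
    rows, cols = np.where(mask)  # row/column indices of every pixel in the segment
    return (rows.min(), cols.min()), (rows.max(), cols.max())

Called as in the transcript, get_coordinates_for_bb_simple(results["segmentation"], 1) would then reproduce ((300, 399), (392, 538)) for segment 1 (label_id 25, 'umbrella' in id2label).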
understand.py CHANGED
@@ -147,89 +147,6 @@ def test(map_to_use, label_id):
  plt.imshow(visual_mask)
  plt.show()

-
-
- # From Tutorial (Box 79)
- # def get_mask(segment_idx):
- # segment = results['segments_info'][segment_idx]
- # print("Visualizing mask for:", id2label[segment['label_id']])
- # mask = (predicted_panoptic_seg == segment['id'])
- # visual_mask = (mask * 255).astype(np.uint8)
- # return Image.fromarray(visual_mask)
-
- # How to get ID
-
- """
- >>> model.config.id2label
- {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter',
- 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',
- 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket',
- 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza',
- 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone',
- 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush', 80: 'banner', 81: 'blanket',
- 82: 'bridge', 83: 'cardboard', 84: 'counter', 85: 'curtain', 86: 'door-stuff', 87: 'floor-wood', 88: 'flower', 89: 'fruit', 90: 'gravel', 91: 'house', 92: 'light', 93: 'mirror-stuff', 94: 'net', 95: 'pillow',
- 96: 'platform', 97: 'playingfield', 98: 'railroad', 99: 'river', 100: 'road', 101: 'roof', 102: 'sand', 103: 'sea', 104: 'shelf', 105: 'snow', 106: 'stairs', 107: 'tent', 108: 'towel', 109: 'wall-brick',
- 110: 'wall-stone', 111: 'wall-tile', 112: 'wall-wood', 113: 'water-other', 114: 'window-blind', 115: 'window-other', 116: 'tree-merged', 117: 'fence-merged', 118: 'ceiling-merged', 119: 'sky-other-merged',
- 120: 'cabinet-merged', 121: 'table-merged', 122: 'floor-other-merged', 123: 'pavement-merged', 124: 'mountain-merged', 125: 'grass-merged', 126: 'dirt-merged', 127: 'paper-merged', 128: 'food-other-merged',
- 129: 'building-other-merged', 130: 'rock-merged', 131: 'wall-other-merged', 132: 'rug-merged'}
- >>> model.config.id2label[123]
- 'pavement-merged'
- >>> results["segments_info"][1]
- {'id': 2, 'label_id': 123, 'was_fused': False, 'score': 0.995813}
- """
- # Above labels don't correspond to anything ... https://github.com/nightrome/cocostuff/blob/master/labels.md
- # This one was closest to helping: https://github.com/NielsRogge/Transformers-Tutorials/blob/master/MaskFormer/Inference/Inference_with_MaskFormer_for_semantic_%2B_panoptic_segmentation.ipynb
-
- """
- >>> Image.fromarray((mask * 255).cpu().numpy().astype(np.uint8))
- <PIL.Image.Image image mode=L size=2000x1500 at 0x7F07773691C0>
- >>> temp = Image.fromarray((mask * 255).cpu().numpy().astype(np.uint8))
- """
-
- """
- >>> mask = (results["segmentation"].cpu().numpy == 4)
- >>> mask = (results["segmentation"].cpu().numpy() == 4)
- >>> mask
- array([[False, False, False, ..., False, False, False],
- [False, False, False, ..., False, False, False],
- [False, False, False, ..., False, False, False],
- ...,
- [False, False, False, ..., False, False, False],
- [False, False, False, ..., False, False, False],
- [False, False, False, ..., False, False, False]])
- >>> visual_mask = (mask * 255).astype(np.uint8)
- >>> visual_mask = Image.fromarray(visual_mask)
- >>> plt.imshow(visual_mask)
- <matplotlib.image.AxesImage object at 0x7f0761e78040>
- >>> plt.show()
- """
-
- """
- >>> mask = (results["segmentation"].cpu().numpy() == 1)
- >>> visual_mask = (mask*255).astype(np.uint8)
- >>> visual_mask = Image.fromarray(visual_mask)
- >>> plt.imshow(visual_mask)
- <matplotlib.image.AxesImage object at 0x7f0760298550>
- >>> plt.show()
- >>> results["segments_info"][0]
- {'id': 1, 'label_id': 25, 'was_fused': False, 'score': 0.998022}
- >>>
- """
-
- """
- >>> np.where(mask==True)
- (array([300, 300, 300, ..., 392, 392, 392]), array([452, 453, 454, ..., 473, 474, 475]))
- >>> max(np.where(mask==True)[0])
- 392
- >>> min(np.where(mask==True)[0])
- 300
- >>> max(np.where(mask==True)[1])
- 538
- >>> min(np.where(mask==True)[1])
- 399
- """
-
-
  def contour_map(map_to_use, label_id):
  """
  map_to_use: You have to pass in `results["segmentation"]`
@@ -243,57 +160,4 @@ def contour_map(map_to_use, label_id):
  contours, hierarchy = cv.findContours(visual_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
  return contours, hierarchy

- """
- >>> mask = (results["segmentation"].cpu().numpy() == 1)
- >>> visual_mask = (mask* 255).astype(np.uint8)
- >>> import cv2 as cv
- >>> contours, hierarchy = cv.findContours(visual_mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
- >>> contours.shape
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- AttributeError: 'tuple' object has no attribute 'shape'
- >>> contours[0].shape
- (7, 1, 2)
- >>> shrunk = contours[0][:, 0, :]
- >>> shrunk
- array([[400, 340],
- [399, 341],
- [400, 342],
- [401, 342],
- [402, 341],
- [403, 341],
- [402, 340]], dtype=int32)
- >>> get_coordinates_for_bb_simple(results["segmentation"], 1)
- ((300, 399), (392, 538))
- >>> shrunk = contours[1][:, 0, :]
- >>> max(shrunk[:, 0])
- 538
- >>> min(shrunk[:, 0])
- 409
- >>> min(shrunk[:, 1])
- 300
- >>> max(shrunk[:, 1])
- 392
- >>>
- """
-
-
-
- """
- import cv2 as cv
- contours, hierarchy = cv.findContours(visual_mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
- shrunk = contours[0][:, 0, :]

- >>> shrunk[0, :]
- array([1907, 887], dtype=int32)
- >>> shrunk[:, 0]
- array([1907, 1907, 1908, 1908, 1908], dtype=int32)
- >>> shrunk[:, 1]
- array([887, 888, 889, 890, 888], dtype=int32)
- >>> shrunk
- array([[1907, 887],
- [1907, 888],
- [1908, 889],
- [1908, 890],
- [1908, 888]], dtype=int32)
- """

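The contour session above computes the same box a different way: cv.findContours on the uint8 mask returns a tuple of (N, 1, 2) arrays whose points are stored as (x, y), i.e. (column, row). That is why max(shrunk[:, 0]) is 538 and max(shrunk[:, 1]) is 392, matching the column and row maxima from np.where. It also shows that one segment can produce more than one contour: contours[0] is a tiny 7-point blob, while contours[1] traces the main outline. Below is a contour-based variant of the helper, sketched under the assumption of OpenCV 4.x (where findContours returns only contours and hierarchy); the function name is invented here for illustration:

import cv2 as cv
import numpy as np

def get_coordinates_for_bb_contour(segmentation_map, segment_id):
    # Hypothetical contour-based variant; same ((min_row, min_col), (max_row, max_col))
    # convention as the simple np.where helper sketched earlier.
    mask = (segmentation_map.cpu().numpy() == segment_id)
    visual_mask = (mask * 255).astype(np.uint8)
    # OpenCV 4.x: findContours returns (contours, hierarchy); each contour is (N, 1, 2)
    contours, _ = cv.findContours(visual_mask, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    # Stack every contour's points, since one segment may produce several contours;
    # OpenCV contour points are (x, y) = (column, row)
    points = np.vstack([c[:, 0, :] for c in contours])
    xs, ys = points[:, 0], points[:, 1]
    return (ys.min(), xs.min()), (ys.max(), xs.max())

Stacking all contours matters here: in the transcript the small contours[0] reaches column 399 while contours[1] only goes down to 409, so together they appear to recover the same box that np.where gives.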