Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -35,6 +35,42 @@ predictor = DefaultPredictor(cfg)
|
|
35 |
my_metadata = MetadataCatalog.get("car_dataset_val")
|
36 |
my_metadata.thing_classes = ["damage"]
|
37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
def inference(image):
|
39 |
print(image.height)
|
40 |
|
@@ -43,14 +79,17 @@ def inference(image):
|
|
43 |
# img = np.array(image.resize((500, height)))
|
44 |
img = np.array(image)
|
45 |
outputs = predictor(img)
|
|
|
|
|
|
|
46 |
v = Visualizer(img[:, :, ::-1],
|
47 |
metadata=my_metadata,
|
48 |
scale=0.5,
|
49 |
instance_mode=ColorMode.SEGMENTATION # remove the colors of unsegmented pixels. This option is only available for segmentation models
|
50 |
)
|
51 |
# v = Visualizer(img,scale=1.2)
|
52 |
-
print(outputs["instances"].to('cpu'))
|
53 |
-
out = v.draw_instance_predictions(
|
54 |
|
55 |
return out.get_image()[:, :, ::-1]
|
56 |
|
|
|
# Metadata consumed by the Visualizer when drawing predictions; the
# model detects a single class, "damage".
# NOTE(review): "car_dataset_val" is presumably registered elsewhere in
# this app — confirm, since MetadataCatalog.get() will not fail on an
# unknown name.
my_metadata = MetadataCatalog.get("car_dataset_val")
my_metadata.thing_classes = ["damage"]
|
def merge_segment(pred_segm):
    """Merge predicted instance masks that overlap into single masks.

    Parameters
    ----------
    pred_segm : torch.Tensor
        Stack of N per-instance masks, shape (N, H, W). Assumed to be a
        numeric dtype (the in-place ``+=`` accumulation below is not
        defined for ``torch.bool`` — TODO confirm upstream dtype).

    Returns
    -------
    torch.Tensor
        Masks with every overlapping group summed into one mask, shape
        (M, H, W) with M <= N. ``pred_segm`` is modified in place, as in
        the original implementation.

    Fix vs. previous version: overlap is now treated transitively via
    union-find. Before, a chain A∩B, B∩C (with A∩C empty) deleted mask C
    without ever merging it, silently losing its pixels.
    """
    n = len(pred_segm)
    parent = list(range(n))

    def _find(i):
        # Union-find root lookup with path compression.
        root = i
        while parent[root] != root:
            root = parent[root]
        while parent[i] != root:
            parent[i], i = root, parent[i]
        return root

    # Union every pair of masks that share at least one pixel.
    for i in range(n):
        for j in range(i + 1, n):
            if torch.sum(pred_segm[i] * pred_segm[j]) > 0:
                ri, rj = _find(i), _find(j)
                if ri != rj:
                    # Keep the smaller index as the group representative
                    # so output ordering matches the original code.
                    parent[max(ri, rj)] = min(ri, rj)

    # Accumulate each group into its representative mask; keep only
    # representatives.
    keep = []
    for i in range(n):
        root = _find(i)
        if root == i:
            keep.append(i)
        else:
            pred_segm[root] += pred_segm[i]

    return pred_segm[keep]
|
def inference(image):
    """Run damage instance segmentation on a PIL image and return the
    rendered visualization.

    Parameters
    ----------
    image : PIL.Image.Image
        Input photo; converted to an ndarray for the detectron2 predictor.

    Returns
    -------
    numpy.ndarray
        RGB image (the Visualizer works in BGR, hence the channel flips).
    """
    print(image.height)

    # img = np.array(image.resize((500, height)))
    img = np.array(image)
    outputs = predictor(img)
    out_dict = outputs["instances"].to("cpu").get_fields()
    # Fix: size the Instances from the actual input image instead of a
    # hard-coded (1024, 1024) — uploads are not guaranteed to be 1024x1024,
    # and a wrong canvas misplaces the drawn masks.
    new_inst = detectron2.structures.Instances(img.shape[:2])
    new_inst.set('pred_masks', merge_segment(out_dict['pred_masks']))
    v = Visualizer(img[:, :, ::-1],
                   metadata=my_metadata,
                   scale=0.5,
                   instance_mode=ColorMode.SEGMENTATION  # remove the colors of unsegmented pixels. This option is only available for segmentation models
                   )
    # v = Visualizer(img,scale=1.2)
    # print(outputs["instances"].to('cpu'))
    out = v.draw_instance_predictions(new_inst)

    return out.get_image()[:, :, ::-1]
|
95 |
|