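"""Gradio demo for the weakly supervised 3D object detection thesis models.

Each tab of the interface runs inference with one of the methods from the
thesis: the proposal method, the pseudo GT method, weak Cube R-CNN, a
time-equalised Cube R-CNN, and the Cube R-CNN baseline.
"""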
import os
import sys

# Ensure the repository root is on the import path before loading project modules.
sys.path.append(os.getcwd())

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch

from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import transforms as T
from groundingdino.util.inference import load_model

from cubercnn import util, vis
from cubercnn.config import get_cfg_defaults
from cubercnn.data.generate_depth_maps import depth_of_images, setup_depth_model
from cubercnn.data.generate_ground_segmentations import find_ground, init_segmentation, load_image2
from cubercnn.modeling.backbone import build_dla_from_vision_fpn_backbone  # side effect: registers the backbone
from cubercnn.modeling.meta_arch import build_model

np.set_printoptions(suppress=True)

# Cache so the heavy models are only loaded once per selected method.
model_loaded = None
model, depth_model, ground_model, segmentor = None, None, None, None

def generate_imshow_plot(depth_map):
    """Render a depth map as a matplotlib figure with a colourbar."""
    fig, ax = plt.subplots(dpi=200, tight_layout=True)

    # Display the depth map using imshow
    cax = ax.imshow(depth_map, cmap='viridis')

    # Add a colorbar to the plot
    fig.colorbar(cax, shrink=0.8)
    return fig

def do_test(im, threshold, model_str):
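    """Run 3D detection on an image and return the rendered predictions."""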
    if im is None:
        # The proposal tab has four outputs; the other tabs have two.
        return (None,) * 4 if model_str == "Proposal method" else (None, None)
    # Small workaround so the (heavy) models are only loaded once per method.
    global model_loaded
    global model, depth_model, ground_model, segmentor
    if model_loaded != model_str:
        model, depth_model, ground_model, segmentor = load_model_config(model_str)
        model_loaded = model_str

    model.eval()

    thres = threshold

    min_size = 512
    max_size = 4096
    augmentations = T.AugmentationList([T.ResizeShortestEdge(min_size, max_size, "choice")])

    category_path = 'configs/category_meta.json'
        
    # store locally if needed
    if category_path.startswith(util.CubeRCNNHandler.PREFIX):
        category_path = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, category_path)

    metadata = util.load_json(category_path)
    cats = metadata['thing_classes']
    
    image_shape = im.shape[:2]  # h, w

    h, w = image_shape
    
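    # No intrinsics are known for an uploaded image, so assume a virtual camera
    # with a fixed focal length in NDC and the principal point at the image centre.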
    focal_length_ndc = 4.0
    focal_length = focal_length_ndc * h / 2

    px, py = w/2, h/2

    K = np.array([
        [focal_length, 0.0, px], 
        [0.0, focal_length, py], 
        [0.0, 0.0, 1.0]
    ])

    # Resize the image so the shortest edge is min_size (capped at max_size).
    aug_input = T.AugInput(im)
    tfms = augmentations(aug_input)
    image = aug_input.image

    if model_str == "Proposal method":
        image_source, image_tensor = load_image2(im, device='cpu')
        ground_map = find_ground(image_source, image_tensor, ground_model, segmentor, device='cpu')
        is_ground = ground_map is not None
        depth_map = depth_of_images(im, depth_model)

        if is_ground:
            # Apply the same resize transform to the ground map as to the image.
            ground_map = tfms.apply_image(ground_map * 1.0)
            ground_map = torch.as_tensor(ground_map)
        else:
            ground_map = None
        depth_map = tfms.apply_image(depth_map)

        # Batch the image together with its depth map, ground map and intrinsics.
        batched = [{
            'image': torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))), 
            'depth_map': torch.as_tensor(depth_map),
            'ground_map': ground_map,
            'height': image_shape[0], 'width': image_shape[1], 'K': K
        }]
    else:
        batched = [{
            'image': torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))), 
            'height': image_shape[0], 'width': image_shape[1], 'K': K
        }]
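    # Single forward pass; the model returns an Instances object holding the
    # predicted 3D boxes, centers, dimensions, poses, scores and classes.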
    with torch.no_grad():
        dets = model(batched)[0]['instances']

    n_det = len(dets)

    meshes = []
    meshes_text = []

    if n_det > 0:
        for idx, (corners3D, center_cam, center_2D, dimensions, pose, score, cat_idx) in enumerate(zip(
                dets.pred_bbox3D, dets.pred_center_cam, dets.pred_center_2D, dets.pred_dimensions, 
                dets.pred_pose, dets.scores, dets.pred_classes
            )):

            # skip detections below the confidence threshold
            if score < thres:
                continue
            
            cat = cats[cat_idx]

            bbox3D = center_cam.tolist() + dimensions.tolist()
            meshes_text.append('{} {:.2f}'.format(cat, score))
            color = [c/255.0 for c in util.get_color(idx)]
            box_mesh = util.mesh_cuboid(bbox3D, pose.tolist(), color=color)
            meshes.append(box_mesh)
    
    if len(meshes) > 0:
        im_drawn_rgb, im_topdown, _ = vis.draw_scene_view(
            im, K, meshes, text=meshes_text, scale=im.shape[0],
            blend_weight=0.5, blend_weight_overlay=0.85,
        )
        im_drawn_rgb, im_topdown = im_drawn_rgb.astype(np.uint8), im_topdown.astype(np.uint8)
    else:
        im_drawn_rgb, im_topdown = im.astype(np.uint8), None

    if model_str == "Proposal method":
        # Show an all-zero ground map when no ground was detected.
        if ground_map is None:
            ground_map = torch.zeros(image_shape[0], image_shape[1])
        return im_drawn_rgb, im_topdown, generate_imshow_plot(depth_map), ground_map.numpy()
    return im_drawn_rgb, im_topdown

def setup(config_file):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    get_cfg_defaults(cfg)

    # store locally if needed
    if config_file.startswith(util.CubeRCNNHandler.PREFIX):    
        config_file = util.CubeRCNNHandler._get_local_path(util.CubeRCNNHandler, config_file)

    cfg.merge_from_file(config_file)
    cfg.freeze()
    return cfg

def main(config_file, weights=None):
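    """Build the model from the config file and load the given checkpoint."""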
    cfg = setup(config_file)
    model = build_model(cfg)
    
    DetectionCheckpointer(model).resume_or_load(
        weights, resume=True
    )
    return cfg, model


if __name__ == "__main__":
    def load_model_config(model_str):
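        """Load the detector (and any auxiliary models) for the chosen method."""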
        if model_str == "Proposal method":
            config_file = "configs/BoxNet.yaml"
            MODEL_WEIGHTS = "output/Baseline_sgd/model_final.pth"
            cfg, model = main(config_file, MODEL_WEIGHTS)
            device = 'cuda' if torch.cuda.is_available() else 'cpu'

            # The proposal method also needs a metric depth estimator, a
            # GroundingDINO model for locating the ground, and a segmentor.
            depth_model_name = 'zoedepth'
            pretrained_resource = 'local::depth/checkpoints/depth_anything_metric_depth_indoor.pt'
            depth_model = setup_depth_model(depth_model_name, pretrained_resource)
            ground_model = load_model("GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py", "GroundingDINO/weights/groundingdino_swint_ogc.pth", device=device)
            segmentor = init_segmentation(device=device)
            return model, depth_model, ground_model, segmentor
        elif model_str == "Pseudo GT method":
            config_file = "configs/Base_Omni3D.yaml"
            MODEL_WEIGHTS = "output/omni_pseudo_gt/model_final.pth"
            cfg, model = main(config_file, MODEL_WEIGHTS)
            return model, None, None, None
        elif model_str == "Weak Cube R-CNN":
            config_file = "configs/Omni_combined.yaml"
            MODEL_WEIGHTS = "output/exp_10_iou_zpseudogt_dims_depthrange_rotalign_ground/model_recent.pth"
            cfg, model = main(config_file, MODEL_WEIGHTS)
            return model, None, None, None
        elif model_str == "Time-equalized Cube R-CNN":
            config_file = "configs/Base_Omni3D.yaml"
            MODEL_WEIGHTS = "output/omni_equalised/model_final.pth"
            cfg, model = main(config_file, MODEL_WEIGHTS)
            return model, None, None, None
        elif model_str == "Cube R-CNN":
            config_file = "configs/Base_Omni3D.yaml"
            MODEL_WEIGHTS = "output/Baseline_sgd/model_final.pth"
            cfg, model = main(config_file, MODEL_WEIGHTS)
            return model, None, None, None
        else:
            raise ValueError(f"Unknown model: {model_str}")


    title = None
    description = "This demo showcases the models developed for our thesis \"Weakly Supervised 3D Object Detection\". Choose between the methods by selecting the tabs above. \n Upload an image, or use one of the example images below. \n We have created three methods that use only 2D box annotations: a **proposal-and-scoring method**, a **pseudo-ground-truth method**, and a **weak Cube R-CNN**. The proposal method generates 1000 cubes per object and scores them; its predictions are then used as pseudo ground truth in the [Cube R-CNN framework](https://garrickbrazil.com/omni3d). To create a weak Cube R-CNN, we modify the framework by replacing its 3D loss functions with ones based solely on 2D annotations. Our methods rely heavily on external, well-generalising deep learning models to infer spatial information in scenes. Experimental results show that all models perform comparably to an annotation-time-equalised Cube R-CNN, with the pseudo-ground-truth method achieving the highest accuracy. The methods demonstrate an ability to understand scenes in 3D and provide satisfactory visual results. Although not precise enough for centimetre-accurate measurements, they provide a solid foundation for further research. \n Check out the code on [GitHub](https://github.com/luchsonice/3dod)"

    proposal = gr.Interface(
        fn=do_test,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Slider(0, 1, value=0.25, label="Threshold", info="Only show predictions with a score above this threshold"),
            gr.Textbox(value="Proposal method", visible=False, render=False)
        ],
        outputs=[gr.Image(label="Predictions"), gr.Image(label="Top view"), gr.Plot(label="Depth map"), gr.Image(label="Ground map")],
        title=title,
        description=description + ". Note that the proposal method depends heavily on finding the ground. The bottom image shows how the ground is detected.",
        allow_flagging='never',
        examples=[["datasets/examples/ex2.jpg"], ["datasets/examples/ex1.jpg"]],
    )

    pseudo_gt = gr.Interface(
        fn=do_test,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Slider(0, 1, value=0.25, label="Threshold", info="Only show predictions with a confidence above this threshold"),
            gr.Textbox(value="Pseudo GT method", visible=False, render=False)
        ],
        outputs=[gr.Image(label="Predictions"), gr.Image(label="Top view")],
        title=title,
        description=description,
        allow_flagging='never',
        examples=[["datasets/examples/ex2.jpg"], ["datasets/examples/ex1.jpg"]],
    )
    
    
    weak_cube = gr.Interface(
        fn=do_test,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Slider(0, 1, value=0.25, label="Threshold", info="Only show predictions with a confidence above this threshold"),
            gr.Textbox(value="Weak Cube R-CNN", visible=False, render=False)
        ],
        outputs=[gr.Image(label="Predictions"), gr.Image(label="Top view")],
        title=title,
        description=description,
        allow_flagging='never',
        examples=[["datasets/examples/ex2.jpg"], ["datasets/examples/ex1.jpg"]],
    )
    
    time_cube = gr.Interface(
        fn=do_test,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Slider(0, 1, value=0.25, label="Threshold", info="Only show predictions with a confidence above this threshold"),
            gr.Textbox(value="Time-equalized Cube R-CNN", visible=False, render=False)
        ],
        outputs=[gr.Image(label="Predictions"), gr.Image(label="Top view")],
        title=title,
        description=description,
        allow_flagging='never',
        examples=[["datasets/examples/ex2.jpg"], ["datasets/examples/ex1.jpg"]],
    )

    cube_rcnn = gr.Interface(
        fn=do_test,
        inputs=[
            gr.Image(label="Input Image"),
            gr.Slider(0, 1, value=0.25, label="Threshold", info="Only show predictions with a confidence above this threshold"),
            gr.Textbox(value="Cube R-CNN", visible=False, render=False)
        ],
        outputs=[gr.Image(label="Predictions"), gr.Image(label="Top view")],
        title=title,
        description=description,
        allow_flagging='never',
        examples=[["datasets/examples/ex2.jpg"], ["datasets/examples/ex1.jpg"]],
    )


    demo = gr.TabbedInterface(
        [pseudo_gt, proposal, weak_cube, time_cube, cube_rcnn],
        ["Pseudo GT method", "Proposal method", "Weak Cube R-CNN", "Time-equalized Cube R-CNN", "Cube R-CNN"],
        title="Weakly supervised 3D Cube Prediction",
    )
    
    demo.launch(server_name="0.0.0.0", server_port=7860)
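    # To try the demo locally: run this script with Python and open
    # http://localhost:7860 in a browser.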