Add Batch c0513c76-8060-45e2-9889-beb79507d6ee
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete list.
- 12in1multitaskvisionandlanguagerepresentationlearning/22c26506-3388-45ee-af0f-f53251df929c_content_list.json +3 -0
- 12in1multitaskvisionandlanguagerepresentationlearning/22c26506-3388-45ee-af0f-f53251df929c_model.json +3 -0
- 12in1multitaskvisionandlanguagerepresentationlearning/22c26506-3388-45ee-af0f-f53251df929c_origin.pdf +3 -0
- 12in1multitaskvisionandlanguagerepresentationlearning/full.md +321 -0
- 12in1multitaskvisionandlanguagerepresentationlearning/images.zip +3 -0
- 12in1multitaskvisionandlanguagerepresentationlearning/layout.json +3 -0
- 15keypointsisallyouneed/e5ac5156-93c5-41e3-8de5-74f59d1fd56d_content_list.json +3 -0
- 15keypointsisallyouneed/e5ac5156-93c5-41e3-8de5-74f59d1fd56d_model.json +3 -0
- 15keypointsisallyouneed/e5ac5156-93c5-41e3-8de5-74f59d1fd56d_origin.pdf +3 -0
- 15keypointsisallyouneed/full.md +325 -0
- 15keypointsisallyouneed/images.zip +3 -0
- 15keypointsisallyouneed/layout.json +3 -0
- 3dhumanmeshregressionwithdensecorrespondence/5ce609af-653e-4f2d-bb43-2bcd175021d4_content_list.json +3 -0
- 3dhumanmeshregressionwithdensecorrespondence/5ce609af-653e-4f2d-bb43-2bcd175021d4_model.json +3 -0
- 3dhumanmeshregressionwithdensecorrespondence/5ce609af-653e-4f2d-bb43-2bcd175021d4_origin.pdf +3 -0
- 3dhumanmeshregressionwithdensecorrespondence/full.md +373 -0
- 3dhumanmeshregressionwithdensecorrespondence/images.zip +3 -0
- 3dhumanmeshregressionwithdensecorrespondence/layout.json +3 -0
- 3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/f5f1d7aa-96ff-441f-b2d2-c3511cdca894_content_list.json +3 -0
- 3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/f5f1d7aa-96ff-441f-b2d2-c3511cdca894_model.json +3 -0
- 3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/f5f1d7aa-96ff-441f-b2d2-c3511cdca894_origin.pdf +3 -0
- 3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/full.md +301 -0
- 3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/images.zip +3 -0
- 3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/layout.json +3 -0
- 3dpackingforselfsupervisedmonoculardepthestimation/98cb13b1-a587-4d6d-b10f-2fb39663ea60_content_list.json +3 -0
- 3dpackingforselfsupervisedmonoculardepthestimation/98cb13b1-a587-4d6d-b10f-2fb39663ea60_model.json +3 -0
- 3dpackingforselfsupervisedmonoculardepthestimation/98cb13b1-a587-4d6d-b10f-2fb39663ea60_origin.pdf +3 -0
- 3dpackingforselfsupervisedmonoculardepthestimation/full.md +280 -0
- 3dpackingforselfsupervisedmonoculardepthestimation/images.zip +3 -0
- 3dpackingforselfsupervisedmonoculardepthestimation/layout.json +3 -0
- 3dpartguidedimageeditingforfinegrainedobjectunderstanding/48654681-1e22-4cee-a2f1-9f712cc0b228_content_list.json +3 -0
- 3dpartguidedimageeditingforfinegrainedobjectunderstanding/48654681-1e22-4cee-a2f1-9f712cc0b228_model.json +3 -0
- 3dpartguidedimageeditingforfinegrainedobjectunderstanding/48654681-1e22-4cee-a2f1-9f712cc0b228_origin.pdf +3 -0
- 3dpartguidedimageeditingforfinegrainedobjectunderstanding/full.md +322 -0
- 3dpartguidedimageeditingforfinegrainedobjectunderstanding/images.zip +3 -0
- 3dpartguidedimageeditingforfinegrainedobjectunderstanding/layout.json +3 -0
- 3dphotographyusingcontextawarelayereddepthinpainting/d1b0b4a6-ad26-4d18-aac0-e2f991cc179b_content_list.json +3 -0
- 3dphotographyusingcontextawarelayereddepthinpainting/d1b0b4a6-ad26-4d18-aac0-e2f991cc179b_model.json +3 -0
- 3dphotographyusingcontextawarelayereddepthinpainting/d1b0b4a6-ad26-4d18-aac0-e2f991cc179b_origin.pdf +3 -0
- 3dphotographyusingcontextawarelayereddepthinpainting/full.md +491 -0
- 3dphotographyusingcontextawarelayereddepthinpainting/images.zip +3 -0
- 3dphotographyusingcontextawarelayereddepthinpainting/layout.json +3 -0
- 3dregnetadeepneuralnetworkfor3dpointregistration/a5f4920e-2130-4def-a31a-c357b9131bbf_content_list.json +3 -0
- 3dregnetadeepneuralnetworkfor3dpointregistration/a5f4920e-2130-4def-a31a-c357b9131bbf_model.json +3 -0
- 3dregnetadeepneuralnetworkfor3dpointregistration/a5f4920e-2130-4def-a31a-c357b9131bbf_origin.pdf +3 -0
- 3dregnetadeepneuralnetworkfor3dpointregistration/full.md +394 -0
- 3dregnetadeepneuralnetworkfor3dpointregistration/images.zip +3 -0
- 3dregnetadeepneuralnetworkfor3dpointregistration/layout.json +3 -0
- 3dsketchawaresemanticscenecompletionviasemisupervisedstructureprior/21e2be0e-0fb4-4da1-b0aa-370b4456f88a_content_list.json +3 -0
- 3dsketchawaresemanticscenecompletionviasemisupervisedstructureprior/21e2be0e-0fb4-4da1-b0aa-370b4456f88a_model.json +3 -0
12in1multitaskvisionandlanguagerepresentationlearning/22c26506-3388-45ee-af0f-f53251df929c_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddc4047c95a5971d1a26f6899171b865c501c39ab28e4211ae2e72a5efa1214e
+size 88167
12in1multitaskvisionandlanguagerepresentationlearning/22c26506-3388-45ee-af0f-f53251df929c_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8936d1c3a4926361caa02afc0220be359565e873eca36d9dcef787960318c5e4
+size 106714
12in1multitaskvisionandlanguagerepresentationlearning/22c26506-3388-45ee-af0f-f53251df929c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7830c5610c6d3dea2d99f9266d349cb1786deacb6a9a936db62b6e2461c1082
+size 393469
12in1multitaskvisionandlanguagerepresentationlearning/full.md
ADDED
@@ -0,0 +1,321 @@
# 12-in-1: Multi-Task Vision and Language Representation Learning

Jiasen Lu$^{3*}$, Vedanuj Goswami$^{1*}$, Marcus Rohrbach$^{1}$, Devi Parikh$^{1,3}$, Stefan Lee$^{2}$
$^{1}$Facebook AI Research $^{2}$Oregon State University $^{3}$Georgia Institute of Technology

{vedanuj, mrf}@fb.com leestef@oregonstate.edu {jiasenlu, parikh}@gatech.edu

# Abstract

Much of vision-and-language research focuses on a small but diverse set of independent tasks and supporting datasets often studied in isolation; however, the visually-grounded language understanding skills required for success at these tasks overlap significantly. In this work, we investigate these relationships between vision-and-language tasks by developing a large-scale, multi-task training regime. Our approach culminates in a single model on 12 datasets from four broad categories of task including visual question answering, caption-based image retrieval, grounding referring expressions, and multi-modal verification. Compared to independently trained single-task models, this represents a reduction from approximately 3 billion parameters to 270 million while simultaneously improving performance by 2.05 points on average across tasks. We use our multi-task framework to perform in-depth analysis of the effect of joint training diverse tasks. Further, we show that finetuning task-specific models from our single multi-task model can lead to further improvements, achieving performance at or above the state-of-the-art.

# 1. Introduction

A compelling reason to study language and vision jointly is the promise of language as a universal and natural interface for visual reasoning problems – useful both in specifying a wide range of problems and in communicating AI responses. However, the current research landscape for visually-grounded language understanding is a patchwork of many specialized tasks like question answering or caption generation, each supported by a handful of datasets. As such, progress in this field has been measured by the independent improvement of bespoke models designed and trained for each of these specific tasks and datasets.
The recent rise of general architectures for vision-and-language [1, 23, 24, 27, 43, 45, 54] reduces the architectural differences across tasks. These models pretrain common architectures on self-supervised tasks to learn general visio-linguistic representations then fine-tune for specific


Figure 1: We introduce an approach for effective multi-task learning, training a single model on 12 popular vision-and-language datasets. This single model performs at par or even better than independent task-specific state-of-the-art approaches for many tasks.

<table><tr><td>Visual Question Answering</td><td>What color is the child's outfit? → Orange</td></tr><tr><td>Referring Expressions</td><td>child, sheep, basket, people sitting on chair</td></tr><tr><td>Multi-modal Verification</td><td>The child is petting a dog. → false</td></tr><tr><td>Caption-based Image Retrieval</td><td>A child in orange clothes plays with sheep.</td></tr></table>

datasets; however, the result is still a menagerie of independent task-specific models rather than a single unified model. This is dissatisfying in practice – the model that understands questions cannot ground noun phrases, the grounding model cannot retrieve images based on a description, and so forth. Further, this approach does not scale well as each new task requires storing a new model.
Beyond being intellectually dissatisfying, this task-based fracturing leaves quite a lot on the table. While individual tasks present different challenges and diverse interfaces, the underlying associations between language and visual concepts are often common across tasks. For example, learning to ground the referring expression "small red vase" requires understanding the same concepts as answering the question "What color is the small vase?" Training multiple tasks jointly can potentially pool these different sources of grounding supervision. Further, developing models that can perform well on a wide range of tasks simultaneously can help guard against the research community overfitting to specific datasets and metrics.

In this work, we develop a multi-task model for discriminative vision-and-language tasks based on the recently proposed ViLBERT [27] model. We consider four categories of tasks – training jointly on a total of 12 different datasets. Our results not only show that a single model can perform all these tasks, but also that joint training can improve the performance compared to single-task training with the same architecture. Before undertaking this effort, it was not obvious to us that this would be the case – multitask training is notoriously challenging and vision-and-language datasets vary greatly in size, interface, and difficulty. Our model attains improvements of 0.25 to 4.19 absolute points from multi-task training – improving over corresponding single-task models for 11 out of 12 tasks. Further, we demonstrate that multi-task training is an effective pretraining step for single-task models – leading to further gains and setting a new state-of-the-art for 7 out of 12 tasks.

Large-scale multi-task learning is challenging as datasets can vary in size and difficulty. To address these issues, we introduce a dynamic stop-and-go training scheduler, task-dependent input tokens, and simple hyper-parameter heuristics. Using our proposed pipeline, we were able to train many multi-task models with varying datasets - assessing the relationships between different vision-and-language tasks in terms of their performance when trained together.
To summarize, we make the following contributions:

- We systematically analyze the joint-training relationships between different vision-and-language datasets and tasks and present a Clean V&L Multi-Task setup, which ensures no train-test leaks across tasks.
- We develop a single multi-task model trained on 12 popular V&L datasets. Compared to a set of independent models, this represents a reduction from $\sim 3$ billion parameters to $\sim 270$ million while simultaneously improving average performance by 2.05 points.
- We demonstrate that multi-task training is useful even in cases where single-task performance is paramount. Fine-tuning from our multi-task model for single tasks resulted in an average improvement of 2.98 points over baseline single-task trained models.

# 2. Vision-and-Language Tasks

# 2.1. Task-Groups and Datasets

We consider 12 popular vision and language datasets. These datasets cover a wide range of tasks and require diverse grounding granularity and reasoning skills. We group related datasets into four groups to facilitate our analysis:
Vocab-based VQA. Given an image and a natural-language question, select an answer from a fixed vocabulary. We consider three popular datasets for this group - VQAv2 [15], GQA [17], and Visual Genome (VG) QA [21].
Image Retrieval. Given a caption and a pool of images, retrieve the target image that is best-described by the caption. We consider COCO [7] and Flickr30K [35] captioning datasets for this task-group.

Referring Expressions. Given a natural language expression and an image, identify the target region that is referred to by the expression. The expression can vary greatly across datasets, from simple noun phrases to multi-round dialogs.

<table><tr><td></td><td colspan="12">% Row-Task Test Images in Column-Task Train/Val Set</td></tr><tr><td></td><td>[A]</td><td>[B]</td><td>[C]</td><td>[D]</td><td>[E]</td><td>[F]</td><td>[G]</td><td>[H]</td><td>[I]</td><td>[J]</td><td>[K]</td><td>[L]</td></tr><tr><td>[A] VQA2.0 [15]</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td></tr><tr><td>[B] VG QA [21]</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td></tr><tr><td>[C] GQA [17]</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td></tr><tr><td>[D] COCO [7]</td><td>100%</td><td>43%</td><td>33%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>7%</td><td>46%</td><td>0%</td><td>0%</td></tr><tr><td>[E] Flickr30k [35]</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>98%</td><td>0%</td></tr><tr><td>[F] RefCOCO [19]</td><td>100%</td><td>36%</td><td>27%</td><td>100%</td><td>0%</td><td>0%</td><td>0%</td><td>66%</td><td>8%</td><td>62%</td><td>0%</td><td>0%</td></tr><tr><td>[G] RefCOCO+ [19]</td><td>100%</td><td>38%</td><td>27%</td><td>100%</td><td>0%</td><td>0%</td><td>0%</td><td>66%</td><td>8%</td><td>62%</td><td>0%</td><td>0%</td></tr><tr><td>[H] RefCOCOg [30]</td><td>100%</td><td>41%</td><td>31%</td><td>100%</td><td>0%</td><td>53%</td><td>53%</td><td>0%</td><td>8%</td><td>63%</td><td>0%</td><td>0%</td></tr><tr><td>[I] Visual 7W [55]</td><td>50%</td><td>100%</td><td>79%</td><td>48%</td><td>0%</td><td>8%</td><td>8%</td><td>10%</td><td>0%</td><td>24%</td><td>0%</td><td>0%</td></tr><tr><td>[J] GuessWhat [13]</td><td>100%</td><td>40%</td><td>31%</td><td>96%</td><td>0%</td><td>20%</td><td>20%</td><td>26%</td><td>7%</td><td>0%</td><td>0%</td><td>0%</td></tr><tr><td>[K] SNLI-VE [49]</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>94%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td></tr><tr><td>[L] NLVRg2 [44]</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td><td>0%</td></tr></table>

Table 1: Percentage of row-task test images that are present in column-task train/val sets.

We consider phrase grounding in RefCOCO(+/g) [19, 30], pointing questions in Visual7W [55], and dialog sequences in GuessWhat [13]. We note that these language inputs vary significantly in terms of detail and structure.

Multi-modal Verification. Given one or more images and a natural language statement, judge the correctness or predict their semantic relationship. We consider $\mathrm{NLVR}^2$ [44] and SNLI-VE [49]. In $\mathrm{NLVR}^2$ , two images are given and the statement must be true for both to be true. In SNLI-VE, image-statement pairs are classified as representing an entailment, contradiction, or neutral. That is, whether the content of the image confirms, refutes, or is insufficient to comment on the truth of the corresponding statement.

# 2.2. A Clean V&L Multi-Task Setup

Many V&L tasks are built on top of each other and share significant overlap in terms of individual images. However, as each task is often examined in isolation, there does not exist an in-depth analysis of this overlap across different V&L tasks. Table 1 shows the percentage of test images for the target tasks which are present in other tasks' train/val sets. As we can see, there exists significant overlap across tasks. Even though different tasks require different inputs and outputs, other task annotations will provide clues about the visual grounding – for example, a referring expression for a "blue striped ball" at training could unfairly improve a VQA model's ability to answer "What color is the striped ball?" for the same image at test time. To avoid information leakage from the annotations of other tasks, we propose a cleaned multi-task split for V&L tasks where test images are removed from train/val for all the tasks. We stress that the test sets are not modified in any way, so our results are comparable to prior work. Cleaning results in about an $11\%$ reduction in training data on average across datasets. Full details of this process and statistics regarding cleaned dataset size are available in the supplement.

# 3. Approach

# 3.1. Base Architecture

There has been a flurry of recent work developing general vision-and-language model architectures that are amenable to large-scale self-supervised pretraining [1, 23, 24, 27, 43, 45, 54]. By pretraining general representations and then finetuning on single downstream tasks, these models set state-of-the-art in many tasks. For the base architecture in our experiments, we take the ViLBERT model proposed by Lu et al. [27]. We describe it here briefly.

At the interface level, ViLBERT takes as input an image $I$ and text segment $Q$ represented as the sequence $\{\mathrm{IMG}, v_1, \ldots, v_T, \mathrm{CLS}, w_1, \ldots, w_T, \mathrm{SEP}\}$ where $\{v_i\}_{i=1}^T$ are image region features [2], $\{w_j\}_{j=1}^T$ are word tokens, and the IMG, CLS, and SEP tokens are special markers. The model then outputs embeddings for each input $\{h_{v_i}\}_{i=1}^T$ , $\{h_{w_j}\}_{j=1}^T$ , $h_{\mathrm{IMG}}$ , $h_{\mathrm{CLS}}$ , and $h_{\mathrm{SEP}}$ . As in [27], we take $h_{\mathrm{IMG}}$ and $h_{\mathrm{CLS}}$ as holistic image and text representations.
Internally, ViLBERT consists of two parallel BERT-style [14] models operating over image regions and text segments. Each stream is a series of transformer blocks (TRM) [48] connected by co-attentional transformer layers (CoTRM) which enable information exchange between modalities. We use the default parameter setting, which has 6/12 layers of TRM for visual / linguistic streams respectively.

Like many of the models of this class, ViLBERT is pretrained on the Conceptual Captions dataset [39] with two 'proxy' tasks: masked multi-modal modelling and multi-modal alignment prediction. The first randomly masks approximately $15\%$ of both words and image tokens and reconstructs them given the remaining inputs. The latter tasks the model with predicting whether an image and caption correspond or not. After pretraining, the model can be finetuned for strong performance on various downstream tasks.

We make two important modifications to this pretraining process. First, when masking visual regions we also mask other regions with significant overlap ( $>0.4$ IoU) to avoid leaking visual information. This forces the model to rely more heavily on language to predict image content. Second, we do not enforce the masked multi-modal modelling loss when sampling a negative (unmatching) caption for multimodal alignment prediction. This will effectively remove the noise introduced by negative samples. While orthogonal to our primary contribution of multi-task learning, we found these modifications to make the baseline model more effective. For further discussion, see the supplemental material. All models we present are first pretrained in this manner.
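
The first modification is easy to express in code. The following is a minimal sketch of the overlap-aware masking rule, not the authors' released implementation; the box format, helper names, and masking interface are assumptions made purely for illustration.

```python
import torch

def box_iou(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """Pairwise IoU between two sets of boxes in (x1, y1, x2, y2) format."""
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    lt = torch.max(a[:, None, :2], b[None, :, :2])   # top-left of intersection
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])   # bottom-right of intersection
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

def expand_region_mask(boxes: torch.Tensor, masked: torch.Tensor,
                       iou_thresh: float = 0.4) -> torch.Tensor:
    """Also mask any region overlapping a masked region by more than iou_thresh."""
    iou = box_iou(boxes, boxes[masked])              # (num_regions, num_masked)
    overlap = (iou > iou_thresh).any(dim=1)
    mask = torch.zeros(boxes.size(0), dtype=torch.bool)
    mask[masked] = True
    return mask | overlap
```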

# 3.2. Multi-Task Learning

We consider a simple multi-task model where each task has a task-specific 'head' network that branches off a common, shared 'trunk' ViLBERT model. As such, we learn shared trunk parameters $\theta_{s}$ and a set of task-specific layers $\{\theta_t\}_{t=1}^{\mathcal{T}}$ for $\mathcal{T}$ tasks. Our goal is to learn parameters $\theta_{s} \cup \{\theta_{t}\}_{t=1}^{\mathcal{T}}$ that minimize loss across all tasks. Details on heads and other modifications follow.

Task Token. While relying on the same groundings, different tasks may still require the model to process inputs differently - e.g. referring expressions just require grounding while VQA must follow grounding with additional reasoning. To enable this, we augment the query with a task token $\text{TASK}_t$ such that the new input format is $\{\text{IMG}, v_1, \ldots, v_n, \text{CLS}, \text{TASK}_t, w_1, \ldots, w_m, \text{SEP}\}$ . The architecture can then leverage this task information in a bottom-up manner. In what follows, we describe the task-specific heads by task groups.
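
As an illustration of this interface change, here is a minimal sketch of how the task-augmented text sequence could be assembled; the `[TASK<t>]` naming and the tokenizer interface are assumptions (the paper does not prescribe them), shown only to make the input format concrete.

```python
def build_text_input(tokenizer, question: str, task_id: int):
    """Assemble {CLS, TASK_t, w_1, ..., w_m, SEP} for the linguistic stream.

    Assumes the tokenizer vocabulary has been extended with one [TASK<t>]
    token per dataset (12 in total for the All-Tasks model).
    """
    task_token = f"[TASK{task_id}]"
    tokens = ["[CLS]", task_token] + tokenizer.tokenize(question) + ["[SEP]"]
    return tokenizer.convert_tokens_to_ids(tokens)
```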
Vocab-Based VQA Output: We compute an overall image-query representation as an element-wise product between the holistic $h_{\mathrm{IMG}}$ and $h_{\mathrm{CLS}}$ representations. As in [2, 17], we treat vocab-based VQA as a multi-label classification task - assigning a soft target score to each answer based on its relevancy to the ground truth answer. We compute scores for a set of the pre-defined answers $A$ by using a two-layer MLP on top of the overall representation:

$$
P_v(A \mid I, Q) = \sigma\left(\mathrm{MLP}\left(h_{\mathrm{IMG}} \odot h_{\mathrm{CLS}}\right)\right) \tag{1}
$$

where $\sigma$ is the sigmoid function. Due to the answer vocabulary differences, VQA and VG QA share the MLP and answer vocabulary while GQA learns a separate one.
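
Eq. 1 is simply a small classification head over the fused holistic representation. A minimal PyTorch sketch follows; the hidden size, MLP width, and answer-vocabulary size are placeholder assumptions, not the released configuration.

```python
import torch
import torch.nn as nn

class VocabVQAHead(nn.Module):
    """Two-layer MLP over h_IMG * h_CLS with a sigmoid per answer (Eq. 1)."""

    def __init__(self, hidden_dim: int = 1024, num_answers: int = 3129):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(hidden_dim, 2 * hidden_dim),
            nn.ReLU(),
            nn.Linear(2 * hidden_dim, num_answers),
        )

    def forward(self, h_img: torch.Tensor, h_cls: torch.Tensor) -> torch.Tensor:
        # Element-wise product fuses the holistic image and text representations.
        return torch.sigmoid(self.mlp(h_img * h_cls))
```

With soft target scores per answer, such a head would typically be trained with a per-answer binary cross-entropy loss.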

Image Retrieval Output: Using the same overall representation, we compute an alignment score between image-caption pairs as:

$$
\operatorname{Rel}(I, Q) = W_i \left(h_{\mathrm{IMG}} \odot h_{\mathrm{CLS}}\right) \tag{2}
$$

where $W_{i} \in \mathbb{R}^{d \times 1}$ is shared across COCO and Flickr30k image retrieval tasks. As in [27], we train a 4-way multiple-choice against hard-negatives selected off-line and then fixed. Recent work has used online hard-negative mining [8, 23] but this is costly to compute.

Referring Expressions Output: We rerank a set of region proposals [50] given the referring expression. We pass the final representation $h_{v_i}$ for each image region $i$ into a learned projection $W_r \in \mathbb{R}^{d \times 1}$ to predict a matching score.

$$
\operatorname{Rel}(v_i, Q) = W_r h_{v_i} \tag{3}
$$

Note that $Q$ may be either a phrase, question or dialog based on different tasks (RefCOCO+/g, Visual7W, GuessWhat). $W_{r}$ is shared across all the referring expression tasks.
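
Eq. 3 amounts to a single learned linear projection applied to each region's final representation; the sketch below (dimensions assumed) is only illustrative.

```python
import torch
import torch.nn as nn

class RegionScorer(nn.Module):
    """Score each region proposal against the expression via W_r h_{v_i} (Eq. 3)."""

    def __init__(self, hidden_dim: int = 1024):
        super().__init__()
        self.w_r = nn.Linear(hidden_dim, 1)   # shared across all referring tasks

    def forward(self, h_regions: torch.Tensor) -> torch.Tensor:
        # h_regions: (num_regions, hidden_dim) -> (num_regions,) matching scores
        return self.w_r(h_regions).squeeze(-1)
```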

Multi-modal Verification Output: Taking $\mathrm{NLVR}^2$ as an example, the input is a concatenation of two images ($I_0$ and $I_1$) and a statement $Q$; the model must judge the validity of the statement given the images. We consider this a classification problem given an embedding that encodes the two image-statement pairs $(I_0, Q)$ and $(I_1, Q)$. The output probability is predicted by a 2-layer MLP with softmax:

$$
P_v(C \mid I_0, I_1, Q) = \operatorname{softmax}\left(\operatorname{MLP}\left(\left[\begin{array}{l} h_{\mathrm{IMG}}^{0} \odot h_{\mathrm{CLS}}^{0} \\ h_{\mathrm{IMG}}^{1} \odot h_{\mathrm{CLS}}^{1} \end{array}\right]\right)\right) \tag{4}
$$

where $[ ]$ is concatenation. For SNLI-VE, the input is a single image and statement. We thus learn a separate classifier of the same form that predicts the sentiment (entailment, neutral, contradiction) from the inputs.
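
A corresponding sketch of the pair-wise verification head in Eq. 4; again, the layer widths are assumptions for illustration, and the SNLI-VE variant would use a single image-statement pair with three output classes.

```python
import torch
import torch.nn as nn

class PairVerificationHead(nn.Module):
    """Concatenate the two fused image-statement embeddings and classify (Eq. 4)."""

    def __init__(self, hidden_dim: int = 1024, num_classes: int = 2):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, num_classes),
        )

    def forward(self, h_img0, h_cls0, h_img1, h_cls1):
        fused = torch.cat([h_img0 * h_cls0, h_img1 * h_cls1], dim=-1)
        return torch.softmax(self.mlp(fused), dim=-1)
```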

# 3.3. Large-Scale Multitask Training

With 6 task heads, 12 datasets, and over 4.4 million individual training instances – training our multi-task ViLBERT model is a daunting proposition. Multi-task learning (especially at this scale) poses significant challenges as learning objectives have complex and unknown dynamics and may compete [41]. Further, vision-and-language datasets vary significantly in size and difficulty. For instance, a single epoch of VG (our largest dataset) corresponds to 19.8 epochs of RefCOCOg (our smallest). Likewise, when trained in isolation RefCOCOg converges in 5K iterations whereas VQAv2 takes 84K iterations (over 16 times more). Below, we describe the details of our multi-task training approach and techniques to overcome these challenges.

Pretraining. All our models are pretrained on the Conceptual Captions dataset [39], including our self-supervised task modifications described in Sec. 3.1.

Round-Robin Batch-Level Sampling. We consider a round-robin batch-level sampling regime that cycles through each task from the beginning of multi-task training. As such, one multi-task iteration consists of each task forwarding a batch and updating parameters in sequence.

Dynamic Stop-and-Go. As noted earlier, different tasks have different difficulties and dataset sizes. Consequently, simply cycling through all tasks may drastically overtrain smaller tasks, leading to overfitting. Typically early-stopping provides a strong defense against this phenomenon; however, stopping a task in multi-task training introduces problems with catastrophic forgetting as the base network drifts over time due to other tasks. We introduce an intuitive but effective dynamic stop-and-go (DSG) mechanism to avoid these problems. We monitor the validation loss $s_t$ of each task $t$, computing it once per task epoch. If the performance improvement is less than 0.1% over 2 epochs, we consider the task Converged and shift it into stop mode. In DSG stop mode, a task only updates every iter-gap $(\Delta)$ iterations. If validation performance degrades by 0.5% from the task's best measured performance while in stop mode, the task is considered Diverged and is returned to DSG go. This procedure is shown in Algorithm 1.

Curriculum Learning. Inspired by prior multi-task literature [4, 31], we experimented with both curriculum and anti-curriculum strategies based on task difficulty. Specifically, for anti-curriculum we first train on the slowest-converging task-group G1 (Vocab-Based VQA) before starting full round-robin multi-task training. Inversely, for the curriculum setting we first train on our fastest

Algorithm 1: DSG for Multi-Task Learning

    n_t   ← number of iterations per epoch for task t
    Δ     ← size of gap between iterations in stop mode
    DSG_t ← go (for every task t)
    for i ← 1 to MaxIter:
        for t ∈ Tasks:
            if DSG_t = go or (DSG_t = stop and i mod Δ = 0):
                compute task loss L_t(θ) and gradient ∇_t(θ)
                update θ ← θ − ε ∇_t(θ), where θ = θ_s ∪ θ_t
            if i mod n_t = 0:
                compute validation score s_t on task t
                if DSG_t = go and Converged(s_t):        DSG_t ← stop
                else if DSG_t = stop and Diverged(s_t):  DSG_t ← go
    end

converging task-group G3 (Referring Expressions). Different from previous observations [31, 33], we found that using no curriculum leads to superior performance when combined with the other strategies proposed in this section.
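
Algorithm 1 can also be written out compactly. The sketch below is one reading of the procedure under stated assumptions: tasks are objects exposing a training step and a validation call, validation scores are higher-is-better, and the Converged/Diverged tests use the 0.1% and 0.5% thresholds quoted above.

```python
from dataclasses import dataclass, field

@dataclass
class TaskState:
    mode: str = "go"               # "go" or "stop"
    best: float = float("-inf")    # best validation score seen so far
    history: list = field(default_factory=list)

def converged(state: TaskState, rel_tol: float = 0.001) -> bool:
    """Improvement of less than 0.1% over the last 2 epochs (assumed test)."""
    if len(state.history) < 3:
        return False
    return state.history[-1] <= state.history[-3] * (1 + rel_tol)

def diverged(state: TaskState, rel_drop: float = 0.005) -> bool:
    """Validation score fell 0.5% below the task's best while in stop mode."""
    return state.history[-1] < state.best * (1 - rel_drop)

def dsg_train(tasks, max_iters: int, iters_per_epoch: dict, delta: int = 4):
    states = {t: TaskState() for t in tasks}
    for i in range(1, max_iters + 1):
        for t in tasks:                                  # round-robin over tasks
            s = states[t]
            if s.mode == "go" or (s.mode == "stop" and i % delta == 0):
                t.training_step()                        # forward/backward/update
            if i % iters_per_epoch[t] == 0:              # once per task epoch
                score = t.validate()
                s.history.append(score)
                s.best = max(s.best, score)
                if s.mode == "go" and converged(s):
                    s.mode = "stop"
                elif s.mode == "stop" and diverged(s):
                    s.mode = "go"
```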
Setting Multi-Task Hyperparameters. We follow a simple design philosophy – identify simple heuristics based on hyper-parameters tuned for each task in single-task training. This significantly reduces the burden of searching for joint-training hyper-parameters. See the supplement for a full list of per task learning rates, batch sizes, and other settings. Our code has been made available<sup>1</sup>.

Batch Size: For multi-task training, we keep the batch size tuned for each task in its single-task setting.

Warm-up Duration: We found it important to set warm-up duration relative to the largest dataset. Specifically, we run linear warm-up over $\eta * N$ iterations where $N$ is the max. number of iterations taken to train any dataset in the single-task setting. We observe significant performance degradation for harder tasks when warm-up was shorter. We set $\eta$ to 0.1 for our experiments.
Loss Scaling: Our model has shared and task-specific parameters and we found it important to maintain separate learning rates. For the shared base model, we set the base learning rate to the minimum over all single-task dataset parameters. To accommodate variable learning rates for each dataset, we scale the task loss for each dataset by the ratio of task target learning rate over base learning rate.
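
These two heuristics reduce to a few lines. A minimal sketch, with hypothetical per-task settings used only as an example:

```python
def warmup_iters(single_task_iters: dict, eta: float = 0.1) -> int:
    """Linear warm-up length is tied to the largest single-task training budget."""
    return int(eta * max(single_task_iters.values()))

def loss_scales(task_lrs: dict) -> tuple[float, dict]:
    """Shared trunk uses the minimum single-task LR; each task loss is scaled
    by the ratio of its target LR to that base LR."""
    base_lr = min(task_lrs.values())
    return base_lr, {task: lr / base_lr for task, lr in task_lrs.items()}

# Example with hypothetical per-task settings (not the released configuration):
base_lr, scales = loss_scales({"vqa": 4e-5, "retrieval_coco": 2e-5, "refcoco": 4e-5})
n_warmup = warmup_iters({"vqa": 84_000, "refcocog": 5_000})
```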

# 4. Experiments and Results

# 4.1. Single-Task Performance

To establish baseline performance for the ViLBERT architecture that forms the backbone of our multi-task experiments, we first train single-task models on top of the base ViLBERT architecture (Section 3) for each of our 12 datasets. Rows 1 and 2 in Table 2 show the performance of these models trained on the full and cleaned
<table><tr><td rowspan="3"></td><td rowspan="3">Clean</td><td colspan="3">Vocab-based VQA (G1)</td><td colspan="2">Image Retrieval (G2)</td><td colspan="5">Referring Expression (G3)</td><td colspan="2">Verification (G4)</td><td rowspan="3"># params (# models)</td><td rowspan="3">All Tasks Average</td></tr><tr><td>VQAv2</td><td>GQA</td><td>VG QA</td><td>COCO</td><td>Flickr30k</td><td>COCO</td><td>COCO+</td><td>COCOg</td><td>V7W</td><td>GW</td><td>NLVR2</td><td>SNLI-VE</td></tr><tr><td>test-dev</td><td>test-dev</td><td>val</td><td>test(R1)</td><td>test(R1)</td><td>test</td><td>test</td><td>test</td><td>test</td><td>test</td><td>testP</td><td>test</td></tr><tr><td>1 Single-Task (ST)</td><td></td><td>71.82</td><td>58.19</td><td>34.38</td><td>65.28</td><td>61.14</td><td>78.63</td><td>71.11</td><td>72.24</td><td>80.51</td><td>62.81</td><td>74.25</td><td>76.72</td><td>3B (12)</td><td>67.25</td></tr><tr><td>2 Single-Task (ST)</td><td>✓</td><td>71.24</td><td>59.09</td><td>34.10</td><td>64.80</td><td>61.46</td><td>78.17</td><td>69.47</td><td>72.21</td><td>80.51</td><td>62.53</td><td>74.25</td><td>76.53</td><td>3B (12)</td><td>67.03</td></tr><tr><td>3 Group-Tasks (GT)</td><td>✓</td><td>72.03</td><td>59.60</td><td>36.18</td><td>65.06</td><td>66.00</td><td>80.23</td><td>72.79</td><td>75.30</td><td>81.54</td><td>64.78</td><td>74.62</td><td>76.52</td><td>1B (4)</td><td>68.72</td></tr><tr><td>4 All-Tasks (AT)</td><td>✓</td><td>72.57</td><td>60.12</td><td>36.36</td><td>63.70</td><td>63.52</td><td>80.58</td><td>73.25</td><td>75.96</td><td>82.75</td><td>65.04</td><td>78.44</td><td>76.78</td><td>270M (1)</td><td>69.08</td></tr><tr><td>5 All-Tasksw/o G4</td><td>✓</td><td>72.68</td><td>62.09</td><td>36.74</td><td>64.88</td><td>64.62</td><td>80.76</td><td>73.60</td><td>75.80</td><td>83.03</td><td>65.41</td><td>-</td><td>-</td><td>266M (1)</td><td>-</td></tr><tr><td>6 GT finetune→ST</td><td>✓</td><td>72.61</td><td>59.96</td><td>35.81</td><td>66.26</td><td>66.98</td><td>79.94</td><td>72.12</td><td>75.18</td><td>81.57</td><td>64.56</td><td>74.47</td><td>76.34</td><td>3B (12)</td><td>68.81</td></tr><tr><td>7 AT finetune→ST</td><td>✓</td><td>72.92</td><td>60.48</td><td>36.56</td><td>65.46</td><td>65.14</td><td>80.86</td><td>73.45</td><td>76.00</td><td>83.01</td><td>65.15</td><td>78.87</td><td>76.73</td><td>3B (12)</td><td>69.55</td></tr><tr><td>8 AT finetune→ST</td><td></td><td>73.15</td><td>60.65</td><td>36.64</td><td>68.00</td><td>67.90</td><td>81.20</td><td>74.22</td><td>76.35</td><td>83.35</td><td>65.69</td><td>78.87</td><td>76.95</td><td>3B (12)</td><td>70.24</td></tr></table>

Table 2: Comparison of our multi-task models to single-task performance. We find multi-task training (rows 3-5) provides significant gains over single-task training (rows 1-2) while reducing the parameter count from over 3 billion to 270 million. Further, by following multi-task training with task-specific fine-tuning (rows 6-8), further gains can be made at the cost of increased parameters.

<table><tr><td rowspan="7">Relative PERF</td><td rowspan="2">G1 (VQAv2)</td><td colspan="4">Trained With</td><td rowspan="2">Avg.</td><td colspan="6">Trained With</td><td rowspan="2">Avg.</td></tr><tr><td>G1</td><td>G2</td><td>G3</td><td>G4</td><td>G1 & G2</td><td>G1&G3</td><td>G1 & G4</td><td>G2 & G3</td><td>G2 & G4</td><td>G3 & G4</td></tr><tr><td>0.46%</td><td>-</td><td>0.38%</td><td>0.38%</td><td>-0.20%</td><td>0.19%</td><td>-</td><td>-</td><td>-</td><td>0.63%</td><td>-0.08%</td><td>0.18%</td><td>0.24%</td></tr><tr><td>0.39%</td><td>0.78%</td><td>-</td><td>0.23%</td><td>-4.13%</td><td>-1.15%</td><td>-</td><td>1.24%</td><td>0.49%</td><td>-</td><td>-</td><td>-4.36%</td><td>-0.88%</td></tr><tr><td>0.29%</td><td>1.47%</td><td>0.67%</td><td>-</td><td>0.47%</td><td>0.47%</td><td>0.86%</td><td>-</td><td>0.19%</td><td>-</td><td>0.29%</td><td>-</td><td>0.44%</td></tr><tr><td>2.29%</td><td>1.47%</td><td>0.67%</td><td>-</td><td>1.48%</td><td>3.69%</td><td>3.22%</td><td>-</td><td>2.73%</td><td>-</td><td>-</td><td>-</td><td>3.21%</td></tr><tr><td>Avg.</td><td>1.04%</td><td>0.88%</td><td>0.43%</td><td>-1.36%</td><td>-</td><td>2.27%</td><td>2.23%</td><td>0.34%</td><td>1.68%</td><td>0.10%</td><td>-2.09%</td><td>-</td></tr></table>

Table 3: Pair-wise (left) and triple-wise (right) inter-group representative task analysis. Each entry is the relative performance change from single-task training for the row-task when jointly trained with the column-task(s).

datasets, respectively. As expected, reducing the training set size through cleaning results in lower performance in most cases. Our improvements over the pretraining objective (Sec. 3.1) result in better downstream task performance (71.82 vs. 70.55 on VQA and 61.46 vs. 58.20 on Flickr30k Recall@1). See the supplementary for the full comparison. Overall, our base architecture is competitive with prior work and a good starting point for multi-task learning.

# 4.2. Intra-Group Multi-task Performance

We begin with the most intuitive multi-task setting – jointly training tasks within the same groups. As grouped tasks are typically highly related, this is akin to some existing data augmentation practices (e.g. adding Visual Genome (VG) QA data when training VQA). Note this corresponds to four separate multi-task models – one for each group.

Table 2 row 3 shows the result of intra-group multi-task training. Comparing with single-task models trained on the same data (row 2), we see meaningful improvements of between 0.37 $(\mathrm{NLVR}^2)$ and 4.54 (Flickr30k retrieval) points for 11 out of 12 tasks (only SNLI-VE did not improve). Comparing to row 1, we see that intra-group multi-task training overcomes the data loss from cleaning, with an average score of 68.72, outperforming the single-task models trained on the full datasets which have an average score of 67.25. Further, the total number of parameters drops by a factor of $3\times$, going from 12 full models to only 4.

# 4.3. Inter-Group Multi-task Performance

Representative Task Analysis. We next consider the interplay between different task-groups. For efficiency, we consider multi-task training with representative tasks from each group - specifically VQA (G1), Retrieval Flickr30k (G2), Visual7W (G3), and NLVR $^2$ (G4). These were selected to maximize diversity in underlying image sources. We examine their relationships by jointly training all pairs and triplets of tasks under our multi-task training approach.

Table 3 (left) shows the results of training each representative task pair. Each entry is the percent change from single-task performance for the row-task when jointly trained with the column-task. As such, the Avg. row (bottom) shows the mean impact each column-task has on other tasks, and likewise the Avg. column (right) shows the mean impact other tasks have on each row-task. For instance, we find that adding VQA (G1) benefits other tasks with an average improvement of $+1.04\%$. Interestingly, adding $\mathrm{NLVR}^2$ (G4) degrades other tasks on average $(-1.36\%)$ while making significant gains itself $(+1.48\%)$. This is primarily due to a $-4.13\%$ interaction with G2. Table 3 (right) shows all task triplets. Gains in the paired experiments are not simply additive. In the pair-wise analysis, G3 gained $+0.39\%$ and $+0.78\%$ from G1 and G2 respectively. As before, G4 has some strong negative effects on other groups ($-4.36\%$ for G2 with G3 & G4), but these effects can be regulated by other tasks ($+0.49\%$ for G2 with G1 & G4).

<table><tr><td rowspan="2">Task</td><td rowspan="2">Split</td><td rowspan="2">SOTA</td><td colspan="2">UNITER [8]</td><td>OursAT</td><td>OursAT->ST</td></tr><tr><td>BERTB</td><td>BERTL</td><td>BERTB</td><td>BERTB</td></tr><tr><td>VQA</td><td>test-dev</td><td>-</td><td>72.27</td><td>73.24</td><td>72.57</td><td>73.15</td></tr><tr><td>VG QA</td><td>val</td><td>-</td><td>-</td><td>-</td><td>36.36</td><td>36.64</td></tr><tr><td>GQA</td><td>test-dev</td><td>60.00 [45]</td><td>-</td><td>-</td><td>60.12</td><td>60.65</td></tr><tr><td>IR COCO</td><td>test (R1)</td><td>68.50 [23]</td><td>-</td><td>-</td><td>63.70</td><td>68.00</td></tr><tr><td>IR Flickr30k</td><td>test (R1)</td><td>-</td><td>71.50</td><td>73.66</td><td>63.52</td><td>67.90</td></tr><tr><td>RefCOCO</td><td>test</td><td>-</td><td>80.21</td><td>80.88</td><td>80.58</td><td>81.20</td></tr><tr><td>RefCOCO+</td><td>test</td><td>-</td><td>72.90</td><td>73.73</td><td>73.25</td><td>74.22</td></tr><tr><td>RefCOCOg</td><td>test</td><td>-</td><td>74.41</td><td>75.77</td><td>75.96</td><td>76.35</td></tr><tr><td>Visual 7W</td><td>test</td><td>72.53 [16]</td><td>-</td><td>-</td><td>82.75</td><td>83.35</td></tr><tr><td>GuessWhat</td><td>test</td><td>61.30 [13]</td><td>-</td><td>-</td><td>65.04</td><td>65.69</td></tr><tr><td>NLVR2</td><td>testP</td><td>-</td><td>77.87</td><td>79.50</td><td>78.44</td><td>78.87</td></tr><tr><td>SNLI-VE</td><td>test</td><td>-</td><td>78.02</td><td>78.98</td><td>76.78</td><td>76.95</td></tr><tr><td rowspan="2"># params (# models)</td><td></td><td></td><td>602M</td><td>2.1B</td><td>270M</td><td>3B</td></tr><tr><td></td><td></td><td>(7 x 86M)</td><td>(7 x 303M)</td><td>(1 x 270M)</td><td>(12 x 250M)</td></tr></table>

Full Multi-task Results. We move to our main result - a single model trained on all 12 datasets. The results of this All-Tasks (AT) model are shown in Table 2 row 4. This model outperforms independent single-task models trained on the same data (row 2) for 11 out of 12 tasks and improves the average score by 2.05 points (69.08 vs. 67.03). We reiterate for emphasis: average performance improves by 2.05 points while reducing the number of parameters from over 3 billion to 270 million (a $12 \times$ reduction). This also holds in comparison with single-task models trained on the full datasets (row 1), by a similar margin of 1.83 points.

Our AT model also outperforms the Group-Task (GT) models (row 3) despite having $4\mathrm{x}$ fewer parameters (avg. 69.08 vs 68.72). This implies that despite their diversity, tasks across different groups can benefit from joint training.

We observed from the representative task analysis that G4 tends to negatively affect other groups during joint training. To validate this observation on all tasks, we train an All-Task model without G4 (row 5). This model achieves a higher avg. score of 67.96 for $\mathrm{G1 + G2 + G3}$ compared to the full AT model's 67.39. $\mathrm{NLVR}^2$ (G4) presents two images per description and often one matches while the other does not. Despite the alignment with one image, the instance as a whole is negative. We speculate that this supervision may interfere with the standard caption-image alignment objective in Flickr30k.

# 4.4. Multi-Task Learning as Pretraining

For some applications, single task performance may be paramount and justify storing a task-specific model. Even then, fine-tuning from a multi-task trained model may allow the model to take advantage of the additional, diverse supervision captured during multi-task training. Following [26], we finetune our trained multi-task models (GT and AT) on each downstream task and show results in Table 2. Rows 6 and 7 show that finetuning from the all-task model (AT) outperforms finetuning from the group-task models (GT) with

Table 4: Comparison to recent SOTA. For image retrieval (IR) COCO and Flickr we report R1 scores on the 1K test set.

<table><tr><td rowspan="2"></td><td rowspan="2">VQA</td><td colspan="3">COCO Retrieval</td><td colspan="3">Flickr Retrieval</td><td>FG</td></tr><tr><td>R1</td><td>R5</td><td>R10</td><td>R1</td><td>R5</td><td>R10</td><td>R1</td></tr><tr><td>OmniNet [36]</td><td>55.76</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>HDC [33]</td><td>69.28</td><td>57.40</td><td>88.40</td><td>95.60</td><td>56.10</td><td>82.90</td><td>89.40</td><td>57.39</td></tr><tr><td>Ours</td><td>72.70</td><td>65.16</td><td>91.00</td><td>96.20</td><td>65.06</td><td>88.66</td><td>93.52</td><td>64.61</td></tr></table>

Table 5: Comparison with other multi-task models. VQA score is on test-dev and the retrieval tasks on their respective 1K test split. For Flickr Grounding (FG) we report R1 on Flickr30K test.

an average score of 69.51 vs. 68.81. For comparison with our multi-task models, these are finetuned on the cleaned datasets, which are $11\%$ smaller on average. To compare to prior work, we also finetune on the full dataset for individual tasks (Row 8) and observe further improvements. Recall that our multi-task model was trained on cleaned data, so there is no possibility of a test leak here. These models outperform single-task models without multi-task pretraining (row 1) by a large margin (70.24 vs. 67.25 avg. score).

# 4.5. Comparison with Existing Work

In Table 4 we compare with existing state-of-the-art. We draw special comparison with the recent UNITER [8] architecture as it is similar to our base ViLBERT model. Like ViLBERT, UNITER is a general BERT-based vision-and-language architecture pretrained through self-supervised tasks and then finetuned for each downstream task. We show two UNITER columns corresponding to their underlying BERT model - either Base B or Large L. Our ViLBERT model uses the smaller $\mathrm{BERT}_{\mathrm{B}}$ . Our single all-task model $(\mathrm{Ours}_{\mathrm{AT}})$ achieves competitive performance to state-of-the-art task-specific models. Our single-task finetuned models $(\mathrm{Ours}_{\mathrm{AT} - > \mathrm{ST}})$ surpass state-of-the-art on 7 out of 12 tasks.
Table 5 compares our method with other recently proposed multi-modal, multi-task learning approaches - OmniNet [36] and Hierarchical Dense Co-Attention (HDC) [33]. OmniNet is trained on part-of-speech tagging, image captioning, visual question answering, and video activity recognition, while HDC is trained on image caption retrieval, visual question answering, and visual grounding. We train a multi-task model on the same tasks and cleaned datasets used in HDC [33]. Flickr Grounding is a new task that we include for this comparison. Our multi-task model outperforms these approaches by a large margin.

# 5. Analysis and Ablation Study

Ablations on task token and training strategies. To verify our design choices, we perform ablations for different task token granularity and multi-task training strategies. The results are shown in Table 6. We report average group and overall average performance. Detailed breakdown for each task can be found in supplement.
For task tokens, our default setting is with a different
<table><tr><td></td><td>Task Token</td><td>Dynamic Stop-and-Go</td><td>G1</td><td>G2</td><td>G3</td><td>G4</td><td>All Tasks Average</td></tr><tr><td colspan="8">AT (our)</td></tr><tr><td>1 token per dataset</td><td>✓</td><td>✓</td><td>56.35</td><td>63.61</td><td>75.52</td><td>77.61</td><td>69.08</td></tr><tr><td>2 token per head</td><td>✓</td><td>✓</td><td>55.95</td><td>61.48</td><td>75.35</td><td>77.37</td><td>68.52</td></tr><tr><td>3w/o task token</td><td></td><td>✓</td><td>55.67</td><td>62.55</td><td>75.38</td><td>76.73</td><td>68.53</td></tr><tr><td>4w/o DSG</td><td>✓</td><td></td><td>55.50</td><td>62.92</td><td>75.24</td><td>76.31</td><td>68.52</td></tr><tr><td>5w/ curriculum</td><td></td><td></td><td>54.68</td><td>61.21</td><td>75.19</td><td>76.70</td><td>67.24</td></tr><tr><td>6w/ anti-curriculum</td><td></td><td></td><td>55.82</td><td>59.58</td><td>73.69</td><td>75.94</td><td>67.98</td></tr><tr><td>7vanilla multitask</td><td></td><td></td><td>54.09</td><td>61.45</td><td>75.28</td><td>76.71</td><td>67.92</td></tr></table>

Table 6: Ablations on our design choices and comparison to curriculum and anti-curriculum learning multi-task approaches.

task token per dataset (12 total, Row 1). We compare this with two ablations: one task token per output head (4 total, Row 2) and no task tokens (Row 3). We observe that task-specific tokens lead to better performance compared to head-based tokens (avg. 69.08 vs. 68.52) and no task tokens (avg. 69.08 vs. 68.53). This shows that task-aware feature embedding is useful even within the same output space; e.g. per-task tokens may help differentiate noun phrases and pointing questions in Referring Expression.
For multi-task training schedule, we compare our dynamic stop-and-go (DSG) (Row 3) with Curriculum (Row 5) and Anti-Curriculum (Row 6) approaches discussed in Sec. 3. We consider convergence rate as a measure of task difficulty. For Curriculum, we first train tasks in G4 and then train all tasks together (easier $\longrightarrow$ harder). For Anti-Curriculum, we train G1 tasks first and then train on all tasks together (harder $\longrightarrow$ easier). Table 6 shows our dynamic stop-and-go training schedule outperforms anti-curriculum (avg. 68.52 vs. 67.98) and curriculum (avg. 68.53 vs. 67.24). Row 7 shows results of a 'vanilla', round-robin training scheme with no task tokens or training scheduling. The average score of vanilla multitask is close to anti-curriculum (67.92 vs. 67.98). Consistent with prior work [31], performance on harder tasks (G1) is worse compared to anti-curriculum. Our full training regime outperforms this significantly (avg. 69.08 vs. 67.92).
Behavior of Dynamic Stop-and-Go training. To characterize our dynamic stop-and-go training scheme, we visualize the dynamic training schedule in Fig. 2 (left) – bold lines indicate normal go training and thin lines are stop states when datasets receive sparser updates at a fixed iteration gap (every 4th iteration here). We see that smaller datasets quickly converge and enter stop state training early. As the base model drifts over time, they periodically return to full go state training to adjust. Interestingly, after some cycles of this, they enter the stop state and continue with only sparse updates for the rest of training.

Another aspect of dynamic stop-and-go training is the sparsity of updates in the stop state. Fig. 2 (right) shows the mean normalized accuracy for each group for multi-task models trained with different iteration gaps $(\Delta)$. We observe that raising $\Delta$ (i.e. updating more sparsely) improves performance initially but degrades it for larger values.


Figure 2: Left: Visualization of Dynamic stop-and-go during multi-task training. Solid lines indicate go mode while thin lines indicate stop mode. Right: Mean accuracy (normalized group-wise for easier comparison) for each group with different iter-gap $\Delta$ for Dynamic stop-and-go.

Absolute and per-task scores are provided in the supplement.

Multi-Task visual grounding consistency. Given the common shared base model, one question is whether multitask models exhibit more consistent visual groundings than independent task-specific models. For example, does a model that correctly answers "What color is the largest dog?" also correctly ground the referring expression "largest dog"? To assess this, we consider 1500 images from the RefCOCO/+ test sets that also have VQA annotations such that for each image $I_{i}$ there are associated questions $\{q^{(i)}\}$ and referring expressions $\{r^{(i)}\}$. To measure the overlap in visual concepts between a question $q_{j}^{(i)}$ and reference $r_{k}^{(i)}$, we count overlapping nouns and adjectives (identified using a part-of-speech tagger [47]) and denote this $d(q_{j}^{(i)}, r_{k}^{(i)})$. Armed with this notion of similarity, we consider each question-reference pair for each image (total 111,275 combinations) and compute a weighted accuracy. A pair is considered correct if the question was answered correctly and the referent was localized. Each pair is weighted by its overlap $d(q_{j}^{(i)}, r_{k}^{(i)})$. Note that if $q_{j}^{(i)}$ and $r_{k}^{(i)}$ do not have any common visual concept ($d(q_{j}^{(i)}, r_{k}^{(i)}) = 0$), the correctness of this pair does not affect the overall metric.
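
The weighted accuracy can be computed directly from per-pair statistics. A minimal sketch, assuming each pair has already been scored for answer correctness, localization, and noun/adjective overlap $d$:

```python
def grounding_consistency(pairs) -> float:
    """Weighted accuracy over question-reference pairs.

    Each element of `pairs` is (d, vqa_correct, ref_localized), where d is the
    number of overlapping nouns/adjectives; pairs with d == 0 carry no weight.
    """
    num, den = 0.0, 0.0
    for d, vqa_correct, ref_localized in pairs:
        den += d
        if vqa_correct and ref_localized:
            num += d
    return num / den if den > 0 else 0.0
```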

We evaluate our Single-Task (ST), All-Task (AT), and finetuned-from-All-Task (AT->ST) models on the proposed metric. AT consistently outperforms ST (58.30% vs. 55.40%) and AT->ST achieves the best performance (64.64%). This shows that our model trained on multiple tasks achieves better visual grounding consistency across different tasks. Further analysis can be found in the supplement.


Regularizing effects of multi-task learning. We find multi-task training to have a regularizing effect on tasks which overfit when trained separately. In Fig. 4 we plot the training and validation curves for two tasks (SNLI-VE and Flickr Grounding) where single-task training overfits quickly. On the other hand, when trained in a multi-task setup with all other tasks, the validation score improves and there is no overfitting.

Qualitative examples. Figure 3 shows example outputs of



Figure 3: Our single model $(\mathrm{Ours}_{\mathrm{AT}})$ can perform a multitude of V&L tasks: caption and image retrieval, question answering, grounding phrases, guessing image regions based on a dialog, verifying facts about a pair of images, natural language inferences from an image, etc. Here we show outputs of our model for a variety of inputs (that mimic tasks from the 12 datasets it has been trained on).



Figure 4: Multi-Task training acts as a regularizer.



our models. Due to space limitation, we provide extensive visualizations in the supplement.

# 6. Related Work

Multi-task learning. There has been substantial interest in multi-task learning [6,38], i.e. training a single model for multiple tasks at once. Advances in multi-task learning have been developed in the context of vision [5,20,32,42,52,53], language [10, 25, 26, 31, 37], and robotics [18, 34, 46]. Among them, Standley et al. [41] studies how different vision tasks are related to each other. Strezoski et al. [42] studies layer-wise task routing for different vision tasks. McCann et al. [31] pose ten natural language processing (NLP) tasks as question answering tasks. MT-DNN [26] combines multi-task learning with pretraining [14] to improve the learning of text representations. Despite this progress, it is still challenging to train a single model on many tasks that can outperform or even match their single-task counterparts. To enhance the training scheme, BAM [9] applies knowledge distillation where single-task models teach the multi-task model. Raffel et al. [37] explore different sampling strategies for NLP tasks. We focus on multi-task learning for V&L tasks.
|
| 249 |
+
|
| 250 |
+
Vision and language. While we address 12 V&L tasks in Sec. 2.1, we do miss some families of tasks including image and video captioning [7], visual dialog [12], embodied question answering [11] and instruction following [3]. Different from earlier work [16,22,28,29,50,51,55] which de
|
| 251 |
+
|
| 252 |
+
sign bespoke architecture for different tasks, recently proposed models for V&L [1, 8, 23, 24, 27, 43, 45, 54] provide a common architecture that can be pretrained using self-supervised losses and adapted to many vision and language tasks. However, these models still require task specific finetuning, which may easily overfit on small dataset. Our single model jointly learns from multiple V&L tasks and achieves competitive performance. Further, multi-task training provides a better visolinguistic representation for task specific finetuning than self-supervised objectives.
Multi-task V&L learning. Recent work [33, 36, 40] also explores multi-task learning in V&L. HDC [33] trains a multi-task network on multiple datasets and uses a hyperparameter search method to determine which layer output should be taken for each task. Our method does not need any hyperparameter search to choose outputs for different tasks and outperforms both [36] and [33]. [40] is a concurrent work that does multi-task training on 12 dialogue datasets (only two with images). Our work differs in that we focus on a variety of vision and language tasks.
# 7. Conclusion
In this work, we develop a training regime and experimental setting for large-scale, multi-modal, multi-task learning. As one part of this, we introduce a novel task scheduling approach to help avoid over- or under-training tasks with differing sizes or difficulties. Using this framework, we explore the relationships between 12 vision-and-language datasets - our single multi-task model outperforms 12 single-task models. We find multi-task training can lead to significant gains over independent task training. Further, we show that multi-task learning is an effective pretraining task for training state-of-the-art single-task models.
Acknowledgement. The GaTech effort was supported in part by NSF, AFRL, DARPA, ONR YIPs, ARO PECASE, Amazon. The views and conclusions contained herein are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of the U.S. Government, or any sponsor.
# References
[1] Chris Alberti, Jeffrey Ling, Michael Collins, and David Reitter. Fusion of detected objects in text for visual question answering. arXiv preprint arXiv:1908.05054, 2019. 1, 2, 8
|
| 265 |
+
[2] Peter Anderson, Xiaodong He, Chris Buehler, Damien Teney, Mark Johnson, Stephen Gould, and Lei Zhang. Bottom-up and top-down attention for image captioning and visual question answering. In CVPR, pages 6077-6086, 2018. 3
|
| 266 |
+
[3] Peter Anderson, Qi Wu, Damien Teney, Jake Bruce, Mark Johnson, Niko Sünderhauf, Ian Reid, Stephen Gould, and Anton van den Hengel. Vision-and-language navigation: Interpreting visually-grounded navigation instructions in real environments. In CVPR, 2018. 8
|
| 267 |
+
[4] Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pages 41-48. ACM, 2009. 4
|
| 268 |
+
[5] Felix JS Bragman, Ryutaro Tanno, Sebastien Ourselin, Daniel C Alexander, and Jorge Cardoso. Stochastic filter groups for multi-task cnns: Learning specialist and generalist convolution kernels. In Proceedings of the IEEE International Conference on Computer Vision, pages 1385-1394, 2019. 8
|
| 269 |
+
[6] Rich Caruana. Multitask learning. Machine learning, 28(1):41-75, 1997. 8
|
| 270 |
+
[7] Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO captions: Data collection and evaluation server. CoRR, abs/1504.00325, 2015. 2, 8
|
| 271 |
+
[8] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. Uniter: Learning universal image-text representations. arXiv preprint arXiv:1909.11740, 2019. 3, 6, 8
|
| 272 |
+
[9] Kevin Clark, Minh-Thang Luong, Urvashi Khandelwal, Christopher D Manning, and Quoc V Le. BAM! Born-again multi-task networks for natural language understanding. arXiv preprint arXiv:1907.04829, 2019. 8
|
| 273 |
+
[10] Ronan Collobert and Jason Weston. A unified architecture for natural language processing: Deep neural networks with multitask learning. In Proceedings of the 25th international conference on Machine learning, pages 160-167. ACM, 2008. 8
|
| 274 |
+
[11] Abhishek Das, Samyak Datta, Georgia Gkioxari, Stefan Lee, Devi Parikh, and Dhruv Batra. Embodied Question Answering. In CVPR, 2018. 8
|
| 275 |
+
[12] Abhishek Das, Satwik Kottur, Khushi Gupta, Avi Singh, Deshraj Yadav, Jose M. F. Moura, Devi Parikh, and Dhruv Batra. Visual dialog. In CVPR, 2017. 8
|
| 276 |
+
[13] Harm De Vries, Florian Strub, Sarath Chandar, Olivier Pietquin, Hugo Larochelle, and Aaron Courville. GuessWhat?! Visual object discovery through multi-modal dialogue. In CVPR, 2017. 2, 6
|
| 277 |
+
[14] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 3, 8
|
| 278 |
+
|
| 279 |
+
[15] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the V in VQA matter: Elevating the role of image understanding in visual question answering. In CVPR, 2017. 2
|
| 280 |
+
[16] Ronghang Hu, Marcus Rohrbach, Jacob Andreas, Trevor Darrell, and Kate Saenko. Modeling relationships in referential expressions with compositional modular networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017. 6, 8
|
| 281 |
+
[17] Drew A Hudson and Christopher D Manning. Gqa: a new dataset for compositional question answering over real-world images. arXiv preprint arXiv:1902.09506, 2019. 2, 3
|
| 282 |
+
[18] Max Jaderberg, Volodymyr Mnih, Wojciech Marian Czarnecki, Tom Schaul, Joel Z Leibo, David Silver, and Koray Kavukcuoglu. Reinforcement learning with unsupervised auxiliary tasks. arXiv preprint arXiv:1611.05397, 2016. 8
|
| 283 |
+
[19] Sahar Kazemzadeh, Vicente Ordonez, Mark Matten, and Tamara Berg. ReferItGame: Referring to objects in photographs of natural scenes. In EMNLP, 2014. 2
|
| 284 |
+
[20] Iasonas Kokkinos. UberNet: Training a universal convolutional neural network for low-, mid-, and high-level vision using diverse datasets and limited memory. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6129-6138, 2017. 8
|
| 285 |
+
[21] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 123(1):32-73, 2017. 2
|
| 286 |
+
[22] Kuang-Huei Lee, Xi Chen, Gang Hua, Houdong Hu, and Xiaodong He. Stacked cross attention for image-text matching. In Proceedings of the European Conference on Computer Vision (ECCV), pages 201-216, 2018. 8
|
| 287 |
+
[23] Gen Li, Nan Duan, Yuejian Fang, Daxin Jiang, and Ming Zhou. Unicoder-vl: A universal encoder for vision and language by cross-modal pre-training. arXiv preprint arXiv:1908.06066, 2019. 1, 2, 3, 6, 8
|
| 288 |
+
[24] Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557, 2019. 1, 2, 8
|
| 289 |
+
[25] Xiaodong Liu, Jianfeng Gao, Xiaodong He, Li Deng, Kevin Duh, and Ye-Yi Wang. Representation learning using multitask deep neural networks for semantic classification and information retrieval. 2015. 8
|
| 290 |
+
[26] Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jianfeng Gao. Multi-task deep neural networks for natural language understanding. arXiv preprint arXiv:1901.11504, 2019. 6, 8
|
| 291 |
+
[27] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. arXiv preprint arXiv:1908.02265, 2019. 1, 2, 3, 8
|
| 292 |
+
[28] Jiasen Lu, Jianwei Yang, Dhruv Batra, and Devi Parikh. Hierarchical question-image co-attention for visual question answering. In Advances In Neural Information Processing Systems, pages 289–297, 2016. 8
|
| 293 |
+
|
| 294 |
+
[29] Jiasen Lu, Jianwei Yang, Dhruv Batra, and Devi Parikh. Neural baby talk. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7219-7228, 2018. 8
|
| 295 |
+
[30] Junhua Mao, Jonathan Huang, Alexander Toshev, Oana Camburu, Alan L Yuille, and Kevin Murphy. Generation and comprehension of unambiguous object descriptions. In CVPR, 2016. 2
|
| 296 |
+
[31] Bryan McCann, Nitish Shirish Keskar, Caiming Xiong, and Richard Socher. The natural language decathlon: Multitask learning as question answering. arXiv preprint arXiv:1806.08730, 2018. 4, 7, 8
|
| 297 |
+
[32] Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3994-4003, 2016. 8
|
| 298 |
+
[33] Duy-Kien Nguyen and Takayuki Okatani. Multi-task learning of hierarchical vision-language representation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 10492-10501, 2019. 4, 6, 8
|
| 299 |
+
[34] Emilio Parisotto, Jimmy Lei Ba, and Ruslan Salakhutdinov. Actor-mimic: Deep multitask and transfer reinforcement learning. arXiv preprint arXiv:1511.06342, 2015. 8
|
| 300 |
+
[35] Bryan A Plummer, Liwei Wang, Chris M Cervantes, Juan C Caicedo, Julia Hockenmaier, and Svetlana Lazebnik. Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. In ICCV, 2015. 2
|
| 301 |
+
[36] Subhojeet Pramanik, Priyanka Agrawal, and Aman Hussain. Omninet: A unified architecture for multi-modal multi-task learning. arXiv preprint arXiv:1907.07804, 2019. 6, 8
|
| 302 |
+
[37] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683, 2019. 8
|
| 303 |
+
[38] Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017. 8
|
| 304 |
+
[39] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In ACL, 2018. 3, 4
|
| 305 |
+
[40] Kurt Shuster, Da Ju, Stephen Roller, Emily Dinan, Y-Lan Boureau, and Jason Weston. The dialogue dodecathlon: Open-domain knowledge and image grounded conversational agents, 2019. 8
|
| 306 |
+
[41] Trevor Standley, Amir R Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? arXiv preprint arXiv:1905.07553, 2019. 4, 8
|
| 307 |
+
[42] Gjorgji Strezoski, Nanne van Noord, and Marcel Worring. Many task learning with task routing. In Proceedings of the IEEE International Conference on Computer Vision, pages 1375–1384, 2019. 8
|
| 308 |
+
[43] Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. Vl-bert: Pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530, 2019. 1, 2, 8
|
| 309 |
+
|
| 310 |
+
[44] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In ACL, 2019. 2
|
| 311 |
+
[45] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490, 2019. 1, 2, 6, 8
|
| 312 |
+
[46] Yee Whye Teh, Victor Bapst, Wojciech M Czarnecki, John Quan, James Kirkpatrick, Raia Hadsell, Nicolas Heess, and Razvan Pascanu. Distral: Robust multitask reinforcement learning. In Advances in Neural Information Processing Systems, pages 4496-4506, 2017. 8
|
| 313 |
+
[47] Kristina Toutanova, Dan Klein, Christopher D Manning, and Yoram Singer. Feature-rich part-of-speech tagging with a cyclic dependency network. In Proceedings of the 2003 Conference of the North American Chapter of the Association for Computational Linguistics on Human Language Technology-Volume 1, pages 173–180. Association for computational linguistics, 2003. 7
|
| 314 |
+
[48] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In NeurIPS, 2017. 3
|
| 315 |
+
[49] Ning Xie, Farley Lai, Derek Doran, and Asim Kadav. Visual entailment task for visually-grounded language learning. arXiv preprint arXiv:1811.10582, 2018. 2
|
| 316 |
+
[50] Licheng Yu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, Mohit Bansal, and Tamara L Berg. Mattnet: Modular attention network for referring expression comprehension. In CVPR, 2018. 3, 8
|
| 317 |
+
[51] Rowan Zellers, Yonatan Bisk, Ali Farhadi, and Yejin Choi. From recognition to cognition: Visual commonsense reasoning. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 8
|
| 318 |
+
[52] Tianzhu Zhang, Bernard Ghanem, Si Liu, and Narendra Ahuja. Robust visual tracking via structured multi-task sparse learning. International journal of computer vision, 101(2):367-383, 2013. 8
|
| 319 |
+
[53] Zhanpeng Zhang, Ping Luo, Chen Change Loy, and Xiaoou Tang. Facial landmark detection by deep multi-task learning. In European conference on computer vision, pages 94-108. Springer, 2014. 8
|
| 320 |
+
[54] Luowei Zhou, Hamid Palangi, Lei Zhang, Houdong Hu, Jason J Corso, and Jianfeng Gao. Unified vision-language pre-training for image captioning and vqa. arXiv preprint arXiv:1909.11059, 2019. 1, 2, 8
|
| 321 |
+
[55] Yuke Zhu, Oliver Groth, Michael Bernstein, and Li Fei-Fei. Visual7w: Grounded question answering in images. In CVPR, 2016. 2, 8
|
12in1multitaskvisionandlanguagerepresentationlearning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e7df86c132a3ec601a69b208ed6e9aca78c2d3117c0fc82a7abc7f287222725
size 488771
12in1multitaskvisionandlanguagerepresentationlearning/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:183d55febecda7be2d5c22f11215ece0d5918e1af930a41c0c4e6fc89c6d7e20
size 416731
15keypointsisallyouneed/e5ac5156-93c5-41e3-8de5-74f59d1fd56d_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:286bde086b028c36b3741ad58f9de5eb6f887ba4f3c662d73b312bdd2dc827e0
size 84147
15keypointsisallyouneed/e5ac5156-93c5-41e3-8de5-74f59d1fd56d_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df194b06026180a44eb69ef6423503e221844c89c0d939a0229997bd9f8abdfd
size 104315
15keypointsisallyouneed/e5ac5156-93c5-41e3-8de5-74f59d1fd56d_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e515bc658233a009c6b70cfad9ef66cefe73660ba57a61413bdc645397566a88
size 1489487
15keypointsisallyouneed/full.md
ADDED
@@ -0,0 +1,325 @@
# 15 Keypoints Is All You Need
|
| 2 |
+
|
| 3 |
+
Michael Snower†* Asim Kadav‡ Farley Lai‡ Hans Peter Graf‡
|
| 4 |
+
†Brown University ‡NEC Labs America
|
| 5 |
+
|
| 6 |
+
michael_SNower@brown.edu {asim,farleylai,hpg}@nec-labs.com
|
| 7 |
+
|
| 8 |
+
# Abstract
|
| 9 |
+
|
| 10 |
+
Pose tracking is an important problem that requires identifying unique human pose-instances and matching them temporally across different frames of a video. However, existing pose tracking methods are unable to accurately model temporal relationships and require significant computation, often computing the tracks offline. We present an efficient multi-person pose tracking method, KeyTrack, that only relies on keypoint information without using any RGB or optical flow information to track human keypoints in real-time. Keypoints are tracked using our Pose Entailment method, in which, first, a pair of pose estimates is sampled from different frames in a video and tokenized. Then, a Transformer-based network makes a binary classification as to whether one pose temporally follows another. Furthermore, we improve our top-down pose estimation method with a novel, parameter-free, keypoint refinement technique that improves the keypoint estimates used during the Pose Entailment step. We achieve state-of-the-art results on the PoseTrack'17 and the PoseTrack'18 benchmarks while using only a fraction of the computation required by most other methods for computing the tracking information.
|
| 11 |
+
|
| 12 |
+
# 1. Introduction
|
| 13 |
+
|
| 14 |
+
Multi-person Pose Tracking is an important problem for human action recognition and video understanding. It occurs in two steps: first, estimation, where keypoints of individual persons are localized; second, the tracking step, where each keypoint is assigned to a unique person. Pose tracking methods rely on deep convolutional neural networks for the first step [48, 47, 57, 52], but approaches in the second step vary. This is a challenging problem because tracks must be created for each unique person, while overcoming occlusion and complex motion. Moreover, individuals may appear visually similar because they are wearing the same uniform. It is also important for tracking to be performed online. Commonly used methods, such
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
Figure 1. They look alike, how do we decide who's who? In the Pose Entailment framework, given a video frame, we track individuals by comparing pairs of poses, using temporal motion cues to determine who's who. Using a novel tokenization scheme to create pose pair inputs interpretable by Transformers [49], our network divides its attention equally between both poses in matching pairs, and focuses more on a single pose in non-matching pairs because motion cues between keypoints are not present. We visualize this above; bright red keypoints correspond to high attention.
|
| 20 |
+
|
| 21 |
+
as optical flow and graph convolutional networks (GCNs) are effective at modeling spatio-temporal keypoint relationships [45], [35], but are dependent on high spatial resolution, making them computationally costly. Non-learning based methods, such as spatial consistency, are faster than the convolution-based methods, but are not as accurate.
|
| 22 |
+
|
| 23 |
+
To address the above limitations, we propose an efficient pose tracking method, KeyTrack, that leverages temporal relationships to improve multi-person pose estimation and tracking. Hence, KeyTrack follows the tracking by detection approach by first localizing humans, estimating human pose keypoints and then encoding the keypoint information in a novel entailment setting using transformer building blocks [49]. Similar to the textual entailment task where one has to predict if one sentence follows one another, we propose the Pose Entailment task, where the model learns to make a binary classification if two keypoints pose temporally follow or entail each other. Hence, rather than extracting information from a high-dimensional image representation using deep CNNs, we extract information from a sentence of 15 tokens, and each token corresponds to a key-
|
| 24 |
+
|
| 25 |
+
point on a pose. Similar to how BERT tokenizes words [14], we propose an embedding scheme for pose data that captures spatio-temporal relationships and feed our transformer network these embeddings. Since these embeddings contain information beyond spatial location, our network outperforms convolution-based approaches in terms of accuracy and speed, particularly at very low resolutions.
|
| 26 |
+
|
| 27 |
+
Additionally, in order to improve the keypoint estimates used by the transformer network, we propose a Temporal Object Keypoint Similarity (TOKS) method. TOKs refines the pose estimation output by augmenting missed detections and thresholding low quality estimates using a keypoint similarity metric. TOKs adds no learned parameters to the estimation step, and is superior to existing bounding box propagation methods that often rely on NMS and optical flow. KeyTrack makes the following contributions:
|
| 28 |
+
|
| 29 |
+
1. KeyTrack introduces Pose Entailment, where a binary classification is made as to whether two poses from different timesteps are the same person. We model this task in a transformer-based network which learns temporal pose relationships even in datasets with complex motion. Furthermore, we present a tokenization scheme for pose information that allows transformers to outperform convolutions at low spatial resolutions when tracking keypoints.
|
| 30 |
+
2. KeyTrack introduces a temporal method for improving keypoint estimates. TOKs is more accurate than bounding box propagation, faster than a detector ensemble, and does not require learned parameters.
|
| 31 |
+
|
| 32 |
+
Using the above methods, we develop an efficient multi-person pose tracking pipeline which sets a new SOTA on the PoseTrack test set. We achieve $61.2\%$ tracking accuracy on the PoseTrack'17 Test Set and $66.6\%$ on the PoseTrack'18 Val set using a model that consists of just 0.43M parameters in the tracking step. This portion of our pipeline is 500X more efficient than the leading optical flow method [45]. Our training is performed on a single NVIDIA 1080Ti GPU. Since it does not rely on RGB or optical flow information in the tracking step, our model is suitable for pose tracking with non-visual pose estimation sensors that provide only 15 keypoints for each person [3].
|
| 33 |
+
|
| 34 |
+
# 2. Related Work
|
| 35 |
+
|
| 36 |
+
We are inspired by related work on pose estimation and tracking methods, and recent work on applying the transformer network to video understanding.
|
| 37 |
+
|
| 38 |
+
Pose estimation Early work on pose estimation uses graphical models to learn spatial correlations and interactions between various joints [5, 16]. These models often perform poorly due to occlusions and long range temporal relationships, which need to be explicitly modeled [12, 42, 51]. More recent work involves using convolutional neural networks (CNNs) to directly regress cartesian
|
| 39 |
+
|
| 40 |
+
<table><tr><td>Method</td><td>Estimation</td><td>Detection Improvement</td><td>Tracking</td></tr><tr><td>Ours</td><td>HRNet</td><td>Temporal OKS</td><td>Pose Entailment</td></tr><tr><td>HRNet [45]</td><td>HRNet</td><td>BBox Prop.</td><td>Optical Flow</td></tr><tr><td>POINet [40]</td><td>VGG, T-VGG</td><td>-</td><td>Ovonic Insight Net</td></tr><tr><td>MDPN [20]</td><td>MDPN</td><td>Ensemble</td><td>Optical Flow</td></tr><tr><td>LightTrack [35]</td><td>Simple Baselines</td><td>Ensemble/BBox Prop.</td><td>GCN</td></tr><tr><td>ProTracker [19]</td><td>3D Mask RCNN</td><td>-</td><td>IoU</td></tr><tr><td>Affinity Fields [38]</td><td>VGG/STFields</td><td>-</td><td>STFields</td></tr><tr><td>STEmbeddings [28]</td><td>STEmbeddings</td><td>-</td><td>STEmbeddings</td></tr><tr><td>JointFlow</td><td>Siamese CNN</td><td>-</td><td>Flow Fields</td></tr></table>
|
| 41 |
+
|
| 42 |
+
Table 1. How different approaches address each step of the Pose Tracking problem. Our contributions are in bold.
|
| 43 |
+
|
| 44 |
+
coordinates of the joints [48] or to generate heatmaps of the probability of a joint's location [47, 57, 52]. A majority of the convolutional approaches can be classified into top-down and bottom-up methods – the top-down methods use a separate detection step to identify person candidates [21, 37, 10, 24, 37]. The single person pose estimation step is then performed on these person candidates. Bottom-up methods calculate keypoints from all candidates and then correlate these keypoints into individual human joints [53, 25]. The latter method is more efficient since all keypoints are calculated in a single step; however, the former is more accurate since the object detection step limits the regression boundaries. However, top-down methods work poorly on small objects and recent work (HRNet) [45] uses parallel networks at different resolutions to maximize spatial information. PoseWarper [8] uses a pair of labeled and unlabeled frames to predict human pose by learning the pose-warping using deformable convolutions. Finally, since the earliest applications of deep learning to pose estimation [48], iterative predictions have improved accuracy. Pose estimation has shown to benefit from cascaded predictions [10] and pose-refinement methods [17, 34] refine the pose estimation results of previous stages using a separate post-processing network. In that spirit, our work, KeyTrack relies on HRNet to generate keypoints and refines keypoint estimates by temporally aggregating and suppressing low confidence keypoints with TOKS instead of commonly used bounding box propagation approaches.
|
| 45 |
+
|
| 46 |
+
Pose tracking Methods Pose tracking methods assign unique IDs to individual keypoints, estimated with techniques described in the previous subsection, to track them through time [4, 26, 27, 1]. Some methods perform tracking by learning spatio-temporal pose relationships across video frames using convolutions [50, 40, 35]. [40], in an end-to-end fashion, predicts track ids with embedded visual features from its estimation step, making predictions in multiple temporal directions. [35] uses a GCN to track poses based on spatio-temporal keypoint relationships. These networks require high spatial resolutions. In contrast, we create keypoint embeddings from the keypoint's spatial location and other information making our network less reliant
|
| 47 |
+
|
| 48 |
+

|
| 49 |
+
Figure 2. a) Keypoints are estimated with HRNet. b) TOKS improves detection accuracy. c) Pose pairs are collected from multiple past timesteps. Poses of the same color have the same track id, the color black indicates the track id is unknown. d) Each pair is tokenized independently from the other pairs. e) Our Transformer Matching Network calculates match scores independently for each pair. f) The maximum match score is greedily chosen and the corresponding track id is assigned.
|
| 50 |
+
|
| 51 |
+
on spatial resolution, and thus more efficient. We can also model more fine-grained spatio-temporal relationships.
|
| 52 |
+
|
| 53 |
+
Among non-learned tracking methods, optical flow propagates poses from one frame to the next to determine which pose they are most similar to in the next frame [45, 20]. This improves over spatial consistency, which measures the IoU between bounding boxes of poses from temporally adjacent frames [19]. Other methods use graph-partitioning based approaches to group pose tracks [26, 27, 29]. Another method, PoseFlow [55], uses inter/intra-frame pose distance and NMS to construct pose flows. However, our method does not require hard-coded parameters during inference, this limits the ability of non-learned methods to model scenes with complex motion and requires time-intensive manual tuning. Table 1 shows top-down methods similar to our work as well as competitive bottom-up methods.
|
| 54 |
+
|
| 55 |
+
Transformer Models Recently, there have been successful implementations of transformer-based models for image and video input modalities often substituting convolutions and recurrence mechanisms. These methods can efficiently model higher-order relationships between various scene elements unlike pair-wise methods [11, 22, 41, 56]. They have been applied for image classification [39], visual question-answering [30, 31, 46, 60], action-recognition [23, 32], video captioning [44, 61] and other video problems. VideoAction Transformer [18] solves the action localization problem using transformers by learning the context and interactions for every person in the video. BERT [13] uses transformers by pretraining a transformer-based network in a multi-task transfer learning scheme over the unsupervised tasks of predicting missing words or next sentences. Instead, in a supervised setting, KeyTrack uses transformers to learn spatio-temporal keypoint relationships for the visual problem of pose tracking.
|
| 56 |
+
|
| 57 |
+
# 3. Method
|
| 58 |
+
|
| 59 |
+
# 3.1. Overview of Our Approach
|
| 60 |
+
|
| 61 |
+
We now describe the keypoint estimation and tracking approach used in KeyTrack as shown in Figure 2. For frame $\mathcal{F}^t$ at timestep $t$ , we wish to assign a track id to the $i$ th
|
| 62 |
+
|
| 63 |
+
pose $p^{t,i} \in \mathcal{P}^t$ . First, each of the pose's $k^j \in \mathcal{K}$ keypoints are detected. This is done by localizing a bounding box around each pose with an object detector and then estimating keypoint locations in the box. Keypoint predictions are improved with temporal OKS (TOKS). Please see 3.3 for more details. From here, this pose with no tracking id, $p_{\emptyset}^{t,i}$ , is assigned its appropriate one. This is based on the pose's similarity to a pose in a previous timestep, which has an id, $p_{id}^{t - \delta ,j}$ . Similarity is measured with the match score, $m_{id}^{t - \delta ,j}$ , using Pose Entailment (3.2).
|
| 64 |
+
|
| 65 |
+
False negatives are an inevitable problem in keypoint detection, and hurt the downstream tracking step because poses with the correct track id may appear to be no longer in the video. We mitigate this by calculating match scores for poses in not just one previous frame, but multiple frames $\{\mathcal{F}^1,\mathcal{F}^2,\dots \mathcal{F}^\delta \}$ . Thus, we compare to each pose $p_{id}^{t - d,j}$ where $1\leqslant d\leqslant \delta$ and $1\leqslant j\leqslant |\mathcal{P}^{t - d}|$ . In practice, we limit the number of poses we compare to in a given frame to the $n$ spatially nearest poses. This is just as accurate as comparing to everyone in the frame and bounds our runtime to $O(\delta n)$ . This gives us a set of match scores $\mathcal{M}$ , and we assign $p_{\emptyset}^{t,i}$ the track id corresponding to the maximum match score $m^{*} = \max (\mathcal{M})$ , where $id^{*} = m_{id}^{*}$ . Thus, we assign the tracking id to the pose, $p_{id*}^{t,i}$ .
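The assignment procedure above reduces to a small greedy loop. The sketch below is an illustrative reimplementation, not the authors' released code: `match_score` stands in for the Pose Entailment classifier of Sec. 3.2, and the `Pose` container and `spatial_distance` helper are assumptions made only for this example.

```python
from dataclasses import dataclass
from typing import Callable, List, Optional

@dataclass
class Pose:
    keypoints: list                  # [(x, y, confidence), ...] for the |K| = 15 keypoints
    track_id: Optional[int] = None

def spatial_distance(a: Pose, b: Pose) -> float:
    # mean L2 distance between corresponding keypoints, used to pick the n nearest candidates
    return sum(((ax - bx) ** 2 + (ay - by) ** 2) ** 0.5
               for (ax, ay, _), (bx, by, _) in zip(a.keypoints, b.keypoints)) / len(a.keypoints)

def assign_track_id(query: Pose,
                    prev_frames: List[List[Pose]],            # poses from frames t-1 ... t-delta
                    match_score: Callable[[Pose, Pose, int], float],
                    n_nearest: int = 3) -> Optional[int]:
    """Greedily assign `query` the track id of its best-matching pose in the last delta frames."""
    best_score, best_id = float("-inf"), None
    for d, frame_poses in enumerate(prev_frames, start=1):
        # restrict to the n spatially nearest poses, bounding the work to O(delta * n)
        candidates = sorted(frame_poses, key=lambda p: spatial_distance(query, p))[:n_nearest]
        for cand in candidates:
            score = match_score(query, cand, d)               # m_id^{t-d, j}
            if score > best_score:
                best_score, best_id = score, cand.track_id
    return best_id
```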
|
| 66 |
+
|
| 67 |
+
# 3.2. Pose Entailment
|
| 68 |
+
|
| 69 |
+
To effectively solve the multi-person pose tracking problem, we need to understand how human poses move through time based on spatial joint configurations as well as in the presence of multiple persons and occluding objects. Hence, we need to learn if a pose in timestep $t$ , can be inferred from timestep $t - 1$ . Textual entailment provides us with a similar framework in the NLP domain where one needs to understand if one sentence can be implied from the next. More specifically, the textual entailment model classifies whether a premise sentence implies a hypothesis sentence in a sentence pair [9]. The typical approach to this problem consists of first projecting the pair of sentences to an embedding space and then feeding them through a neural network which outputs a binary classification for the sentence pair.
|
| 70 |
+
|
| 71 |
+
Hence, we propose the Pose Entailment problem. More formally, we seek to classify whether a pose in a timestep
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
Figure 3. Orange box: Visualizations to intuitively explain our tokenization. In the Position column, the matching poses are spatially closer together than the non-matching ones. This is because their spatial locations in the image are similar. The axis limit is 432 because the image has been downsampled to width * height = 432. In the following column, the matching contours are similar, since the poses are in similar orientations. The Segment axis in the last column represents the temporal distance of the pair. Green box: A series of transformers (Tx) compute self-attention, extracting the temporal relationship between the pair. Binary classification follows.
|
| 75 |
+
|
| 76 |
+
$p^{t - \delta}$ , i.e. the premise, and a pose in timestep $p^t$ , i.e. the hypothesis, are the same person. To solve this problem, instead of using visual feature based similarity that incurs large computational cost, we use the set of human keypoints, $\mathcal{K}$ , detected by our pose estimator. It is computationally efficient to use these as there are a limited number of them (in our case $|\mathcal{K}| = 15$ ), and they are not affected by unexpected visual variations such as lighting changes in the tracking step. In addition, as we show in the next section, keypoints are amenable to tokenization. Thus, during the tracking stage, we use only the keypoints estimated by the detector as our pose representation.
|
| 77 |
+
|
| 78 |
+
Tokenizing Pose Pairs The goal of tokenization is to transform pose information into a representation that facilitates learning spatio-temporal human pose relationships. To achieve this goal, for each pose token, we need to provide (i) the spatial location of each keypoint in the scene to allow the network to spatially correlate keypoints across frames, (ii) type information of each keypoint (i.e. head, shoulder etc.) to learn spatial joint relationships in each human pose, and finally (iii) the temporal location index for each keypoint within a temporal window $\delta$ , to learn temporal keypoint transitions. Hence, we use three different types of tokens for each keypoint as shown in Figure 3. There are 2 poses, and thus $2|\mathcal{K}|$ tokens of each type. Each token is linearly projected to an embedding, $E \in \mathbb{R}^{2|\mathcal{K}|,H}$ where $H$ is the transformer hidden size. Embeddings are a learned lookup table. We now describe the individual tokens in de
|
| 79 |
+
|
| 80 |
+
tail:
|
| 81 |
+
|
| 82 |
+
Position Token: The absolute spatial location of each keypoint is the Position token, $\rho$ , and its values fall in the range $[1, w^{\mathcal{F}} h^{\mathcal{F}}]$ . In practice, the absolute spatial location of a downsampled version of the original frame is used. This not only improves the efficiency of our method, but also makes it more accurate, as is discussed in 5.2. We give a general expression for the Position tokens of poses $p^t$ and $p^{t - \delta}$ , where $\rho_j^{p^t}$ corresponds to the Position token of the $j$ th keypoint of $p^t$ :
|
| 83 |
+
|
| 84 |
+
$$
\left\{\rho_{1}^{p^{t}}, \rho_{2}^{p^{t}}, \dots, \rho_{|\mathcal{K}|}^{p^{t}}, \rho_{1}^{p^{t-\delta}}, \rho_{2}^{p^{t-\delta}}, \dots, \rho_{|\mathcal{K}|}^{p^{t-\delta}}\right\} \tag{1}
$$
|
| 87 |
+
|
| 88 |
+
Type Token: The Type token corresponds to the unique type of the keypoint: e.g. the head, left shoulder, right ankle, etc... The Type keypoints fall in the range $[1,|\mathcal{K}|]$ . These add information about the orientation of the pose and are crucial for achieving high accuracy at low resolution, when keypoints have similar spatial locations. A general expression for the Type tokens of poses $p^t$ and $p^{t - \delta}$ is below, where $j^{p^t}$ corresponds to the Type token of the $j$ th keypoint of $p^t$ :
|
| 89 |
+
|
| 90 |
+
$$
\left\{1^{p^{t}}, 2^{p^{t}}, \dots, |\mathcal{K}|^{p^{t}}, 1^{p^{t-\delta}}, 2^{p^{t-\delta}}, \dots, |\mathcal{K}|^{p^{t-\delta}}\right\} \tag{2}
$$
|
| 93 |
+
|
| 94 |
+
Segment Token: The Segment token indicates the number of timesteps the pose is from the current one. The Segment token is in the range $[1, \delta]$, where $\delta$ is a chosen constant (we set $\delta$ to 4). This also allows our method to adapt to irregular frame rates; or, if a person is not detected in a frame, we can look back two timesteps, conditioning our model on a temporal token value of 2 instead of 1.
|
| 97 |
+
|
| 98 |
+
$$
\left\{1^{p^{t}}, 1^{p^{t}}, \dots, 1^{p^{t}}, \delta^{p^{t-\delta}}, \delta^{p^{t-\delta}}, \dots, \delta^{p^{t-\delta}}\right\} \tag{3}
$$
|
| 101 |
+
|
| 102 |
+
After each token is embedded, we sum the embeddings, $E_{sum} = E_{Position} + E_{Type} + E_{Segment}$ , to combine the information from each class of token. This is fed to our Transformer Matching Network.
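A minimal sketch of the three token streams and the summed embedding is given below, assuming a PyTorch implementation. The downsampled frame size (24x18 = 432 positions, echoing the resolution discussed later) and the embedding-table sizes are illustrative choices, not the authors' code.

```python
import torch
import torch.nn as nn

# Sketch of the Position / Type / Segment tokens (Eqs. 1-3) and their summed embeddings.
NUM_KEYPOINTS, HIDDEN, MAX_DELTA = 15, 128, 4
FRAME_W, FRAME_H = 24, 18                                   # downsampled frame, 24 * 18 = 432

pos_embed  = nn.Embedding(FRAME_W * FRAME_H + 1, HIDDEN)    # absolute spatial location token
type_embed = nn.Embedding(NUM_KEYPOINTS + 1, HIDDEN)        # keypoint type (head, shoulder, ...)
seg_embed  = nn.Embedding(MAX_DELTA + 1, HIDDEN)            # temporal distance of the pose

def tokenize_pair(kps_t, kps_prev, delta):
    """kps_t, kps_prev: lists of (x, y) keypoint coordinates in the downsampled frame."""
    def positions(kps):
        return [y * FRAME_W + x + 1 for x, y in kps]        # flatten (x, y) into [1, w*h]
    pos = torch.tensor(positions(kps_t) + positions(kps_prev))
    typ = torch.tensor(list(range(1, NUM_KEYPOINTS + 1)) * 2)
    seg = torch.tensor([1] * NUM_KEYPOINTS + [delta] * NUM_KEYPOINTS)
    # E_sum = E_Position + E_Type + E_Segment, shape [2|K|, H]
    return pos_embed(pos) + type_embed(typ) + seg_embed(seg)
```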
|
| 103 |
+
|
| 104 |
+
Transformer Matching Network: The goal of our network is to learn motion cues indicative of whether a pose pair matches. The self-attention mechanism of transformers allows us to accomplish this by learning which temporal relationships between the keypoints are representative of a match. Transformers compute scaled dot-product attention over a set of Queries $(Q)$ , Keys $(K)$ , and Values $(V)$ each of which is a linear projection of the input $E_{sum} \in \mathbb{R}^{2|\mathcal{K}|,H}$ . We compute the softmax attention with respect to every keypoint embedding in the pair, with the input to the softmax operation being of dimensions $[2|K|,2|K|]$ . In fact, we can generate heatmaps from the attention distribution over the pair's keypoints, as displayed in 5.3. In practice, we use multi-headed attention, which leads to the heads specializing, also visualized.
|
| 105 |
+
|
| 106 |
+
Additionally, we use an attention mask to account for keypoints which are not visible due to occlusion. This attention mask is implemented exactly as the attention mask in [49], resulting in no attention being paid to the keypoints which are not visible due to occlusion. The attention equation is as follows, and we detail each operation in a single transformer in Table 5 of the Supplement:
|
| 107 |
+
|
| 108 |
+
$$
\operatorname{Attention}(Q, K, V) = \operatorname{softmax}\left(\frac{QK^{T}}{\sqrt{d_{k}}}\right)V \tag{4}
$$
|
| 111 |
+
|
| 112 |
+
After computing self-attention through a series of stacked transformers, similar to BERT, we feed this representation to a Pooler, which "pools" the input, by selecting the first token in the sequence and then inputting that token into a learned linear projection. This is fed to another linear layer, functioning as a binary classifier, which outputs the likelihood two given poses match. We govern training with a binary cross entropy loss providing our network only with the supervision of whether the pose pair is a match. See Figure 3 for more details.
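The sketch below assembles such a matching network from standard PyTorch transformer blocks, using the hyper-parameters reported later in the paper (4 blocks, hidden size 128, 4 heads). The tanh in the pooler follows BERT and is an assumption, as is the exact convention for masking occluded keypoints; this is an illustration, not the authors' implementation.

```python
import torch
import torch.nn as nn

class TransformerMatchingNet(nn.Module):
    """Stacked transformers over the summed embeddings, a pooler over the first
    token, and a binary match classifier."""
    def __init__(self, hidden=128, heads=4, blocks=4, ff=512):
        super().__init__()
        layer = nn.TransformerEncoderLayer(d_model=hidden, nhead=heads,
                                           dim_feedforward=ff, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=blocks)
        self.pooler = nn.Linear(hidden, hidden)
        self.classifier = nn.Linear(hidden, 1)

    def forward(self, e_sum, occluded_mask=None):
        # e_sum: [batch, 2|K|, hidden]; occluded_mask: bool [batch, 2|K|], True = not visible
        h = self.encoder(e_sum, src_key_padding_mask=occluded_mask)
        pooled = torch.tanh(self.pooler(h[:, 0]))       # "pool" the first token in the sequence
        return self.classifier(pooled).squeeze(-1)      # match logit for the pose pair

# Training would use binary cross entropy on whether the pair is a match, e.g.
# loss = nn.BCEWithLogitsLoss()(model(e_sum), labels.float())
```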
|
| 113 |
+
|
| 114 |
+
# 3.3. Improved Multi-Frame Pose Estimation
|
| 115 |
+
|
| 116 |
+
We now describe how we improve keypoint estimation. Top-down methods suffer from two primary classes of errors from the object detector: 1. Missed bounding boxes 2. Imperfect bounding boxes. We use the box detections from adjacent timesteps in addition to the one in the current
|
| 117 |
+
|
| 118 |
+
timestep to make pose predictions, thereby combating these issues. This is based on the intuition that the spatial location of each person does not change dramatically from frame to frame when the frame rate is relatively high, typical in most modern datasets and cameras. Thus, pasting a bounding box for the $ith$ person in frame, $\mathcal{F}^{t - 1}$ , $p^{t - 1,i}$ , in its same spatial location in frame $\mathcal{F}^t$ is a good approximation of the true bounding box for person $p^{t,i}$ . Bounding boxes are enlarged by a small factor to account for changes in spatial location from frame to frame. Previous approaches, such as [54], use standard non-maximal suppression (NMS) to choose which of these boxes to input into the estimator. Though this addresses the 1st issue of missed boxes, it does not fully address the second issue. NMS relies on the confidence score of the boxes. We make pose predictions for the box in the current frame and temporally adjacent boxes. Then we use object-keypoint similarity (OKS) to determine which of the poses should be kept. This is more accurate than using NMS because we use the confidence scores of the keypoints, not the bounding boxes. The steps of TOKs are enumerated below:
|
| 119 |
+
|
| 120 |
+
# Algorithm 1 Temporal OKS
|
| 121 |
+
|
| 122 |
+
Input: $p^{t - 1}, p^t, \mathcal{F}^t$
|
| 123 |
+
|
| 124 |
+
1. Retrieve bounding box, $B$ , enclosing $p^{t - 1}$ , and dilate by a factor, $\alpha$
|
| 125 |
+
2. Estimate a new pose, $p^t$ , in $\mathcal{F}^t$ from $B$
|
| 126 |
+
3. Use OKS to determine which pose to keep, $p^* = OKS(p'^t, p^t)$
|
| 127 |
+
|
| 128 |
+
Output: $p^*$
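For concreteness, Algorithm 1 can be sketched in code as below. The OKS falloff constants, the dilation factor, and the rule for choosing between the two candidate poses (mean keypoint confidence) are assumptions made for illustration; `estimator` stands in for the HRNet pose estimator and is not defined here.

```python
import numpy as np

KAPPA = np.full(15, 0.05)        # per-keypoint falloff constants (placeholder values)

def dilate(box, alpha):
    x0, y0, x1, y1 = box
    cx, cy, w, h = (x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0) * alpha, (y1 - y0) * alpha
    return (cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2)

def box_area(box):
    x0, y0, x1, y1 = box
    return max(x1 - x0, 1e-6) * max(y1 - y0, 1e-6)

def oks(pose_a, pose_b, scale):
    """Object-keypoint similarity between two pose estimates.
    pose_*: arrays of shape [|K|, 3] holding (x, y, confidence); scale: object area."""
    d2 = np.sum((pose_a[:, :2] - pose_b[:, :2]) ** 2, axis=1)
    vis = (pose_a[:, 2] > 0) & (pose_b[:, 2] > 0)
    e = d2 / (2 * scale * KAPPA ** 2)
    return np.exp(-e)[vis].mean() if vis.any() else 0.0

def temporal_oks(prev_box, frame, estimator, pose_t, thresh=0.35, alpha=1.25):
    box = dilate(prev_box, alpha)                  # 1. enlarge the previous frame's box
    pose_prop = estimator(frame, box)              # 2. estimate a new pose in the current frame
    # 3. if both estimates describe the same person, keep the one with higher keypoint confidence
    if oks(pose_prop, pose_t, scale=box_area(box)) > thresh:
        return pose_prop if pose_prop[:, 2].mean() > pose_t[:, 2].mean() else pose_t
    return pose_t
```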
|
| 129 |
+
|
| 130 |
+
# 4. Experiments
|
| 131 |
+
|
| 132 |
+
# 4.1. The PoseTrack Dataset
|
| 133 |
+
|
| 134 |
+
The PoseTrack 2017 training, validation, and test sets consist of 250, 50, and 208 videos, respectively. Annotations for the test set are held out. We evaluate on the PoseTrack 17 Test set because the PoseTrack 18 Test set has yet to be released. We use the official evaluation server on the test set, which can be submitted to up to 4 times. [4, 1] We conduct the rest of comparisons on the PoseTrack ECCV 2018 Challenge Validation Set, a superset of PoseTrack 17 with 550 training, 74 validation, and 375 test videos [2].
|
| 135 |
+
|
| 136 |
+
Metrics Per-joint Average Precision (AP) is used to evaluate keypoint estimation based on the formulation in [6]. Multi-Object Tracking Accuracy (MOTA [7], [33]) scores tracking. It penalizes False Negatives (FN), False Positives (FP), and ID Switches (IDSW) under the following formulation for each keypoint $k^i$ , where $t$ is the current timestep. Our final MOTA is the average of all keypoints $k^i \in \mathcal{K}$ :
|
| 137 |
+
|
| 138 |
+
$$
1 - \frac{\sum_{t}\left(FN_{t}^{i} + FP_{t}^{i} + IDSW_{t}^{i}\right)}{\sum_{t} GT_{t}^{i}}
$$
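As a small worked sketch of the metric above, the per-keypoint MOTA can be computed from the accumulated counts (the dictionary field names are illustrative only):

```python
def mota(per_frame_counts):
    """per_frame_counts: iterable of dicts with FN, FP, IDSW and GT counts for one
    keypoint type, one dict per frame."""
    fn = sum(c["FN"] for c in per_frame_counts)
    fp = sum(c["FP"] for c in per_frame_counts)
    sw = sum(c["IDSW"] for c in per_frame_counts)
    gt = sum(c["GT"] for c in per_frame_counts)
    return 1.0 - (fn + fp + sw) / gt

# e.g. two frames with 10 GT keypoints each, one miss and one id switch overall:
# mota([{"FN": 1, "FP": 0, "IDSW": 0, "GT": 10},
#       {"FN": 0, "FP": 0, "IDSW": 1, "GT": 10}])  -> 0.9
```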
|
| 141 |
+
|
| 142 |
+
<table><tr><td rowspan="2">Tracking Method</td><td rowspan="2">Detection Method</td><td rowspan="2">AP ↑ Total</td><td colspan="8">% IDSW ↓</td><td rowspan="2">MOTA ↑ Total</td></tr><tr><td>Head</td><td>Shou</td><td>Elb</td><td>Wri</td><td>Hip</td><td>Knee</td><td>Ankl</td><td>Total</td></tr><tr><td>Pose Entailment</td><td></td><td></td><td>0.7</td><td>0.7</td><td>0.6</td><td>0.6</td><td>0.6</td><td>0.7</td><td>0.7</td><td>0.7</td><td>99.3</td></tr><tr><td>GCN</td><td>GT Boxes, GT Keypoints</td><td>100</td><td>1.4</td><td>1.4</td><td>1.4</td><td>1.5</td><td>1.4</td><td>1.6</td><td>1.6</td><td>1.5</td><td>98.5</td></tr><tr><td>Optical Flow</td><td></td><td></td><td>1.1</td><td>1.2</td><td>1.2</td><td>1.2</td><td>1.2</td><td>1.3</td><td>1.4</td><td>1.2</td><td>98.7</td></tr><tr><td>Pose Entailment</td><td></td><td></td><td>0.9</td><td>0.9</td><td>0.8</td><td>0.8</td><td>0.7</td><td>0.8</td><td>0.8</td><td>0.8</td><td>72.2</td></tr><tr><td>GCN</td><td>GT Boxes, Predicted Keypoints</td><td>86.7</td><td>1.6</td><td>1.6</td><td>1.6</td><td>1.6</td><td>1.3</td><td>1.5</td><td>1.4</td><td>1.5</td><td>71.6</td></tr><tr><td>Optical Flow</td><td></td><td></td><td>1.2</td><td>1.2</td><td>1.2</td><td>1.1</td><td>1.0</td><td>1.1</td><td>1.1</td><td>1.1</td><td>71.8</td></tr><tr><td>Pose Entailment</td><td></td><td></td><td>0.9</td><td>1.0</td><td>0.9</td><td>0.8</td><td>0.7</td><td>0.8</td><td>0.8</td><td>0.8</td><td>66.6</td></tr><tr><td>GCN</td><td>Predicted Boxes, Predicted Keypoints</td><td>81.6</td><td>1.7</td><td>1.7</td><td>1.7</td><td>1.7</td><td>1.4</td><td>1.5</td><td>1.4</td><td>1.6</td><td>65.9</td></tr><tr><td>Optical Flow</td><td></td><td></td><td>1.3</td><td>1.2</td><td>1.2</td><td>1.2</td><td>1.1</td><td>1.1</td><td>1.1</td><td>1.1</td><td>66.3</td></tr></table>
|
| 143 |
+
|
| 144 |
+
Our approach assigns track ids and estimates keypoints independently. This is also true of competing methods with MOTA scores closest to ours. In light of this, we use the same keypoint estimations to compare Pose Entailment to competing tracking methods in 4.2. This makes the IDSW the only component of the MOTA metric that changes, and we calculate $\% IDSW^{i} = \sum_{t} IDSW_{t}^{i} / \sum_{t} GT_{t}^{i}$ . In 4.3, we compare our estimation method to others without evaluating tracking. Finally, in 4.4, we compare our entire tracking pipeline to other pipelines.
|
| 145 |
+
|
| 146 |
+
# 4.2. Improving Tracking with Pose Entailment
|
| 147 |
+
|
| 148 |
+
We compare with the optical flow tracking method [54] and the Graph Convolutional Network (GCN) [35], as shown in Figure 4. We do not compare with IoU because GCN and optical flow [35, 54] have been shown to outperform it, nor do we compare to the network from [40] because it is trained in an end-to-end fashion. We follow [54] for Optical Flow and use the pre-trained GCN provided by [35]. IDSW is calculated with three sets of keypoints. Regardless of the keypoint AP, we find that KeyTrack's Pose Entailment maintains a consistent improvement over other methods. We incur approximately half as many IDSW as the GCN and $30\%$ fewer than Optical Flow.
|
| 149 |
+
|
| 150 |
+
Our improvement over GCN stems from the fact that it relies only on keypoint spatial locations. By using additional information beyond the spatial location of each keypoint, our model can make better inferences about the temporal relationship of poses. The optical flow CNNs are not specific to pose tracking and require manual tuning. For example, to scale the CNN's raw output, which is normalized from -1 to 1, to pixel flow offsets, a universal constant, given by the author of the original optical flow network (not [54]), must be applied. However, we found that this constant required adjustment. In contrast, our learned method requires no tuning during inference.
|
| 151 |
+
|
| 152 |
+
# 4.3. Improving Detection with TOKS
|
| 153 |
+
|
| 154 |
+
Table 2 shows that TOKS offers a greater improvement in keypoint detection quality than other methods. In the absence of
|
| 155 |
+
|
| 156 |
+
Figure 4. Compares accuracy of tracking methods on the PoseTrack 18 Val set, given the same keypoints. GT stands for Ground Truth, "predicted" means a neural net is used. Lower % IDSW is better, higher MOTA is better. "Total" averages all joint scores.
|
| 157 |
+
|
| 158 |
+
<table><tr><td rowspan="2">Detection Method</td><td colspan="8">AP</td></tr><tr><td>Head</td><td>Shou</td><td>Elb</td><td>Wri</td><td>Hip</td><td>Knee</td><td>Ankl</td><td>Total</td></tr><tr><td>GT</td><td>90.2</td><td>91.4</td><td>88.7</td><td>83.6</td><td>81.4</td><td>86.1</td><td>83.7</td><td>86.7</td></tr><tr><td>Det.</td><td>68.8</td><td>72.8</td><td>73.1</td><td>68.4</td><td>68.0</td><td>72.4</td><td>69.8</td><td>70.4</td></tr><tr><td>Det. + Box Prop.</td><td>79.3</td><td>82.0</td><td>80.8</td><td>75.6</td><td>72.4</td><td>76.5</td><td>72.4</td><td>77.1</td></tr><tr><td>Det. + TOKS@0.3</td><td>83.6</td><td>86.6</td><td>84.9</td><td>78.9</td><td>76.4</td><td>80.2</td><td>76.2</td><td>81.1</td></tr><tr><td>Det. + TOKS@0.35 (ours)</td><td>84.1</td><td>87.2</td><td>85.3</td><td>79.2</td><td>77.1</td><td>80.6</td><td>76.5</td><td>81.6</td></tr><tr><td>Det. + TOKS@0.5</td><td>83.9</td><td>87.2</td><td>85.2</td><td>79.1</td><td>77.1</td><td>80.7</td><td>76.4</td><td>81.5</td></tr></table>
|
| 159 |
+
|
| 160 |
+
Table 2. Per-joint AP when the pose estimator is conditioned on different boxes. GT indicates ground truth boxes are used, and serves as an upper bound for accuracy. Det. indicates a detector was used to estimate boxes. @OKS* is the OKS threshold used.
|
| 161 |
+
|
| 162 |
+
bounding box improvement, the AP performance is $6.6\%$ lower, highlighting the issue of False Negatives. The further improvement from TOKS emphasizes the usefulness of estimating every pose. By using NMS, bounding box propagation methods miss the opportunity to use the confidence scores of the keypoints, which lead to better pose selection.
|
| 163 |
+
|
| 164 |
+
# 4.4. Tracking Pipeline Comparison to the SOTA
|
| 165 |
+
|
| 166 |
+
Now that we have analyzed the benefits of Pose Entailment and TOKS, we put them together and compare to other approaches. Figure 5 shows that we achieve the highest MOTA score. We improve over the original HRNet paper by 3.3 MOTA points on the Test set. [25], nearest our score on the 2018 Validation set, is much further away on the 2017 Test set. Additionally, our FPS is improved over all methods with similar MOTA scores, with many methods being offline due to their use of ensembles. (Frames per second (FPS) is calculated by dividing the number of frames in the dataset by the runtime of the approach.) Moreover, our method outperforms all others in terms of AP, showing the benefits of TOKS. $\mathrm{AP}^T$ is also reported, which is the AP score after tracking post-processing has been applied. This post-processing is beneficial to the MOTA score, but lowers AP. See section A.3 for more details on this post-processing. As we have the highest AP, but not the highest $\mathrm{AP}^T$, it appears the effect of tracking post-processing varies from paper to paper. Only $\mathrm{AP}^T$ is given on the test set because each paper is given 4 submissions, so these are used to optimize MOTA, rather than AP.
|
| 167 |
+
|
| 168 |
+
PoseTrack 2018 ECCV Challenge Val Set
|
| 169 |
+
|
| 170 |
+
<table><tr><td>No.</td><td>Method</td><td>Extra Data</td><td>APT</td><td>AP</td><td>FPS</td><td>MOTA</td></tr><tr><td>1.</td><td>KeyTrack (ours)</td><td>X</td><td>74.3</td><td>81.6</td><td>1.0</td><td>66.6</td></tr><tr><td>2.</td><td>MIPAL [25]</td><td>X</td><td>74.6</td><td>-</td><td>-</td><td>65.7</td></tr><tr><td>3.</td><td>LightTrack (offline) [35]</td><td>X</td><td>71.2</td><td>77.3</td><td>E</td><td>64.9</td></tr><tr><td>4.</td><td>LightTrack (online) [35]</td><td>X</td><td>72.4</td><td>77.2</td><td>0.7</td><td>64.6</td></tr><tr><td>5.</td><td>Miracle [58]</td><td>✓</td><td>-</td><td>80.9</td><td>E</td><td>64.0</td></tr><tr><td>6.</td><td>OpenSVAI [36]</td><td>X</td><td>69.7</td><td>76.3</td><td>-</td><td>62.4</td></tr><tr><td>7.</td><td>STAF [38]</td><td>✓</td><td>70.4</td><td>-</td><td>3</td><td>60.9</td></tr><tr><td>8.</td><td>MDPN [20]</td><td>✓</td><td>71.7</td><td>75.0</td><td>E</td><td>50.6</td></tr></table>
|
| 171 |
+
|
| 172 |
+
Figure 5. Top scores on the PoseTrack leaderboards. E indicates an ensemble of detectors is used, and results in the method being offline. A check indicates external training data is used beyond COCO and PoseTrack. A “-” indicates the information has not been made publicly available. FPS calculations for JointFlow and FlowTrack are taken from [59]. HRNet FPS is approximated from FlowTrack since the methods are very similar. The AP column has the best AP score. $\mathbf{AP}^T$ is the AP score after tracking post-processing.
|
| 173 |
+
|
| 174 |
+
PoseTrack 2017 Test Set Leaderboard
|
| 175 |
+
|
| 176 |
+
<table><tr><td>No.</td><td>Method</td><td>Extra Data</td><td>APT</td><td>FPS</td><td>MOTA</td></tr><tr><td>1.</td><td>KeyTrack (ours)</td><td>X</td><td>74.0</td><td>1.0</td><td>61.2</td></tr><tr><td>2.</td><td>POINet [40]</td><td>X</td><td>72.5</td><td>-</td><td>58.4</td></tr><tr><td>3.</td><td>LightTrack [35]</td><td>X</td><td>66.7</td><td>E</td><td>58.0</td></tr><tr><td>4.</td><td>HRNet [45]</td><td>X</td><td>75.0</td><td>0.2</td><td>57.9</td></tr><tr><td>5.</td><td>FlowTrack [54]</td><td>X</td><td>74.6</td><td>0.2</td><td>57.8</td></tr><tr><td>6.</td><td>MIPAL [25]</td><td>X</td><td>68.8</td><td>-</td><td>54.5</td></tr><tr><td>7.</td><td>STAF [38]</td><td>✓</td><td>70.3</td><td>2</td><td>53.8</td></tr><tr><td>8.</td><td>JointFlow [15]</td><td>X</td><td>63.6</td><td>0.2</td><td>53.1</td></tr></table>
|
| 177 |
+
|
| 178 |
+

|
| 179 |
+
Figure 6. Qualitative results of KeyTrack on the PoseTrack PoseTrack 17 Test Set. Additional qualitative results are in the supplement.
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
|
| 187 |
+
Efficiency: Our tracking approach is efficient, not reliant on optical flow or RGB data. When processing an image at our optimal resolution, $24 \times 18$ , we reduce the GFLOPS required by optical flow, which processes images at full size, from 52.7 to 0.1. [35]'s GCN does not capture higher-order interactions over keypoints and can be more efficient than our network with local convolutions. However, this translates to a $\sim$ 1ms improvement in GPU runtime. In fact, our tracking pipeline demonstrates a $30\%$ improvement in end-to-end runtime over [35], shown in 4.4. We have the fastest FPS of Top-down approaches. Also, we do not rely on optical flow to improve bounding box propagation as [54, 45] do, instead we use TOKs. This contributes to our 5x FPS improvement over [54, 45]. Further details on the parameters and FLOPS of the GCN, Optical Flow Network, and our Transformer Matching Network are in Table 6 of the Supplement.
|
| 188 |
+
|
| 189 |
+
# 5. Analysis
|
| 190 |
+
|
| 191 |
+
# 5.1. Tracking Pipeline
|
| 192 |
+
|
| 193 |
+
Varying Tokenization Schemes and Transformer Hyper-parameters We examine the benefits of each embedding. As evident in Table 3, Segment embeddings are crucial because they enable the network to distinguish between the Poses being matched. Token embeddings give the network information about the orientation of a pose and help it interpret keypoints which are in close spatial proximity; i.e. keypoints that have the same or similar position embedding. We also train a model that uses the relative
|
| 194 |
+
|
| 195 |
+
<table><tr><td>Abs. Position</td><td>Type</td><td>Segment</td><td>Rel. Position</td><td>Match % Accuracy</td></tr><tr><td>✓</td><td>✓</td><td>X</td><td>X</td><td>72.6</td></tr><tr><td>✓</td><td>X</td><td>✓</td><td>X</td><td>90.0</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>X</td><td>93.2 (ours)</td></tr><tr><td>X</td><td>✓</td><td>✓</td><td>✓</td><td>91.3</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>92.0</td></tr></table>
|
| 196 |
+
|
| 197 |
+
Table 3. Match accuracies for various embedding schemes.
|
| 198 |
+
|
| 199 |
+
keypoint distance from the pose center rather than the absolute distance of the keypoint in the entire image. We find that match accuracy deteriorates with this embedding. This is likely because many people perform the same activity, such as running, in the PoseTrack dataset, leading to them having nearly identical relative pose positions. We vary the number of transformer blocks, the hidden size in the transformer block, and number of heads in Table 7. Decreasing the number of transformer blocks, the hidden size, and attention heads hurts performance.
|
| 200 |
+
|
| 201 |
+
Number of Timesteps and Other Factors We find that reducing the number of timesteps adversely affects the MOTA score. It drops up to 0.3 points when using only a single timestep because we are less robust to detection errors. Also, in place of our greedy algorithm, we experimented with the Hungarian algorithm used in [19]. This algorithm is effective with ground truth information, but is not accurate when using detected poses.
|
| 202 |
+
|
| 203 |
+
<table><tr><td>Num Tx</td><td>Hidden Size</td><td>Int. Size</td><td>Num Heads</td><td>Parameters (M)</td><td>% IDSW</td></tr><tr><td>2</td><td>128</td><td>512</td><td>4</td><td>0.40</td><td>1.0</td></tr><tr><td>4</td><td>128</td><td>512</td><td>4</td><td>0.43</td><td>0.8</td></tr><tr><td>6</td><td>128</td><td>512</td><td>4</td><td>1.26</td><td>1.1</td></tr><tr><td>4</td><td>64</td><td>256</td><td>4</td><td>0.23</td><td>0.9</td></tr><tr><td>4</td><td>128</td><td>512</td><td>4</td><td>0.43</td><td>0.8</td></tr><tr><td>4</td><td>256</td><td>1024</td><td>4</td><td>3.31</td><td>1.1</td></tr><tr><td>4</td><td>128</td><td>128</td><td>4</td><td>0.43</td><td>0.8</td></tr><tr><td>4</td><td>128</td><td>512</td><td>4</td><td>0.86</td><td>0.8</td></tr><tr><td>4</td><td>128</td><td>128</td><td>2</td><td>0.43</td><td>0.9</td></tr><tr><td>4</td><td>128</td><td>128</td><td>4</td><td>0.43</td><td>0.8</td></tr><tr><td>4</td><td>128</td><td>128</td><td>6</td><td>0.43</td><td>0.8</td></tr></table>
|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
Figure 7. Left: Transformer network hyper-parameters are varied. Right: A plot of IDSW rate vs. image resolution. The table on the left shows the input to each method; the conv+visual input is blurry because images are downsampled.
|
| 207 |
+
|
| 208 |
+

|
| 209 |
+
Visual Features
|
| 210 |
+
t=2 t=1
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
Tx Head 6-0
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
Tx Head 6-3
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
t=3 t=2
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
|
| 225 |
+

|
| 226 |
+
t=2 t=1
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
|
| 230 |
+

|
| 231 |
+
|
| 232 |
+

|
| 233 |
+
t=3 t=2
|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
|
| 237 |
+

|
| 238 |
+
Figure 8. Attention heatmaps from two of our network's attention heads are shown. These are the 0th and 3rd heads from our final transformer block. The two pairs above the dotted line are a matching pair, while the pair below the dotted line is not (its poses are also from separate videos). $t$ is the frame timestep.
|
| 239 |
+
|
| 240 |
+
# 5.2. Comparing Self-Attention to Convolutions
|
| 241 |
+
|
| 242 |
+
We compare transformers and CNNs by replacing our Transformer Matching Network with two convolution-based methods. One takes visual features from bounding box pose pairs as input while the other takes only keypoints as input, where each unique keypoint is colored via a linear interpolation, a visual version of our Type tokens. Both approaches use identical CNNs, sharing an architecture inspired by VGG [43], and have approximately $4\mathrm{x}$ more parameters than our transformer-based model because this was required for stable training. See A.4 of the Supplement for details.
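
As a rough sketch of the keypoint-only CNN input described above, the snippet below draws each keypoint with a colour linearly interpolated according to its index, a visual analogue of the Type tokens. The canvas size and the red-to-blue colour ramp are our own illustrative choices, not the paper's.

```python
# Render keypoints as coloured dots; colour encodes keypoint index.
import numpy as np

H, W, K = 96, 72, 15
c0, c1 = np.array([255, 0, 0]), np.array([0, 0, 255])   # red -> blue ramp

def render(keypoints):
    """keypoints: (K, 2) integer array of (x, y) pixel coordinates."""
    canvas = np.zeros((H, W, 3), dtype=np.uint8)
    for k, (x, y) in enumerate(keypoints):
        colour = ((1 - k / (K - 1)) * c0 + (k / (K - 1)) * c1).astype(np.uint8)
        canvas[max(0, y - 1):y + 2, max(0, x - 1):x + 2] = colour   # 3x3 dot
    return canvas

img = render(np.random.randint(0, min(H, W), size=(K, 2)))
```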
|
| 243 |
+
|
| 244 |
+
Transformers outperform CNNs for the tracking task, as shown in Figure 7. However, we find two areas where CNNs can be competitive. First, at higher resolutions, transformers often need a large number of parameters to match
|
| 245 |
+
|
| 246 |
+
CNN's performance. In NLP, when using large vocabularies, a similar behavior is observed where transformers need multiple layers to achieve good performance. Second, we also find that convolutions optimize more quickly than the transformers, reaching their lowest number of ID Switches within the first 2 epochs of training. Intuitively, CNNs are more easily able to take advantage of spatial proximity. The transformers receive spatial information via the position embeddings, which are 1D linear projections of 2D locations. This can be improved by using positional embedding schemes that better preserve spatial information [18].
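
As a sketch of the two encodings discussed here, the snippet below contrasts a 1D linear projection of a 2D location with a factorized row/column embedding lookup. Both are illustrative readings of the idea rather than the exact layers used in the paper; the grid size and hidden dimension are assumptions.

```python
# Two ways to embed a keypoint's (x, y) location.
import torch
import torch.nn as nn

W, H, D = 24, 18, 128
proj = nn.Linear(2, D)                                       # 1D linear projection of (x, y)
row_emb, col_emb = nn.Embedding(H, D), nn.Embedding(W, D)    # factorized row/col tables

x, y = 5, 7
e1 = proj(torch.tensor([[x / W, y / H]]))                     # (1, 128)
e2 = row_emb(torch.tensor([y])) + col_emb(torch.tensor([x]))  # (1, 128), preserves 2D structure
```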
|
| 247 |
+
|
| 248 |
+
In summary, CNNs are accurate at high resolutions given their useful properties, such as translation and location invariance. However, there is an extra computational cost to using them. The extra information, beyond the spatial location of keypoints, included in our keypoint embeddings, coupled with the transformer's ability to model higher-order interactions, allows it to function surprisingly well at very low resolutions. Thus, the advantage of CNNs is diminished, and our transformer-based network outperforms them in the low-resolution case.
|
| 249 |
+
|
| 250 |
+
# 5.3. Visualizing Attention Heatmaps
|
| 251 |
+
|
| 252 |
+
We visualize our network's attention heatmaps in Fig. 8. When our network classifies a pair as non-matching, its attention is heavily placed on one of the poses over the other. Also, we find it interesting that one of our attention heads primarily places its attention on keypoints near the person's head. This specialization suggests different attention heads are attuned to specific keypoint motion cues.
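
For readers who want to reproduce this kind of visualization, the sketch below shows one way to pull per-head attention weights out of a PyTorch multi-head attention layer. The tokens are random stand-ins, the layer sizes are illustrative, and the `average_attn_weights` flag requires a recent PyTorch version; in practice the weights would come from the trained matching network's final transformer block.

```python
# Extract per-head attention weights for heatmap visualization.
import torch
import torch.nn as nn

attn = nn.MultiheadAttention(embed_dim=128, num_heads=4, batch_first=True)
tokens = torch.randn(1, 30, 128)            # 30 keypoint tokens (two poses of 15)
_, weights = attn(tokens, tokens, tokens,
                  need_weights=True, average_attn_weights=False)
# weights: (batch, num_heads, 30, 30); weights[0, h] is head h's map of how much
# each query keypoint token attends to each key keypoint token.
head0 = weights[0, 0].detach().numpy()
```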
|
| 253 |
+
|
| 254 |
+
# 6. Conclusion
|
| 255 |
+
|
| 256 |
+
In summary, we present an efficient Multi-person Pose Tracking method. Our proposed Pose Entailment method achieves SOTA performance on PoseTrack datasets without using RGB information in the tracking step. KeyTrack also benefits from improved keypoint estimates obtained with TOKs, which outperform bounding box propagation methods. Finally, we demonstrate how to tokenize and embed human pose information in the transformer architecture, which has applications to tasks such as pose-based action recognition.
|
| 257 |
+
|
| 258 |
+
# References
|
| 259 |
+
|
| 260 |
+
[1] Posetrack leaderboard, 2017 test set, 2017. 2, 5
|
| 261 |
+
[2] Posetrack challenge - eccv 2018, 2018. 5
|
| 262 |
+
[3] Abdulrahman Alarifi, AbdulMalik Al-Salman, Mansour Alsaleh, Ahmad Alnafessah, Suheer Al-Hadhrami, Mai A Al-Ammar, and Hend S Al-Khalifa. Ultra wideband indoor positioning technologies: Analysis and recent advances. Sensors, 16(5):707, 2016. 2
|
| 263 |
+
[4] Mykhaylo Andriluka, Umar Iqbal, Eldar Insafutdinov, Leonid Pishchulin, Anton Milan, Juergen Gall, and Bernt Schiele. Posetrack: A benchmark for human pose estimation and tracking. In CVPR, 2018. 2, 5
|
| 264 |
+
[5] Mykhaylo Andriluka, Stefan Roth, and Bernt Schiele. Pictorial structures revisited: People detection and articulated pose estimation. In 2009 IEEE conference on computer vision and pattern recognition, pages 1014-1021. IEEE, 2009. 2
|
| 265 |
+
[6] Mykhaylo Andriluka, Leonid Pishchulin, Peter Gehler, and Bernt Schiele. 2d human pose estimation - mpii human pose dataset. In CVPR, 2014. 5
|
| 266 |
+
[7] Keni Bernardin and Rainer Stiefelhagen. Evaluating multiple object tracking performance: The CLEAR MOT metrics. Journal on Image and Video Processing, 2008(1), 2008. 5
|
| 267 |
+
[8] Gedas Bertasius, Christoph Feichtenhofer, Du Tran, Jianbo Shi, and Lorenzo Torresani. Learning temporal pose estimation from sparsely-labeled videos. arXiv preprint arXiv:1906.04016, 2019. 2
|
| 268 |
+
[9] Samuel R Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. arXiv preprint arXiv:1508.05326, 2015. 3
|
| 269 |
+
[10] Yilun Chen, Zhicheng Wang, Yuxiang Peng, Zhiqiang Zhang, Gang Yu, and Jian Sun. Cascaded pyramid network for multi-person pose estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7103-7112, 2018. 2
|
| 270 |
+
[11] Bo Dai, Yuqi Zhang, and Dahua Lin. Detecting visual relationships with deep relational networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017. 3
|
| 271 |
+
[12] Matthias Dantone, Juergen Gall, Christian Leistner, and Luc Van Gool. Human pose estimation using body parts dependent joint regressors. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3041-3048, 2013. 2
|
| 272 |
+
[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805, 2018. 3
|
| 273 |
+
[14] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2
|
| 274 |
+
[15] Andreas Doering, Umar Iqbal, and Juergen Gall. Joint flow: Temporal flow fields for multi person tracking, 2018. 7
|
| 275 |
+
|
| 276 |
+
[16] Pedro F Felzenszwalb and Daniel P Huttenlocher. Pictorial structures for object recognition. International journal of computer vision, 61(1):55-79, 2005. 2
|
| 277 |
+
[17] Mihai Fieraru, Anna Khoreva, Leonid Pishchulin, and Bernt Schiele. Learning to refine human pose estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 205-214, 2018. 2
|
| 278 |
+
[18] Rohit Girdhar, Joao Carreira, Carl Doersch, and Andrew Zisserman. Video action transformer network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 244–253, 2019. 3, 8
|
| 279 |
+
[19] Rohit Girdhar, Georgia Gkioxari, Lorenzo Torresani, Manohar Paluri, and Du Tran. Detect-and-track: Efficient pose estimation in videos. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 350-359, 2018. 2, 3, 7
|
| 280 |
+
[20] Hengkai Guo, Tang Tang, Guozhong Luo, Riwei Chen, Yongchen Lu, and Linfu Wen. Multi-domain pose network for multi-person pose estimation and tracking. Computer Vision - ECCV 2018 Workshops, page 209-216, 2019. 2, 3, 7
|
| 281 |
+
[21] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In Proceedings of the IEEE international conference on computer vision, pages 2961-2969, 2017. 2
|
| 282 |
+
[22] Ronghang Hu, Marcus Rohrbach, Jacob Andreas, Trevor Darrell, and Kate Saenko. Modeling relationships in referential expressions with compositional modular networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016. 3
|
| 283 |
+
[23] Hao Huang, Luowei Zhou, Wei Zhang, Jason J Corso, and Chenliang Xu. Dynamic graph modules for modeling object-object interactions in activity recognition: Supplementary material. BMVC, 2019. 3
|
| 284 |
+
[24] Shaoli Huang, Mingming Gong, and Dacheng Tao. A coarse-fine network for keypoint localization. In Proceedings of the IEEE International Conference on Computer Vision, pages 3028-3037, 2017. 2
|
| 285 |
+
[25] Jihye Hwang, Jieun Lee, Sungheon Park, and Nojun Kwak. Pose estimator and tracker using temporal flow maps for limbs, 2019. 2, 6, 7
|
| 286 |
+
[26] Eldar Insafutdinov, Mykhaylo Andriluka, Leonid Pishchulin, Siyu Tang, Evgeny Levinkov, Bjoern Andres, and Bernt Schiele. Artrack: Articulated multi-person tracking in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6457-6465, 2017. 2, 3
|
| 287 |
+
[27] Umar Iqbal, Anton Milan, and Juergen Gall. Posetrack: Joint multi-person pose estimation and tracking. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2011-2020, 2017. 2, 3
|
| 288 |
+
[28] Sheng Jin, Wentao Liu, Wanli Ouyang, and Chen Qian. Multi-person articulated tracking with spatial and temporal embeddings. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5664-5673, 2019. 2
|
| 289 |
+
[29] Sheng Jin, Xujie Ma, Zhipeng Han, Yue Wu, Wei Yang, Wentao Liu, Chen Qian, and Wanli Ouyang. Towards multi-
|
| 290 |
+
|
| 291 |
+
person pose tracking: Bottom-up and top-down methods. In ICCV PoseTrack Workshop, volume 2, page 7, 2017. 3
|
| 292 |
+
[30] Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. Visualbert: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557, 2019. 3
|
| 293 |
+
[31] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. arXiv preprint arXiv:1908.02265, 2019. 3
|
| 294 |
+
[32] Chih-Yao Ma, Asim Kadav, Iain Melvin, Zsolt Kira, Ghassan AlRegib, and Hans Peter Graf. Attend and interact: Higher-order object interactions for video understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6790-6800, 2018. 3
|
| 295 |
+
[33] Anton Milan, Laura Leal-Taixe, Ian D. Reid, Stefan Roth, and Konrad Schindler. MOT16: A benchmark for multi-object tracking. CoRR, abs/1603.00831, 2016. 5
|
| 296 |
+
[34] Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. Posefix: Model-agnostic general human pose refinement network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7773–7781, 2019. 2
|
| 297 |
+
[35] Guanghan Ning and Heng Huang. Lighttrack: A generic framework for online top-down human pose tracking. arXiv preprint arXiv:1905.02822, 2019. 1, 2, 6, 7
|
| 298 |
+
[36] Guanghan Ning, Ping Liu, Xiaochuan Fan, and Chi Zhang. A top-down approach to articulated human pose estimation and tracking. Computer Vision - ECCV 2018 Workshops, page 227-234, 2019. 7
|
| 299 |
+
[37] George Papandreou, Tyler Zhu, Nori Kanazawa, Alexander Toshev, Jonathan Tompson, Chris Bregler, and Kevin Murphy. Towards accurate multi-person pose estimation in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4903-4911, 2017. 2
|
| 300 |
+
[38] Yaadhav Raaj, Haroon Idrees, Gines Hidalgo, and Yaser Sheikh. Efficient online multi-person 2d pose tracking with recurrent spatio-temporal affinity fields. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4620-4628, 2019. 2, 7
|
| 301 |
+
[39] Prajit Ramachandran, Niki Parmar, Ashish Vaswani, Irwan Bello, Anselm Levskaya, and Jonathon Shlens. Standalone self-attention in vision models. arXiv preprint arXiv:1906.05909, 2019. 3
|
| 302 |
+
[40] Weijian Ruan, Wu Liu, Qian Bao, Jun Chen, Yuhao Cheng, and Tao Mei. POINet: Pose-guided ovonic insight network for multi-person pose tracking. In Proceedings of the 27th ACM International Conference on Multimedia, MM '19, pages 284-292, New York, NY, USA, 2019. ACM. 2, 6, 7
|
| 303 |
+
[41] Adam Santoro, David Raposo, David GT Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. In Advances in Neural Information Processing Systems, 2017. 3
|
| 304 |
+
[42] Leonid Sigal and Michael J Black. Measure locally, reason globally: Occlusion-sensitive articulated pose estimation. In 2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06), volume 2, pages 2041-2048. IEEE, 2006. 2
|
| 305 |
+
|
| 306 |
+
[43] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition, 2014. 8
|
| 307 |
+
[44] Chen Sun, Fabien Baradel, Kevin Murphy, and Cordelia Schmid. Contrastive bidirectional transformer for temporal representation learning. arXiv preprint arXiv:1906.05743, 2019. 3
|
| 308 |
+
[45] Ke Sun, Bin Xiao, Dong Liu, and Jingdong Wang. Deep high-resolution representation learning for human pose estimation. CoRR, abs/1902.09212, 2019. 1, 2, 3, 7
|
| 309 |
+
[46] Hao Tan and Mohit Bansal. Lxmert: Learning cross-modality encoder representations from transformers. arXiv preprint arXiv:1908.07490, 2019. 3
|
| 310 |
+
[47] Jonathan Tompson, Ross Goroshin, Arjun Jain, Yann LeCun, and Christoph Bregler. Efficient object localization using convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 648-656, 2015. 1, 2
|
| 311 |
+
[48] Alexander Toshev and Christian Szegedy. Deeppose: Human pose estimation via deep neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1653-1660, 2014. 1, 2
|
| 312 |
+
[49] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017. 1, 5
|
| 313 |
+
[50] Xiaolong Wang, Allan Jabri, and Alexei A Efros. Learning correspondence from the cycle-consistency of time. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2566-2576, 2019. 2
|
| 314 |
+
[51] Yang Wang and Greg Mori. Multiple tree models for occlusion and spatial constraints in human pose estimation. In European Conference on Computer Vision, pages 710-724. Springer, 2008. 2
|
| 315 |
+
[52] Shih-En Wei, Varun Ramakrishna, Takeo Kanade, and Yaser Sheikh. Convolutional pose machines. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4724–4732, 2016. 1, 2
|
| 316 |
+
[53] Fangting Xia, Peng Wang, Xianjie Chen, and Alan L Yuille. Joint multi-person pose estimation and semantic part segmentation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6769-6778, 2017. 2
|
| 317 |
+
[54] Bin Xiao, Haiping Wu, and Yichen Wei. Simple baselines for human pose estimation and tracking. In European Conference on Computer Vision (ECCV), 2018. 5, 6, 7
|
| 318 |
+
[55] Yuliang Xiu, Jiefeng Li, Haoyu Wang, Yinghong Fang, and Cewu Lu. Pose flow: Efficient online pose tracking. arXiv preprint arXiv:1802.00977, 2018. 3
|
| 319 |
+
[56] Jiarui Xu, Yue Cao, Zheng Zhang, and Han Hu. Spatial-temporal relation networks for multi-object tracking. arXiv preprint arXiv:1904.11489, 2019. 3
|
| 320 |
+
[57] Wei Yang, Shuang Li, Wanli Ouyang, Hongsheng Li, and Xiaogang Wang. Learning feature pyramids for human pose estimation. In Proceedings of the IEEE International Conference on Computer Vision, pages 1281-1290, 2017. 1, 2
|
| 321 |
+
[58] Dongdong Yu, Kai Su, Jia Sun, and Changhu Wang. Multi-person pose estimation for pose tracking with enhanced cascaded pyramid network. In European Conference on Computer Vision, pages 221-226. Springer, 2018. 7
|
| 322 |
+
|
| 323 |
+
[59] Jiabin Zhang, Zheng Zhu, Wei Zou, Peng Li, Yanwei Li, Hu Su, and Guan Huang. Fastpose: Towards real-time pose estimation and tracking via scale-normalized multi-task networks, 2019. 7
|
| 324 |
+
[60] Luowei Zhou, Hamid Palangi, Lei Zhang, Houdong Hu, Jason J Corso, and Jianfeng Gao. Unified vision-language pre-training for image captioning and vqa. arXiv preprint arXiv:1909.11059, 2019. 3
|
| 325 |
+
[61] Luowei Zhou, Yingbo Zhou, Jason J Corso, Richard Socher, and Caiming Xiong. End-to-end dense video captioning with masked transformer. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8739-8748, 2018. 3
|
15keypointsisallyouneed/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:316cd93505affe87eee07716c0e57176579d2f17ebbc0be5650e80b8850ceb12
|
| 3 |
+
size 476691
|
15keypointsisallyouneed/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e33a40d28f8c218e2c0a1663c0a1dced37d54a96d1204958993e08fa59cc45f
|
| 3 |
+
size 407455
|
3dhumanmeshregressionwithdensecorrespondence/5ce609af-653e-4f2d-bb43-2bcd175021d4_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:546cd698d50dad4b75e54ecb356a3e7781ba9b45418a26770e6ee7830c7ac33d
|
| 3 |
+
size 77599
|
3dhumanmeshregressionwithdensecorrespondence/5ce609af-653e-4f2d-bb43-2bcd175021d4_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:81c2d7d663f5d903f72f29bee0cdaaaa3de06dcd30ebc32ffafbaa4f1e8e61f1
|
| 3 |
+
size 95607
|
3dhumanmeshregressionwithdensecorrespondence/5ce609af-653e-4f2d-bb43-2bcd175021d4_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:371f0ab1db0b974604e55690e0e7c652f828bc87b87f060f8ae97a9e13c320aa
|
| 3 |
+
size 1909257
|
3dhumanmeshregressionwithdensecorrespondence/full.md
ADDED
|
@@ -0,0 +1,373 @@
|
| 1 |
+
# 3D Human Mesh Regression with Dense Correspondence
|
| 2 |
+
|
| 3 |
+
Wang Zeng $^{1}$ , Wanli Ouyang $^{2}$ , Ping Luo $^{3}$ , Wentao Liu $^{4}$ , and Xiaogang Wang $^{1,4}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup>The Chinese University of Hong Kong <sup>2</sup>The University of Sydney <sup>3</sup>The University of Hong Kong <sup>4</sup>SenseTime Research {zengwang@link, xgwang@ee}.cuhk.edu.hk, wanli.ouyang@sydney.edu.au, pluo@cs.hku.hk, liuwentao@sensetime.com
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Estimating a 3D mesh of the human body from a single 2D image is an important task with many applications, such as augmented reality and human-robot interaction. However, prior works reconstructed the 3D mesh from a global image feature extracted by a convolutional neural network (CNN), where the dense correspondences between the mesh surface and the image pixels are missing, leading to suboptimal solutions. This paper proposes a model-free 3D human mesh estimation framework, named DecoMR, which explicitly establishes the dense correspondence between the mesh and the local image features in the UV space (i.e., a 2D space used for texture mapping of 3D meshes). DecoMR first predicts a pixel-to-surface dense correspondence map (i.e., an IUV image), with which we transfer local features from the image space to the UV space. Then the transferred local image features are processed in the UV space to regress a location map, which is well aligned with the transferred features. Finally, we reconstruct the 3D human mesh from the regressed location map with a predefined mapping function. We also observe that the existing discontinuous UV map is unfriendly to the learning of the network. Therefore, we propose a novel UV map that maintains most of the neighboring relations on the original mesh surface. Experiments demonstrate that our proposed local feature alignment and continuous UV map outperform existing 3D mesh based methods on multiple public benchmarks. Code will be made available at https://github.com/zengwang430521/DecoMR.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
Estimation of the full human body pose and shape from a monocular image is a fundamental task for various applications such as human action recognition [12, 35], VR/AR [11] and video editing [10]. It is challenging mostly due to the inherent depth ambiguity and the difficulty to
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Figure 1. Prior methods (e.g., SPIN [20] and CMR [21]) usually reconstruct 3D meshes of the human body from the global image feature vector extracted by neural networks, where the dense correspondences between the mesh surface and the image pixels are missing, leading to suboptimal results (top). Our DecoMR framework explicitly establishes such correspondence in the feature space with the aid of a novel continuous UV map, which leads to better mesh details (bottom).
|
| 17 |
+
|
| 18 |
+
obtain ground-truth 3D human body data. There are several popular representations for 3D objects in the literature, e.g., point clouds, 3D voxels and 3D meshes. Because of its compatibility with existing computer graphics engines and its efficiency in representing object surfaces in detail with reasonable storage, the 3D mesh representation has been widely adopted for 3D human body reconstruction [18, 4, 20, 8, 27, 38, 11, 26, 25, 37, 21, 39].
|
| 19 |
+
|
| 20 |
+
However, unlike the 3D voxel representation, the mesh representation lacks dense correspondence between the template human mesh surface and the image pixels, even though such dense correspondence between the input and the output has been proven crucial for various tasks [24, 39]. Due to this limitation, most existing 3D mesh based methods, either model-based [18, 26, 25, 20] or model-free [21], have to ignore the correspondence between the mesh representation and the pixel representation. They therefore estimate human meshes based on either a global image feature [18, 21, 20] or hierarchical projection and refinement [39], which is time-consuming and sensitive to the initial estimation.
|
| 21 |
+
|
| 22 |
+
To utilize the 3D mesh representation without losing
|
| 23 |
+
|
| 24 |
+
the correspondence between the mesh space and the image space, we propose a 3D human mesh estimation framework that explicitly establishes the dense correspondence between the output 3D mesh and the input image in the UV space.
|
| 25 |
+
|
| 26 |
+
Representing output mesh by a new UV map: Every point on the mesh surface is represented by its coordinates on the continuous UV map. Therefore, the 3D mesh can be presented as a location map in the UV space, of which the pixel values are the 3D coordinates of its corresponding point on the mesh surface, as shown in Figure 1. Instead of using SMPL default UV map, we construct a new continuous UV map that maintains more neighboring relations of the original mesh surface, by parameterizing the whole mesh surface into a single part on the UV plane, as shown in Figure 1.
|
| 27 |
+
|
| 28 |
+
Mapping image features to the UV space: To map the image features to the continuous UV space, we first use a network that takes a monocular image as input to predict an IUV image [2], which assigns each pixel to a specific body part location. Then the local image features from the decoder are transferred to the UV space with the guidance of the predicted IUV image, constructing transferred feature maps that are well aligned with the corresponding mesh areas.
|
| 29 |
+
|
| 30 |
+
Given the transferred local features, we use both the local features and the global feature to estimate the location map in the UV space, which is further used to reconstruct the 3D human body mesh with the predefined UV mapping function. Since our UV map is continuous and maintains the neighboring relationships among body parts, details between body parts can be well preserved when the local features are transferred.
|
| 31 |
+
|
| 32 |
+
In summary, our contributions are twofold:
|
| 33 |
+
|
| 34 |
+
- We propose a novel UV map that maintains most of the neighboring relations on the original mesh surface.
|
| 35 |
+
- We explicitly establish the dense correspondence between the output 3D mesh and the input image by the transferred local image features.
|
| 36 |
+
|
| 37 |
+
We extensively evaluate our methods on multiple widely used benchmarks for 3D human body reconstruction. Our method achieves state-of-the-art performance on both 3D human body mesh reconstruction and 3D human body pose estimation.
|
| 38 |
+
|
| 39 |
+
# 2. Related Work
|
| 40 |
+
|
| 41 |
+
# 2.1. Optimization-based methods
|
| 42 |
+
|
| 43 |
+
Pioneering works solve 3D human body reconstruction by optimizing the parameters of a predefined 3D human mesh model, e.g., SCAPE [3] and SMPL [23], with respect to ground-truth body landmark locations [8], or by employing
|
| 44 |
+
|
| 45 |
+
a 2D keypoint estimation network [4]. To improve the precision, extra landmarks are used in [22]. Recent work [38] enables multi-person body reconstruction by incorporating human semantic part segmentation clues, and scene and temporal constraints.
|
| 46 |
+
|
| 47 |
+
# 2.2. Learning-based methods
|
| 48 |
+
|
| 49 |
+
Model-based methods: Direct reconstruction of the 3D human body from a single image is a relatively hard problem. Therefore, many methods incorporate a parameterized 3D human model and turn the problem into model parameter regression. For example, HMR [18] regresses the SMPL parameters directly from the RGB image. In order to mitigate the lack of robustness caused by the inadequacy of in-the-wild training data, some approaches employ intermediate representations, such as 2D joint heatmaps and silhouettes [26], semantic segmentation maps [25] or IUV images [36]. Recently, SPIN [20] incorporates 3D human model parameter optimization into the network training process by supervising the network with the optimization result, and achieves the state-of-the-art results among model-based 3D human body estimation approaches.
|
| 50 |
+
|
| 51 |
+
Compared with optimization-based methods, model parameter regression methods are more computationally efficient. While these methods can make use of the prior knowledge embedded in the 3D human model and tend to reconstruct more biologically plausible human bodies than model-free methods, their representation capability is also limited by the parameter space of the predefined human models. In addition, as stated in [21], the 3D human model parameter space might not be friendly to network learning. On the contrary, our framework does not regress model parameters. Instead, it directly outputs the 3D coordinates of each mesh vertex.
|
| 52 |
+
|
| 53 |
+
Model-free methods: Some methods do not rely on human models and regress a 3D human body representation directly from the image. BodyNet [33] estimates a volumetric representation of the 3D human with a Voxel-CNN. A recent work [6] estimates visible and hidden depth maps, and combines them to form a point cloud of the human. Voxel and point cloud based representations are flexible and can represent objects with different topologies. However, their capability of reconstructing surface details is limited by the storage cost.
|
| 54 |
+
|
| 55 |
+
CMR [21] uses a Graph-CNN to directly regress the 3D coordinates of vertices from image features. Densebody [37] estimates vertex locations in the form of a UV-position map. A recent work [28] represents 3D shapes using 2D geometry images, which can be regarded as a special kind of UV-position map. These methods do not use any human model. However, they still lack correspondence between the human mesh and the image and estimate the whole surface relying only on the global image feature. On the contrary, our method can employ local features for the reconstruction
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
Figure 2. Overview of our framework. Given an input image, an IUV map is first predicted by the correspondence net. Then local image features are transferred to the UV space. Location net takes transferred local features, expanded global feature and reference location map as input, and regresses a location map. Finally, 3D mesh is reconstructed from the location map.
|
| 59 |
+
|
| 60 |
+
of corresponding surface area.
|
| 61 |
+
|
| 62 |
+
The efficacy of the UV space representation has been demonstrated in recent work Tex2Shape [1], where the 3D human shape is estimated from the texture map which is obtained by transferring images pixels according to the IUV image estimated by DensePose [2]. We also use the IUV image to guide the human mesh estimation. However, in [1], the UV transfer is used to preprocess the raw image and is independent from the model learning, while we incorporate the UV transfer into our network to enable the end-to-end learning. We observe the efficacy of learning the transferred features end-to-end, which has also been proved by prior works, e.g., Spatial Transformer Networks [15] and Deformable ConvNets [5].
|
| 63 |
+
|
| 64 |
+
Very recently, HMD [39] refines an initially estimated human mesh by hierarchical projection and mesh deformation. PIFu [30] reconstructs the 3D human as an implicit function. HMD and PIFu are able to utilize local image features to achieve impressive details in the reconstruction results. However, HMD is computationally intensive and sensitive to the initial estimation, while the implicit function lacks the semantic information of the human body. In contrast, we estimate the pixel-to-surface dense correspondence directly from images, which is computationally efficient and more robust, and the location map maintains the semantic information of the human body.
|
| 65 |
+
|
| 66 |
+
# 3. Our Method
|
| 67 |
+
|
| 68 |
+
Overview. As shown in Figure 2, our framework DecoMR consists of two components: a dense correspondence estimation network (CNet), which operates in the image space, and a localization network (LNet), which operates in a new continuous UV space. The
|
| 69 |
+
|
| 70 |
+
CNet has an encoder-decoder architecture to estimate an IUV image. It also extracts local image features $\mathcal{F}_{im}$ , and then uses the estimated IUV image for transferring the image features $\mathcal{F}_{im}$ to the transferred local features $\mathcal{F}_{UV}$ in the UV space. LNet takes the above transferred local features $\mathcal{F}_{UV}$ as input, and regresses a location map $X$ , whose pixel value is the 3D coordinates of the corresponding points on the mesh surface. Finally, the 3D human mesh $V$ is reconstructed from the above location map by using a predefined UV mapping function. As a result, the location map and the transferred feature map are well aligned in the UV space, thus leading to dense correspondence between the output 3D mesh and the input image.
|
| 71 |
+
|
| 72 |
+
Although the SMPL UV map [23] is widely used in the literature [37, 1, 7], it loses the neighboring relationships between different body parts as shown in Figure 3 (a), which is crucial for network learning as stated in [21]. Therefore, we design a new UV map that is able to maintain more neighboring relationships on the original mesh surface as shown in Figure 3 (b).
|
| 73 |
+
|
| 74 |
+
The overall objective function of DecoMR is
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\mathcal{L} = \mathcal{L}_{IUV} + \mathcal{L}_{Loc} + \lambda_{con} \mathcal{L}_{con}. \tag{1}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
It has three loss functions of different purposes. The first loss denoted as $\mathcal{L}_{IUV}$ minimizes the distance between the predicted IUV image and the ground-truth IUV image. The second loss function denoted as $\mathcal{L}_{Loc}$ minimizes the dissimilarity between the regressed human mesh (e.g. location map) and the ground-truth human mesh. In order to encourage the output mesh to be aligned with the input image, we add an extra loss function, denoted as $\mathcal{L}_{con}$ , which is a consistent loss to increase the consistency between the regressed location map and the ground-truth IUV image. The $\lambda_{con}$ in Equation 1 is a constant coefficient to balance the
|
| 81 |
+
|
| 82 |
+
consistent loss $\mathcal{L}_{con}$ . We first define the new UV map below and then introduce different loss functions in details.
|
| 83 |
+
|
| 84 |
+
# 3.1. The Continuous UV map
|
| 85 |
+
|
| 86 |
+
First we define a new continuous UV map that preserves more neighboring relationships of the original mesh than the ordinary SMPL UV map. As shown in Figure 3 (a), multiple mesh surface parts are placed separately on the SMPL default UV map, which loses the neighboring relationships of the original mesh surface. Instead of utilizing the SMPL UV map as in [1, 7, 37], we design a new continuous UV map. We first carefully split the template mesh into an open mesh, while keeping the entire mesh surface as a whole. Then we utilize an area-preserving 3D mesh planar parameterization algorithm [14, 16] to minimize the area distortion between the UV map and the original mesh surface, obtaining an initial UV map. To maintain symmetry for every pair of symmetric vertices on the UV map, we further refine the initial UV map by first aligning the fitted symmetry axis with the $v$ axis and then averaging each vertex's UV coordinates with those of its symmetric counterpart flipped across the $v$ axis.
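
The symmetry refinement step can be sketched as follows, assuming the symmetry axis has already been aligned with the $v$ axis (taken as $u = 0.5$ here) and that a per-vertex left/right correspondence is available; both assumptions, and the random stand-in data, are ours for illustration.

```python
# Average each vertex's UV coordinates with its mirrored symmetric partner.
import numpy as np

def symmetrize_uv(uv, sym_pairs, axis_u=0.5):
    """uv: (N, 2) array of (u, v) in [0, 1]; sym_pairs[i] is i's symmetric partner."""
    mirrored = uv[sym_pairs].copy()
    mirrored[:, 0] = 2.0 * axis_u - mirrored[:, 0]   # flip partner across the axis
    return 0.5 * (uv + mirrored)                      # average -> exact symmetry

uv = np.random.rand(6890, 2)                          # 6890 SMPL template vertices
sym_pairs = np.random.permutation(6890)               # stand-in correspondence
uv_sym = symmetrize_uv(uv, sym_pairs)
```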
|
| 87 |
+
|
| 88 |
+
Comparisons. Here we quantitatively show that our continuous UV map outperforms the SMPL UV map in terms of preserving connection relationships between vertices on the mesh. To do so, we compute the distance matrix, where each element is the distance between every vertex pair. We also compute the distance matrix on the UV map. Figure 4 shows such distance matrices. This distance matrix can be computed by using different types of data. For the mesh surface, the distance between two vertices is defined as the length of the minimal path between them on the graph built from the mesh. For the UV map, the distance between two vertices is directly calculated by the distance between their UV coordinates.
|
| 89 |
+
|
| 90 |
+
Now we quantitatively evaluate the similarity between the distance matrices of UV map and original mesh in two aspects as shown in Table 1. In the first aspect, we calculate the 2D correlation coefficient denoted as $S_{1}$ . We have
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
S_{1} = \frac{\sum_{m} \sum_{n} \left(A_{mn} - \bar{A}\right) \left(B_{mn} - \bar{B}\right)}{\sqrt{\left(\sum_{m} \sum_{n} \left(A_{mn} - \bar{A}\right)^{2}\right) \left(\sum_{m} \sum_{n} \left(B_{mn} - \bar{B}\right)^{2}\right)}}, \tag{2}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where $A$ and $B$ are the distance matrices of original mesh and UV map, respectively. $\bar{A}$ and $\bar{B}$ are the mean value of $A$ and $B$ respectively. $m$ and $n$ are the indices of mesh vertices.
|
| 97 |
+
|
| 98 |
+
In the second aspect, we calculate the normalized cosine similarity between the distance matrices of UV map and original mesh, denoted as $S_{2}$ . From Table 1, we see that our continuous UV map outperforms SMPL UV map by large margins on both metric values, showing that our
|
| 99 |
+
|
| 100 |
+

|
| 101 |
+
(a)
|
| 102 |
+
|
| 103 |
+

|
| 104 |
+
|
| 105 |
+

|
| 106 |
+
|
| 107 |
+

|
| 108 |
+
|
| 109 |
+

|
| 110 |
+
(b)
|
| 111 |
+
RGB image
|
| 112 |
+
|
| 113 |
+

|
| 114 |
+
IUV image
|
| 115 |
+
|
| 116 |
+

|
| 117 |
+
UV map
|
| 118 |
+
|
| 119 |
+

|
| 120 |
+
3D mesh
|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
Figure 3. Comparisons of UV maps. Row (a) shows SMPL default UV map and row (b) shows our continuous UV map.
|
| 124 |
+

Figure 4. Comparisons of distance matrices between vertices calculated on SMPL UV map, the proposed UV map, and the original mesh surface (panels: SMPL UV map, our UV map, original mesh). Compared to SMPL UV map, the distance matrix of the proposed UV map is more similar to that of the original mesh.
|
| 132 |
+
|
| 133 |
+
<table><tr><td>UV map</td><td>2D correlation (S1)</td><td>cosine similarity (S2)</td></tr><tr><td>SMPL [23]</td><td>0.2132</td><td>0.8306</td></tr><tr><td>Ours</td><td>0.7758</td><td>0.9458</td></tr></table>
|
| 134 |
+
|
| 135 |
+
Table 1. Comparisons of the similarity between the vertices' distance matrices of the original mesh surface and different types of UV maps. $S_{1}$ is the 2D correlation coefficient and $S_{2}$ is the normalized cosine similarity. We see that the proposed UV map outperforms SMPL default UV map on both metrics.
|
| 136 |
+
|
| 137 |
+
UV map preserves more neighboring relationships than the SMPL UV map.
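
For concreteness, here is a small sketch of the two similarity measures reported in Table 1. $S_1$ follows Equation 2 directly, while the normalization used for $S_2$ is one plausible choice, since the exact formula is not spelled out; the matrices are random stand-ins for the vertex-pair distance matrices.

```python
# 2D correlation (S1) and normalized cosine similarity (S2) between distance matrices.
import numpy as np

def s1_correlation(A, B):
    a, b = A - A.mean(), B - B.mean()
    return (a * b).sum() / np.sqrt((a ** 2).sum() * (b ** 2).sum())

def s2_cosine(A, B):
    return (A * B).sum() / (np.linalg.norm(A) * np.linalg.norm(B))

A = np.random.rand(100, 100)
B = A + 0.1 * np.random.rand(100, 100)
print(s1_correlation(A, B), s2_cosine(A, B))
```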
|
| 138 |
+
|
| 139 |
+
Pixel-to-Mesh Correspondence. With the proposed UV map, every point on the mesh surface can be expressed by its coordinates on the UV map (i.e. UV coordinates). Therefore, we can predict the pixel-to-surface correspondence by estimating the UV coordinates for each pixel belonging to human body, leading to an IUV image as shown in Figure 3. More importantly, we can also represent a 3D mesh with a location map in the UV space, where the pixel values are 3D coordinates of the corresponding points on the mesh surface. Thus it is easy to reconstruct 3D mesh from a location map with the following formula,
|
| 140 |
+
|
| 141 |
+
$$
|
| 142 |
+
V_{i} = X\left(u_{i}, v_{i}\right), \tag{3}
|
| 143 |
+
$$
|
| 144 |
+
|
| 145 |
+
where $V_{i}$ denotes 3D coordinates of vertex, $X$ is the location map, $u_{i}$ and $v_{i}$ are UV coordinates of the vertex.
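
A minimal sketch of this reconstruction step, using nearest-neighbour lookup on a discrete location map; the map resolution and the use of SMPL's 6890 template vertices are illustrative assumptions (bilinear sampling would also work).

```python
# Read each vertex's 3D coordinates out of the location map at its (u, v) position.
import numpy as np

def mesh_from_location_map(location_map, uv):
    """location_map: (H, W, 3); uv: (N, 2) in [0, 1]. Returns (N, 3) vertices."""
    H, W, _ = location_map.shape
    cols = np.clip((uv[:, 0] * (W - 1)).round().astype(int), 0, W - 1)
    rows = np.clip((uv[:, 1] * (H - 1)).round().astype(int), 0, H - 1)
    return location_map[rows, cols]

X = np.random.rand(128, 128, 3)          # predicted location map
uv = np.random.rand(6890, 2)             # per-vertex UV coordinates of the template
V = mesh_from_location_map(X, uv)        # (6890, 3) reconstructed vertices
```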
|
| 146 |
+
|
| 147 |
+
# 3.2. Dense Correspondence Network (CNet)
|
| 148 |
+
|
| 149 |
+
CNet establishes the dense correspondence between pixels of the input image and areas of 3D mesh surface. As
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
Figure 5. Illustration of the UV transferring of raw image pixels. Elements in the image space can be transferred to the UV space with the guidance of IUV image.
|
| 153 |
+
|
| 154 |
+
illustrated in Figure 2, CNet has an encoder-decoder architecture, where the encoder employs ResNet50 [9] as the backbone, and the decoder consists of several upsampling and convolutional layers with skip connections to the encoder. In particular, the encoder encodes the image as a local feature map and a global feature vector, and also regresses the camera parameters, which are used to project the 3D mesh onto the image plane. The decoder first generates a mask of the human body, which distinguishes foreground pixels (i.e., the human body) from background pixels. Then, the decoder outputs the exact UV coordinates for the foreground pixels, constituting an IUV image as shown in Figure 3. With the predicted IUV image, the corresponding point on the mesh surface for every image pixel can be determined. The loss function for the CNet contains two terms,
|
| 155 |
+
|
| 156 |
+
$$
|
| 157 |
+
\mathcal{L}_{IUV} = \lambda_{c} \mathcal{L}_{c} + \lambda_{r} \mathcal{L}_{r}, \tag{4}
|
| 158 |
+
$$
|
| 159 |
+
|
| 160 |
+
where $\mathcal{L}_c$ is a dense binary cross-entropy loss for classifying each pixel as foreground or background, $\mathcal{L}_r$ is an $l_1$ dense regression loss for predicting the exact UV coordinates, and $\lambda_c$ and $\lambda_r$ are two constant coefficients.
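
A sketch of how these two terms could be computed, using the coefficient values given in Section 3.4 ($\lambda_c = 0.2$, $\lambda_r = 1$) and restricting the $l_1$ term to ground-truth foreground pixels; the tensor shapes and the exact masking scheme are our assumptions.

```python
# Mask BCE loss + foreground-masked l1 UV regression loss for the CNet.
import torch
import torch.nn.functional as F

def iuv_loss(mask_logits, uv_pred, mask_gt, uv_gt, lam_c=0.2, lam_r=1.0):
    # mask_logits, mask_gt: (B, 1, H, W); uv_pred, uv_gt: (B, 2, H, W)
    l_c = F.binary_cross_entropy_with_logits(mask_logits, mask_gt)
    fg = mask_gt.expand_as(uv_pred)                      # restrict to foreground pixels
    l_r = (fg * (uv_pred - uv_gt).abs()).sum() / fg.sum().clamp(min=1)
    return lam_c * l_c + lam_r * l_r

B, H, W = 2, 64, 64
loss = iuv_loss(torch.randn(B, 1, H, W), torch.rand(B, 2, H, W),
                torch.randint(0, 2, (B, 1, H, W)).float(), torch.rand(B, 2, H, W))
```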
|
| 161 |
+
|
| 162 |
+
# 3.3. Vertex coordinates regression
|
| 163 |
+
|
| 164 |
+
The location net (LNet) aims to regress 3D coordinates of mesh vertices by outputting a location map, from which the 3D mesh can be reconstructed easily. As shown in Figure 2, the LNet first transfers image features from the image space to the UV space with the guidance of predicted IUV image:
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
\mathcal{F}_{UV}(u, v) = \mathcal{F}_{im}(x, y), \tag{5}
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
where $(x,y)$ are the image-space coordinates of the pixels classified as foreground, and $(u,v)$ are the predicted UV-space coordinates of these pixels. $\mathcal{F}_{im}$ is the feature map in the image space and $\mathcal{F}_{UV}$ is the transferred feature map in the UV space.
|
| 171 |
+
|
| 172 |
+
The feature map $\mathcal{F}_{UV}$ is well aligned with the output location map. So the LNet can predict location map utilizing corresponding local image features. In this way, the dense correspondence between image pixels and mesh surface areas is established explicitly. An example of raw image pixels transferred to UV space is shown in Figure 5. Note that our framework transfers features instead of pixel values.
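
The UV transfer of Equation 5 can be sketched as a scatter operation guided by the predicted IUV image. Nearest-neighbour scattering onto a discrete UV grid is one simple realization; the resolutions below and the assumption that the IUV image is stored as (foreground mask, u, v) channels are ours for illustration.

```python
# Scatter image-space features into a UV-space feature map guided by the IUV image.
import numpy as np

def transfer_to_uv(feat_im, iuv, uv_size=56):
    """feat_im: (H, W, C); iuv: (H, W, 3) = (fg mask, u, v). Returns (uv_size, uv_size, C)."""
    H, W, C = feat_im.shape
    feat_uv = np.zeros((uv_size, uv_size, C), dtype=feat_im.dtype)
    ys, xs = np.nonzero(iuv[:, :, 0] > 0.5)                          # foreground pixels
    us = np.clip((iuv[ys, xs, 1] * (uv_size - 1)).astype(int), 0, uv_size - 1)
    vs = np.clip((iuv[ys, xs, 2] * (uv_size - 1)).astype(int), 0, uv_size - 1)
    feat_uv[vs, us] = feat_im[ys, xs]                                 # F_UV(u, v) = F_im(x, y)
    return feat_uv

feat_uv = transfer_to_uv(np.random.rand(64, 64, 8), np.random.rand(64, 64, 3))
```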
|
| 173 |
+
|
| 174 |
+
The LNet is a light CNN with skip connections taking the transferred local image features, expanded global image
|
| 175 |
+
|
| 176 |
+

|
| 177 |
+
Figure 6. Illustration of our consistent loss between the location map and the IUV image. 3D coordinates in the location map are transferred back to the image space using IUV image, and then projected to the image plane. The projected 2D coordinates are supervised by the coordinates of image pixels in the image space.
|
| 178 |
+
|
| 179 |
+
feature and a reference location map as input. Intuitively, we apply a weighted $l_{1}$ loss between the predicted location map $X$ and the ground-truth location map $\hat{X}$, i.e.,
|
| 180 |
+
|
| 181 |
+
$$
|
| 182 |
+
\mathcal{L}_{map} = \sum_{u} \sum_{v} W(u, v) \cdot \left\| X(u, v) - \hat{X}(u, v) \right\|_{1}. \tag{6}
|
| 183 |
+
$$
|
| 184 |
+
|
| 185 |
+
$W$ is a weight map used to balance the contribution of different mesh areas, where areas away from torso are assigned higher weights.
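
A small sketch of this weighted $l_1$ loss; the uniform weight map used here is only a stand-in for the torso-aware weights described above, and the map resolution is illustrative.

```python
# Weighted l1 loss between predicted and ground-truth location maps (Eq. 6).
import numpy as np

def location_map_loss(X_pred, X_gt, W_map):
    # X_pred, X_gt: (H, W, 3) location maps; W_map: (H, W) per-pixel weights
    return (W_map[..., None] * np.abs(X_pred - X_gt)).sum()

H = Wres = 128
loss = location_map_loss(np.random.rand(H, Wres, 3), np.random.rand(H, Wres, 3),
                         np.ones((H, Wres)))
```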
|
| 186 |
+
|
| 187 |
+
We also reconstruct a 3D human mesh from the predicted location map and obtain 3D joints from the mesh using a joint regressor, as in previous works [18, 21, 20]. Then we add supervision on the 3D joint coordinates and on their projected 2D coordinates in the image space, i.e.,
|
| 188 |
+
|
| 189 |
+
$$
|
| 190 |
+
\mathcal{L}_{J}^{3D} = \sum_{i}^{k} \left\| Z_{i} - \hat{Z}_{i} \right\|_{1}, \tag{7}
|
| 191 |
+
$$
|
| 192 |
+
|
| 193 |
+
$$
|
| 194 |
+
\mathcal{L}_{J}^{2D} = \sum_{i}^{k} \left\| v_{i} \left(z_{i} - \hat{z}_{i}\right) \right\|_{2}^{2}, \tag{8}
|
| 195 |
+
$$
|
| 196 |
+
|
| 197 |
+
where $Z_{i}$ and $z_{i}$ are the regressed 3D and 2D coordinates of joints, while $\hat{Z}_{i}$ and $\hat{z}_i$ refer to the coordinates of the ground-truth joints, and $v_{i}$ denotes the visibility of joints.
|
| 198 |
+
|
| 199 |
+
Finally, the full loss for LNet is
|
| 200 |
+
|
| 201 |
+
$$
|
| 202 |
+
\mathcal{L}_{loc} = \mathcal{L}_{map} + \mathcal{L}_{J}^{3D} + \mathcal{L}_{J}^{2D}. \tag{9}
|
| 203 |
+
$$
|
| 204 |
+
|
| 205 |
+
Consistent Loss: Besides the above widely used supervision, we add an extra supervision between regressed location map and ground-truth IUV image to improve the alignment between 3D mesh and image.
|
| 206 |
+
|
| 207 |
+
As shown in Figure 6, with an IUV image, we can also transfer location map from the UV space back to the image space and get 3D coordinates for every foreground pixel. The 3D coordinates are then projected to image plane to get 2D coordinates, which should be consistent with the coordinates of the pixels in the image space. Then the consistent
|
| 208 |
+
|
| 209 |
+
loss is constructed as follows:
|
| 210 |
+
|
| 211 |
+
$$
|
| 212 |
+
\mathcal{L}_{con} = \sum_{(x, y)} \left\| (x, y) - \pi(X(u, v), c) \right\|_{2}^{2}, \tag{10}
|
| 213 |
+
$$
|
| 214 |
+
|
| 215 |
+
where $X$ is the predicted location map, $\pi(X, c)$ denotes the projection function with predicted camera parameters $c$ , and $x, y, u, v$ are the same as that in Equation 5. This consistent loss is similar to the loss item $\mathcal{L}_{dense}$ in recent work of Rong et al. [29]. However, in our framework there is no need to calculate the corresponding point on mesh surface as in [29], because the correspondence between mesh surface and image pixel is already established.
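
A sketch of this consistency term, assuming a simple weak-perspective projection (scale plus 2D translation) for $\pi$ and the same (mask, u, v) IUV layout as before; the camera model, resolutions and random inputs are assumptions for illustration.

```python
# Consistency loss: transfer the location map back to foreground pixels via the IUV
# image, project with the camera, and compare against the pixels' own coordinates.
import numpy as np

def consistent_loss(X_uv, iuv, cam, uv_size=56):
    """X_uv: (uv_size, uv_size, 3) location map; iuv: (H, W, 3); cam = (s, tx, ty)."""
    s, tx, ty = cam
    ys, xs = np.nonzero(iuv[:, :, 0] > 0.5)
    us = np.clip((iuv[ys, xs, 1] * (uv_size - 1)).astype(int), 0, uv_size - 1)
    vs = np.clip((iuv[ys, xs, 2] * (uv_size - 1)).astype(int), 0, uv_size - 1)
    pts3d = X_uv[vs, us]                                   # 3D point for each fg pixel
    proj = s * pts3d[:, :2] + np.array([tx, ty])           # pi(X(u, v), c)
    pix = np.stack([xs, ys], axis=1).astype(float)         # the pixels' own (x, y)
    return ((proj - pix) ** 2).sum()

loss = consistent_loss(np.random.rand(56, 56, 3), np.random.rand(32, 32, 3),
                       (30.0, 16.0, 16.0))
```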
|
| 216 |
+
|
| 217 |
+
# 3.4. Implementation details
|
| 218 |
+
|
| 219 |
+
We set $\lambda_{c}$, $\lambda_{r}$ and $\lambda_{con}$ to 0.2, 1 and 1, respectively, and optimize the framework with the Adam optimizer [19], using batch size 128 and learning rate 2.5e-4. The training data is augmented with random scaling, rotation, flipping and RGB channel noise. We first train the CNet for 5 epochs and then train the full framework end-to-end for 30 epochs.
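
A compact sketch of these optimization settings, with placeholder modules standing in for CNet and LNet; only the listed hyper-parameters (Adam, learning rate 2.5e-4, batch size 128, 5 + 30 epochs) come from the text, and the training loops are left as stubs.

```python
# Staged training schedule with the hyper-parameters stated above.
import torch

cnet, lnet = torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)   # placeholders for CNet / LNet
optimizer = torch.optim.Adam(list(cnet.parameters()) + list(lnet.parameters()),
                             lr=2.5e-4)
batch_size = 128

for epoch in range(5):       # stage 1: train the correspondence net (CNet) alone
    pass                     # ... iterate batches, minimize the IUV loss
for epoch in range(30):      # stage 2: train the full framework end-to-end
    pass                     # ... iterate batches, minimize the full loss of Eq. (1)
```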
|
| 220 |
+
|
| 221 |
+
# 4. Experiments
|
| 222 |
+
|
| 223 |
+
# 4.1. Datasets
|
| 224 |
+
|
| 225 |
+
In the experiment, we train our model on the Human3.6M [13], UP-3D [22] and SURREAL [34] dataset, while we provide evaluations on the test set of Human3.6M, SURREAL and LSP dataset [17].
|
| 226 |
+
|
| 227 |
+
Human3.6M: Human3.6M [13] is a large-scale indoor dataset for 3D human pose estimation, including multiple subjects performing typical actions like walking, sitting and eating. Following the common setting [18], we use subjects S1, S5, S6, S7 and S8 as training data and subjects S9 and S11 for evaluation. Results are reported using two widely used metrics (MPJPE and MPJPE-PA) under two popular protocols, P1 and P2, as defined in [18].
|
| 228 |
+
|
| 229 |
+
UP-3D: UP-3D [22] is an outdoor 3D human pose estimation dataset. It provides 3D human body ground truth by fitting SMPL model on images from 2D human pose benchmarks. We utilize the images of training and validation set for training.
|
| 230 |
+
|
| 231 |
+
SURREAL: SURREAL dataset [34] is a large dataset providing synthetic images with ground-truth SMPL model parameters. We use the standard split setting [34] but remove all images with incomplete human body and evaluate on the same sampled test set as BodyNet [33].
|
| 232 |
+
|
| 233 |
+
LSP: LSP [17] dataset is a 2D human pose estimation benchmark. In our work, we evaluate the segmentation accuracy of each model on the segmentation annotation [22].
|
| 234 |
+
|
| 235 |
+
# 4.2. Comparison with the state-of-the-art
|
| 236 |
+
|
| 237 |
+
In this section, we present comparison of our method with other state-of-the-art mesh-based methods.
|
| 238 |
+
|
| 239 |
+
<table><tr><td>Methods</td><td>MPJPE-PA</td></tr><tr><td>Lassner etc. [22]</td><td>93.9</td></tr><tr><td>SMPLify [4]</td><td>82.3</td></tr><tr><td>Pavlakos etc. [26]</td><td>75.9</td></tr><tr><td>HMR[18]</td><td>56.8</td></tr><tr><td>NBF[25]</td><td>59.9</td></tr><tr><td>CMR[21]</td><td>50.1</td></tr><tr><td>DenseRaC[36]</td><td>48.0</td></tr><tr><td>SPIN[20]</td><td>41.1</td></tr><tr><td>Ours</td><td>39.3</td></tr></table>
|
| 240 |
+
|
| 241 |
+
Table 2. Comparison with the state-of-the-art mesh-based 3D human estimation methods on Human3.6M test set. The numbers are joint errors in mm with Procrustes alignment under P2, and lower is better. Our approach achieves the state-of-the-art performance.
|
| 242 |
+
|
| 243 |
+
<table><tr><td>Methods</td><td>Surface Error</td></tr><tr><td>SMPLify++ [22]</td><td>75.3</td></tr><tr><td>Tunget al. [32]</td><td>74.5</td></tr><tr><td>BodyNet[33]</td><td>73.6</td></tr><tr><td>Ours</td><td>56.5</td></tr></table>
|
| 244 |
+
|
| 245 |
+
Table 3. Comparison with the state-of-the-art methods on the SURREAL dataset. The numbers are the mean vertex errors in mm, and lower is better. Our method outperforms the baselines by a large margin.
|
| 246 |
+
|
| 247 |
+
<table><tr><td></td><td colspan="2">FB Seg.</td><td colspan="2">Part Seg</td></tr><tr><td></td><td>acc.</td><td>f1</td><td>acc.</td><td>f1</td></tr><tr><td>SMPLify oracle [4]</td><td>92.17</td><td>0.88</td><td>88.82</td><td>0.67</td></tr><tr><td>SMPLify [4]</td><td>91.89</td><td>0.88</td><td>87.71</td><td>0.67</td></tr><tr><td>SMPLify on [26]</td><td>92.17</td><td>0.88</td><td>88.24</td><td>0.64</td></tr><tr><td>HMR [18]</td><td>91.67</td><td>0.87</td><td>87.12</td><td>0.60</td></tr><tr><td>CMR [21]</td><td>91.46</td><td>0.87</td><td>88.69</td><td>0.66</td></tr><tr><td>SPIN [20]</td><td>91.83</td><td>0.87</td><td>89.41</td><td>0.68</td></tr><tr><td>Ours</td><td>92.10</td><td>0.88</td><td>89.45</td><td>0.69</td></tr></table>
|
| 248 |
+
|
| 249 |
+
Table 4. Comparison with the state-of-the-art methods on LSP test set. The numbers are accuracy and f1 scores, and higher is better. SMPLify [4] is optimization based, while HMR [18], CMR [21], SPIN [20] and our method are regression based. Our framework achieves the state-of-the-art result among regression based methods and is competitive with optimization based methods.
|
| 250 |
+
|
| 251 |
+
Table 2 shows the results on the Human3.6M test set. We train our model following the setting of CMR [21] and utilize Human3.6M and UP-3D as the training set. Our method achieves the state-of-the-art performance among mesh-based methods. It is worth noting that SPIN [20] and our method focus on different aspects and are compatible. SPIN [31] focuses on training with data for which 3D ground truth is scarce, and its network is trained with extra data from 2D human pose benchmarks, while we focus on the dense correspondence between mesh and image and do not include data from 2D human pose benchmarks.
|
| 252 |
+
|
| 253 |
+
Similarly, we show the results on SURREAL dataset in
|
| 254 |
+
|
| 255 |
+
<table><tr><td rowspan="2">UV map</td><td rowspan="2">\( {\mathcal{F}}_{G} \)</td><td rowspan="2">\( {\mathcal{F}}_{L} \)</td><td rowspan="2">raw pixel</td><td colspan="2">MPJPE</td><td colspan="2">MPJPE-PA</td></tr><tr><td>P1</td><td>P2</td><td>P1</td><td>P2</td></tr><tr><td rowspan="4">SMPL</td><td>✓</td><td></td><td></td><td>72.1</td><td>68.9</td><td>51.9</td><td>49.1</td></tr><tr><td></td><td>✓</td><td></td><td>71.9</td><td>69.6</td><td>47.4</td><td>44.8</td></tr><tr><td>✓</td><td>✓</td><td></td><td>65.0</td><td>61.7</td><td>45.1</td><td>42.6</td></tr><tr><td>✓</td><td></td><td>✓</td><td>65.0</td><td>63.2</td><td>46.5</td><td>44.7</td></tr><tr><td rowspan="4">Ours</td><td>✓</td><td></td><td></td><td>69.5</td><td>67.7</td><td>49.4</td><td>47.1</td></tr><tr><td></td><td>✓</td><td></td><td>69.8</td><td>68.4</td><td>44.6</td><td>42.3</td></tr><tr><td>✓</td><td>✓</td><td></td><td>62.7</td><td>60.6</td><td>42.2</td><td>39.3</td></tr><tr><td>✓</td><td></td><td>✓</td><td>63.2</td><td>61.0</td><td>45.5</td><td>42.6</td></tr></table>
|
| 256 |
+
|
| 257 |
+
Table 5. Comparison on the Human3.6M test set with different UV maps and inputs to the location net. The numbers are 3D joint errors in mm. $\mathcal{F}_G$ and $\mathcal{F}_L$ refer to the global feature vector and the local feature map, respectively. With both UV maps, the framework using local features outperforms the baseline using the global feature by a large margin. Combining the global and local features further improves the performance. However, transferring raw image pixels brings a much smaller gain. With the same input, the frameworks using our UV map outperform those using the SMPL default UV map.
|
| 258 |
+
|
| 259 |
+
Table 3. Our model is trained only with training data of SURREAL dataset and outperforms the previous methods by a large margin. The human shape in SURREAL dataset is of great variety, and this verifies the human shape reconstruction capability of our method.
|
| 260 |
+
|
| 261 |
+
We also investigate human shape estimation accuracy by evaluating the foreground-background and part-segmentation performance on the LSP test set. During the evaluation, we use the projection of the 3D mesh as the segmentation result. The predicted IUV image is not used in the evaluation for a fair comparison. The results are shown in Table 4. Our regression-based method outperforms the state-of-the-art regression-based methods and is competitive with the optimization-based methods, which tend to perform better on this metric but have much lower inference speed.
|
| 262 |
+
|
| 263 |
+
# 4.3. Ablative studies
|
| 264 |
+
|
| 265 |
+
In this section, we provide the ablation studies of the proposed method. We train all networks with training data from Human3.6M and UP-3D dataset, and evaluate the models on Human3.6M test set.
|
| 266 |
+
|
| 267 |
+
Dense correspondence: We first investigate the effectiveness of the dense correspondence between the 3D mesh and the image features. We train networks that use only the global feature or only the transferred local features as the input to the LNet. The comparison is shown in Table 5. With both UV maps, the framework utilizing transferred local features outperforms the baseline using the global feature by a large margin, which proves the effectiveness of the established dense correspondence. Combining the global feature with the local features further improves the performance.
|
| 268 |
+
|
| 269 |
+
We also train frameworks that transfer raw image pixels
|
| 270 |
+
|
| 271 |
+

|
| 272 |
+
RGB image
|
| 273 |
+
|
| 274 |
+

|
| 275 |
+
|
| 276 |
+
Regressed
|
| 277 |
+

|
| 278 |
+
Figure 7. An example of mesh reconstructed using our new UV map (top) and SMPL default UV map (bottom). SMPL default UV map may cause discontinuity between different parts as well as erroneous estimation of some vertices near part edges. While our new UV map mitigates these problems.
|
| 279 |
+
|
| 280 |
+

|
| 281 |
+
|
| 282 |
+

|
| 283 |
+
Estimated
|
| 284 |
+
detail
|
| 285 |
+
|
| 286 |
+

|
| 287 |
+
|
| 288 |
+

|
| 289 |
+
Mesh
|
| 290 |
+
|
| 291 |
+
rather than image features and observe much less improvement than transferring local features. We attribute this phenomenon to the lack of human pose information in transferred raw pixels. For images with the same person in different poses, the pixels of a certain body part will be transferred to the same position in the UV space, which generates similar inputs for the LNet. So the LNet can only use transferred pixels to refine the estimation of human shape, and predict human pose only based on global feature.
|
| 292 |
+
|
| 293 |
+
In contrast, the CNet is able to embed human pose information into the image features, so the LNet can rely on the transferred features to refine both the shape and the pose estimates.
|
| 294 |
+
|
| 295 |
+
UV map: In the second ablation study, we investigate the influence of different UV maps. We compare the performance of frameworks using the SMPL default UV map [23] and our continuous UV map.
|
| 296 |
+
|
| 297 |
+
As shown in Table 5, with the same input to the LNet, the frameworks using our continuous UV map outperform those using the SMPL default UV map by a large margin. We attribute the gain to the continuity of the new UV map. As shown in Figure 7, some neighboring parts on the mesh surface are distant on the SMPL default UV map, such as arms and hands, which may lead to discontinuities between these parts on the final 3D mesh. Additionally, some faraway surface parts are very close on the UV plane, such as hands and feet, which may cause erroneous estimates for vertices on the edges of these parts. Both phenomena are visible in Figure 7. In contrast, our UV map preserves more of the neighboring relations of the original mesh surface, so these problems are mitigated.
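One simple way to quantify the continuity argument above is to measure how many mesh-adjacent vertex pairs remain close on the UV plane; a map that preserves more neighboring relations scores higher. The sketch below is our own illustration (the 0.05 threshold is arbitrary), not an evaluation protocol from the paper.

```python
import numpy as np

def uv_neighbor_preservation(faces, uv, thresh=0.05):
    """Fraction of mesh edges whose endpoints stay within `thresh` on the UV plane.

    faces: (F, 3) integer triangle indices of the body mesh.
    uv:    (V, 2) per-vertex UV coordinates in [0, 1].
    """
    edges = np.concatenate([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]])
    edges = np.unique(np.sort(edges, axis=1), axis=0)        # undirected, deduplicated
    d = np.linalg.norm(uv[edges[:, 0]] - uv[edges[:, 1]], axis=1)
    return float(np.mean(d < thresh))
```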
|
| 298 |
+
|
| 299 |
+
# 4.4. Qualitative result
|
| 300 |
+
|
| 301 |
+
Some qualitative results are presented in Figure 8, and Figure 9 includes some failure cases. Typical failures can be attributed to challenging poses, viewpoints rarely seen
|
| 302 |
+
|
| 303 |
+

|
| 304 |
+
Figure 8. Qualitative results of our approach. Rows 1-3: LSP [17]. Rows 4-5: Human3.6M [13].
|
| 305 |
+
|
| 306 |
+

|
| 307 |
+
(a) Image
|
| 308 |
+
|
| 309 |
+

|
| 310 |
+
(b) Result
|
| 311 |
+
|
| 312 |
+

|
| 313 |
+
(c) Image
|
| 314 |
+
Figure 9. Examples of erroneous reconstructions by our method. Typical failures can be attributed to challenging poses, viewpoints rarely seen in the training set, severe self-occlusion, and confusion caused by interactions among multiple people.
|
| 315 |
+
|
| 316 |
+

|
| 317 |
+
(d) Result
|
| 318 |
+
|
| 319 |
+
in the training set, severe self-occlusion, and confusion caused by interactions among multiple people.
|
| 320 |
+
|
| 321 |
+
# 5. Conclusion
|
| 322 |
+
|
| 323 |
+
This work addresses the lack of dense correspondence between image features and the output 3D
|
| 324 |
+
|
| 325 |
+
mesh in mesh-based monocular 3D human body estimation. The correspondence is explicitly established by IUV image estimation and image feature transfer. Instead of reconstructing the human mesh from a global feature alone, our framework makes use of additional dense local features transferred to UV space. To facilitate the learning of the framework, we propose a new UV map that maintains more of the neighboring relations of the original mesh surface. Our framework achieves state-of-the-art performance among 3D mesh-based methods on several public benchmarks. Future work could extend the framework to reconstructing surface details beyond existing human models, such as cloth wrinkles and hair styles.
|
| 326 |
+
|
| 327 |
+
# Acknowledgement
|
| 328 |
+
|
| 329 |
+
We thank reviewers for helpful discussions and comments. Wanli Ouyang is supported by the Australian Research Council Grant DP200103223.
|
| 330 |
+
|
| 331 |
+
# References
|
| 332 |
+
|
| 333 |
+
[1] T. Alldieck, G. Pons-Moll, C. Theobalt, and M. Magnor. Tex2shape: Detailed full human body geometry from a single image. arXiv preprint arXiv:1904.08645, 2019.
|
| 334 |
+
[2] R. Alp Güler, N. Neverova, and I. Kokkinos. Densepose: Dense human pose estimation in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7297-7306, 2018.
|
| 335 |
+
[3] D. Anguelov, P. Srinivasan, D. Koller, S. Thrun, J. Rodgers, and J. Davis. Scape: shape completion and animation of people. In ACM transactions on graphics (TOG), volume 24, pages 408-416. ACM, 2005.
|
| 336 |
+
[4] F. Bogo, A. Kanazawa, C. Lassner, P. Gehler, J. Romero, and M. J. Black. Keep it smpl: Automatic estimation of 3d human pose and shape from a single image. In European Conference on Computer Vision, pages 561-578. Springer, 2016.
|
| 337 |
+
[5] J. Dai, H. Qi, Y. Xiong, Y. Li, G. Zhang, H. Hu, and Y. Wei. Deformable convolutional networks. In ICCV, 2017.
|
| 338 |
+
[6] V. Gabeur, J.-S. Franco, X. Martin, C. Schmid, and G. Rogez. Moulding humans: Non-parametric 3d human shape estimation from single images. arXiv preprint arXiv:1908.00439, 2019.
|
| 339 |
+
[7] A. Grigorev, A. Sevastopolsky, A. Vakhitov, and V. Lempitsky. Coordinate-based texture inpainting for pose-guided human image generation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 12135-12144, 2019.
|
| 340 |
+
[8] P. Guan, A. Weiss, A. O. Balan, and M. J. Black. Estimating human shape and pose from a single image. In 2009 IEEE 12th International Conference on Computer Vision, pages 1381-1388. IEEE, 2009.
|
| 341 |
+
[9] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016.
|
| 342 |
+
[10] P. Huang, M. Tejera, J. Collomosse, and A. Hilton. Hybrid skeletal-surface motion graphs for character animation from 4d performance capture. ACM Transactions on Graphics (ToG), 34(2):17, 2015.
|
| 343 |
+
[11] Y. Huang, F. Bogo, C. Lassner, A. Kanazawa, P. V. Gehler, J. Romero, I. Akhter, and M. J. Black. Towards accurate marker-less human shape and pose estimation over time. In 2017 International Conference on 3D Vision (3DV), pages 421-430. IEEE, 2017.
|
| 344 |
+
[12] M. E. Hussein, M. Torki, M. A. Gowayyed, and M. El-Saban. Human action recognition using a temporal hierarchy of covariance descriptors on 3d joint locations. In Twenty-Third International Joint Conference on Artificial Intelligence, 2013.
|
| 345 |
+
[13] C. Ionescu, D. Papava, V. Olaru, and C. Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3d human sensing in natural environments. IEEE transactions on pattern analysis and machine intelligence, 36(7):1325-1339, 2013.
|
| 346 |
+
|
| 347 |
+
[14] A. Jacobson and D. Panozzo. libigl: prototyping geometry processing research in C++. In SIGGRAPH Asia 2017 courses, page 11. ACM, 2017.
|
| 348 |
+
[15] M. Jaderberg, K. Simonyan, A. Zisserman, et al. Spatial transformer networks. In Advances in neural information processing systems, pages 2017-2025, 2015.
|
| 349 |
+
[16] Z. Jiang, S. Schaefer, and D. Panozzo. Simplicial complex augmentation framework for bijective maps. ACM Transactions on Graphics, 36(6), 2017.
|
| 350 |
+
[17] S. Johnson and M. Everingham. Clustered pose and nonlinear appearance models for human pose estimation. In Proceedings of the British Machine Vision Conference, 2010. doi:10.5244/C.24.12.
|
| 351 |
+
[18] A. Kanazawa, M. J. Black, D. W. Jacobs, and J. Malik. End-to-end recovery of human shape and pose. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7122-7131, 2018.
|
| 352 |
+
[19] D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
|
| 353 |
+
[20] N. Kolotouros, G. Pavlakos, M. J. Black, and K. Daniilidis. Learning to reconstruct 3d human pose and shape via model-fitting in the loop. arXiv preprint arXiv:1909.12828, 2019.
|
| 354 |
+
[21] N. Kolotouros, G. Pavlakos, and K. Daniilidis. Convolutional mesh regression for single-image human shape reconstruction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4501-4510, 2019.
|
| 355 |
+
[22] C. Lassner, J. Romero, M. Kiefel, F. Bogo, M. J. Black, and P. V. Gehler. Unite the people: Closing the loop between 3d and 2d human representations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6050-6059, 2017.
|
| 356 |
+
[23] M. Loper, N. Mahmood, J. Romero, G. Pons-Moll, and M. J. Black. Smpl: A skinned multi-person linear model. ACM transactions on graphics (TOG), 34(6):248, 2015.
|
| 357 |
+
[24] A. Newell, K. Yang, and J. Deng. Stacked hourglass networks for human pose estimation. In European conference on computer vision, pages 483-499. Springer, 2016.
|
| 358 |
+
[25] M. Omran, C. Lassner, G. Pons-Moll, P. Gehler, and B. Schiele. Neural body fitting: Unifying deep learning and model based human pose and shape estimation. In 2018 International Conference on 3D Vision (3DV), pages 484-494. IEEE, 2018.
|
| 359 |
+
[26] G. Pavlakos, L. Zhu, X. Zhou, and K. Daniilidis. Learning to estimate 3d human pose and shape from a single color image. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 459-468, 2018.
|
| 360 |
+
[27] L. Pishchulin, E. Insafutdinov, S. Tang, B. Andres, M. Andriluka, P. V. Gehler, and B. Schiele. Deepcut: Joint subset partition and labeling for multi person pose estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4929-4937, 2016.
|
| 361 |
+
[28] A. Pumarola, J. Sanchez-Riera, G. Choi, A. Sanfeliu, and F. Moreno-Noguer. 3dpeople: Modeling the geometry of dressed humans. In Proceedings of the IEEE International Conference on Computer Vision, pages 2242-2251, 2019.
|
| 362 |
+
|
| 363 |
+
[29] Y. Rong, Z. Liu, C. Li, K. Cao, and C. C. Loy. Delving deep into hybrid annotations for 3d human recovery in the wild. In The IEEE International Conference on Computer Vision (ICCV), October 2019.
|
| 364 |
+
[30] S. Saito, Z. Huang, R. Natsume, S. Morishima, A. Kanazawa, and H. Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In Proceedings of the IEEE International Conference on Computer Vision, pages 2304-2314, 2019.
|
| 365 |
+
[31] B. Tekin, P. Márquez-Neila, M. Salzmann, and P. Fua. Learning to fuse 2d and 3d image cues for monocular body pose estimation. In Proceedings of the IEEE International Conference on Computer Vision, pages 3941–3950, 2017.
|
| 366 |
+
[32] H.-Y. Tung, H.-W. Tung, E. Yumer, and K. Fragkiadaki. Self-supervised learning of motion capture. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett, editors, Advances in Neural Information Processing Systems 30, pages 5236-5246. Curran Associates, Inc., 2017.
|
| 367 |
+
[33] G. Varol, D. Ceylan, B. Russell, J. Yang, E. Yumer, I. Laptev, and C. Schmid. Bodynet: Volumetric inference of 3d human body shapes. In Proceedings of the European Conference on Computer Vision (ECCV), pages 20-36, 2018.
|
| 368 |
+
[34] G. Varol, J. Romero, X. Martin, N. Mahmood, M. J. Black, I. Laptev, and C. Schmid. Learning from synthetic humans. In CVPR, 2017.
|
| 369 |
+
[35] L. Xia, C.-C. Chen, and J. K. Aggarwal. View invariant human action recognition using histograms of 3d joints. In 2012 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, pages 20-27. IEEE, 2012.
|
| 370 |
+
[36] Y. Xu, S.-C. Zhu, and T. Tung. Denserac: Joint 3d pose and shape estimation by dense render-and-compare. In Proceedings of the IEEE International Conference on Computer Vision, pages 7760-7770, 2019.
|
| 371 |
+
[37] P. Yao, Z. Fang, F. Wu, Y. Feng, and J. Li. Densebody: Directly regressing dense 3d human pose and shape from a single color image. arXiv preprint arXiv:1903.10153, 2019.
|
| 372 |
+
[38] A. Zanfir, E. Marinoiu, and C. Sminchisescu. Monocular 3d pose and shape estimation of multiple people in natural scenes-the importance of multiple scene constraints. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2148-2157, 2018.
|
| 373 |
+
[39] H. Zhu, X. Zuo, S. Wang, X. Cao, and R. Yang. Detailed human shape estimation from a single image by hierarchical mesh deformation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4491-4500, 2019.
|
3dhumanmeshregressionwithdensecorrespondence/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d691025a13320fbc09489c5318a8d3ecdbd67616b9871d893460604ad8bb7f1e
|
| 3 |
+
size 686379
|
3dhumanmeshregressionwithdensecorrespondence/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9d4a9003eb7918e5f1e3a5f60a2824a8b945080dce86a46fe918069f29d76256
|
| 3 |
+
size 394874
|
3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/f5f1d7aa-96ff-441f-b2d2-c3511cdca894_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3298e85997a7ed88d7589b4e5bb787624f2422a5ac62c9b04cc48737cfada913
|
| 3 |
+
size 79389
|
3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/f5f1d7aa-96ff-441f-b2d2-c3511cdca894_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b4f9f7c308f2554fff0cd1ba3a8be06a3db82dc3a5eadf50a6aa33274c8eaa6c
|
| 3 |
+
size 96578
|
3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/f5f1d7aa-96ff-441f-b2d2-c3511cdca894_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c77558eae318ac66d82b71402077a8621dfd4a708dc3d8f2241cccc1aedc3bf4
|
| 3 |
+
size 4312691
|
3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/full.md
ADDED
|
@@ -0,0 +1,301 @@
| 1 |
+
# 3D-MPA: Multi Proposal Aggregation for 3D Semantic Instance Segmentation
|
| 2 |
+
|
| 3 |
+
Francis Engelmann $^{1,2\dagger}$ Martin Bokeloh $^{2}$ Alireza Fathi $^{2}$ Bastian Leibe $^{1}$ Matthias Nießner $^{3}$ $^{1}$ RWTH Aachen University $^{2}$ Google $^{3}$ Technical University Munich
|
| 4 |
+
|
| 5 |
+

|
| 6 |
+
Input: 3D Point Cloud
|
| 7 |
+
|
| 8 |
+

|
| 9 |
+
Figure 1: Given an input 3D point cloud, our Multi Proposal Aggregation network (3D-MPA) predicts point-accurate 3D semantic instances. We propose an object-centric approach which generates instance proposals followed by a graph convolutional network which enables higher-level interactions between adjacent proposals. Unlike previous methods, the final object instances are obtained by aggregating multiple proposals instead of pruning proposals using non-maximum-suppression.
|
| 10 |
+
|
| 11 |
+

|
| 12 |
+
Object Center Votes & Aggregated Proposals
|
| 13 |
+
Output: 3D Semantic Instances
|
| 14 |
+
|
| 15 |
+
# Abstract
|
| 16 |
+
|
| 17 |
+
We present 3D-MPA, a method for instance segmentation on 3D point clouds. Given an input point cloud, we propose an object-centric approach where each point votes for its object center. We sample object proposals from the predicted object centers. Then, we learn proposal features from grouped point features that voted for the same object center. A graph convolutional network introduces interproposal relations, providing higher-level feature learning in addition to the lower-level point features. Each proposal comprises a semantic label, a set of associated points over which we define a foreground-background mask, an objectness score and aggregation features. Previous works usually perform non-maximum-suppression (NMS) over proposals to obtain the final object detections or semantic instances. However, NMS can discard potentially correct predictions. Instead, our approach keeps all proposals and groups them together based on the learned aggregation features. We show that grouping proposals improves over NMS and outperforms previous state-of-the-art methods on the tasks of 3D object detection and semantic instance segmentation on the ScanNetV2 benchmark and the S3DIS dataset.
|
| 18 |
+
|
| 19 |
+
# 1. Introduction
|
| 20 |
+
|
| 21 |
+
With the availability of commodity RGB-D sensors such as Kinect or Intel RealSense, the computer vision and graphics communities have achieved impressive results on 3D reconstruction methods [27, 28] that can now even achieve global pose tracking in real time [8, 47]. In addition to the reconstruction of the geometry, semantic scene understanding is critical to many real-world computer vision applications, including robotics, upcoming applications on mobile devices, or AR/VR headsets. In order to understand reconstructed 3D environments, researchers have already made significant progress with 3D deep learning methods that operate on volumetric grids [6, 32, 37, 38, 48], point clouds [11, 31, 33], meshes [16, 36] or multi-view hybrids [7, 39]. While early 3D learning approaches focus mostly on semantic segmentation, we have recently seen many works on 3D semantic instance segmentation [18, 19, 49] and 3D object detection [29, 51], both of which we believe are critical for real-world 3D perception.
|
| 22 |
+
|
| 23 |
+
One of the fundamental challenges in 3D object detection lies in how to predict and process object proposals: On one side, top-down methods first predict a large number of rough object bounding box proposals (e.g., anchor mechanisms in Faster R-CNN [35]), followed by a second stage refinement step. Here, results can be generated in a single
|
| 24 |
+
|
| 25 |
+
forward pass, but there is little outlier tolerance to wrongly detected box anchors. On the other side, bottom-up approaches utilize metric-learning methods with the goal of learning a per-point feature embedding space which is subsequently clustered into object instances [10, 19, 24]. This strategy can effectively handle outliers, but it heavily depends on manually tuning cluster parameters and is inherently expensive to compute at inference time due to $O(N^2)$ pairwise relationships.
|
| 26 |
+
|
| 27 |
+
In this work, we propose 3D-MPA which follows a hybrid approach that takes advantage of the benefits of both top-down and bottom-up techniques: from an input point cloud representing a 3D scan, we generate votes from each point for object centers and group those into object proposals; then - instead of rejecting proposals using nonmaximum-suppression - we learn higher-level features for each proposal, which we use to cluster the proposals into final object detections. The key idea behind this strategy is that the number of generated proposals is orders of magnitude smaller than the number of raw input points in a 3D scan, which makes grouping computationally very efficient. At the same time, each object can receive multiple proposals, which simplifies proposal generation since objects of all sizes are handled in the same fashion, and we can easily tolerate outlier proposals further down the pipeline.
|
| 28 |
+
|
| 29 |
+
To this end, our method first generates object-centric proposals using a per-point voting scheme from a sparse volumetric feature backbone. We then interpret the proposals as nodes of a proposal graph which we feed into a graph convolutional neural network in order to enable higher-order interactions between neighboring proposal features. In addition to proposal losses, the network is trained with a proxy loss between proposals similar to affinity scores in metric learning; however, due to the relatively small number of proposals, we can efficiently train the network and cluster proposals. In the end, each node predicts a semantic class, an object foreground mask, an objectness score, and additional features that are used to group nodes together.
|
| 30 |
+
|
| 31 |
+
In summary, our contributions are the following:
|
| 32 |
+
|
| 33 |
+
- A new method for 3D instance segmentation based on dense object center prediction leveraging learned semantic features from a sparse volumetric backbone.
|
| 34 |
+
- To obtain the final object detections and semantic instances from the object proposals, we replace the commonly used NMS with our multi proposal aggregation strategy based on jointly learned proposal features and report significantly improved scores over NMS.
|
| 35 |
+
- We employ a graph convolutional network that explicitly models higher-order interactions between neighboring proposal features in addition to the lower-level point features.
|
| 36 |
+
|
| 37 |
+
# 2. Related Work
|
| 38 |
+
|
| 39 |
+
Object Detection and Instance Segmentation. In the 2D domain, object detection and instance segmentation have most notably been influenced by Faster R-CNN from Ren et al. [35], which introduced the anchor mechanism to predict proposals with associated objectness scores and regions of interest that enable the regression of semantic bounding boxes. This approach was extended in Mask-RCNN [17] to predict per-pixel object instance masks. Hou et al. [18] apply the 2D proposal ideas onto the 3D domain by means of dense 3D convolutional networks. As an alternative, proposal-free methods were proposed in [4, 14, 19] which rely on metric learning. In the 2D domain, Fathi et al. [14] estimate how likely pixels are to belong to the same object. De Brabandere et al. [4] define a discriminative loss, which moves feature points of the same object towards their mean while pushing means of different objects apart. This discriminative loss is adopted by Lahoud et al. [19] to perform instance segmentation in 3D space. Final instances are obtained via clustering of the learned feature space. Yang et al. [49] directly predict object bounding boxes from a learned global feature vector and obtain instance masks by segmenting points inside a bounding box. The recent VoteNet [29] highlights the challenge of directly predicting bounding box centers in sparse 3D data as most surface points are far away from object centers. Instead, they predict bounding boxes by grouping points from the same object based on their votes for object centers. We adopt the object-centric approach, extend it with a branch for instance mask prediction and replace NMS with a grouping mechanism of jointly-learned proposal features.
|
| 40 |
+
|
| 41 |
+
3D Deep Learning. PointNets [31] have pioneered the use of deep learning methods for point cloud processing. Since then, we have seen impressive progress in numerous different fields, including 3D semantic segmentation [15, 12, 21, 31, 33, 40, 46], 3D instance segmentation [10, 18, 19, 45, 49, 50], object detection [18, 29, 51] and relocalization [42], flow estimation [3, 25, 43], scene-graph reconstruction [1] and scene over-segmentation [20]. Point-based architectures, such as PointNet [29] and PointNet++ [34] operate directly on unstructured sets of points, while voxel based approaches, such as 3DMV [7] or SparseConvNets [5, 15] transform the continuous 3D space into a discrete grid representation and define convolutional operators on the volumetric grid, analogously to image convolutions in the 2D domain. Graph-based approaches [22, 41, 46] define convolutional operators over graph-structured data such as 3D meshes [16, 36], citation networks [41], or molecules [9]. Here, we leverage the voxel-based approach of Graham et al. [15] as point feature backbone and use the graph neural network of Wang et al. [46] to enable higher-level interactions between proposals.
|
| 42 |
+
|
| 43 |
+

|
| 44 |
+
Figure 2: 3D-MPA network architecture. From an input point cloud, our network predicts object instance masks by aggregating object proposal masks. The full model consists of three parts: the proposal generation (left) follows an object-centric strategy: each point votes for the center of the object it belongs to. Proposal positions are then sampled from the predicted object centers. By grouping and aggregating votes in the vicinity of sampled proposal positions, we learn proposal features. During proposal consolidation (middle), proposal features are further refined using a graph convolutional network, which enables higher-order interactions on the level of proposals. Finally, we propose to aggregate multiple proposals by clustering jointly learned aggregation features as opposed to the commonly used non-maximum-suppression (right).
|
| 45 |
+
|
| 46 |
+
# 3. Method
|
| 47 |
+
|
| 48 |
+
The overall architecture of 3D-MPA is depicted in Fig. 2. The model consists of three parts: the first takes as input a 3D point cloud and learns object proposals from sampled and grouped point features that voted for the same object center (Sec. 3.1). The next part consolidates the proposal features using a graph convolutional network, enabling higher-level interactions between proposals, which results in refined proposal features (Sec. 3.2). Finally, the object generator consumes the object proposals and generates the final object detections, i.e. semantic instances. We parameterize an object as a set of points associated with that object and a semantic class (Sec. 3.3).
|
| 49 |
+
|
| 50 |
+
# 3.1. Proposal Generation
|
| 51 |
+
|
| 52 |
+
Given a point cloud of size $N \times I$ , consisting of $N$ points and $I$ -dimensional input features (e.g. positions, colors and normals), the first part of the network generates a fixed number $K$ of object proposals. A proposal is a tuple $(y_{i}, g_{i}, s_{i})$ consisting of a position $y_{i} \in \mathbb{R}^{3}$ , a proposal features vector $g_{i} \in \mathbb{R}^{D}$ and a set of points $s_{i}$ associated with the proposal.
|
| 53 |
+
|
| 54 |
+
To generate proposals, we need strong point features that encode the semantic context and the geometry of the underlying scene. We implement a sparse volumetric network [5, 15] as feature backbone to generate per-point features $\{f_i\in \mathbb{R}^F\}_{i = 1}^N$ (Fig. 2, $\square$ ). Semantic context is encoded into the point features by supervising the feature backbone with semantic labels, using the standard cross-entropy loss for per-point semantic classification $\mathcal{L}_{\mathrm{sem,pt}}$ . Following the object-centric approach suggested by Qi et al. [29], points vote for the center of the object they belong to. However, unlike [29], only points from objects predict a center. This is possible since we jointly predict semantic classes, i.e.
|
| 55 |
+
|
| 56 |
+
we can differentiate between points from foreground (objects) and background (walls, floor, etc.) during both training and test. This results in precise center predictions since noisy predictions from background points are ignored. In particular, this is implemented as a regression loss which predicts per-point relative 3D offsets $\Delta x_{i}\in \mathbb{R}^{3}$ between a point position $x_{i}\in \mathbb{R}^{3}$ and its corresponding ground truth bounding-box center $c_{i}^{*}\in \mathbb{R}^{3}$ . We define the per-point center regression loss as:
|
| 57 |
+
|
| 58 |
+
$$
|
| 59 |
+
\mathcal{L}_{\text{cent.pt.}} = \frac{1}{M} \sum_{i} \left\| x_i + \Delta x_i - c_i^{*} \right\|_{H} \cdot \mathbb{1}\left(x_i\right), \tag{1}
|
| 60 |
+
$$
|
| 61 |
+
|
| 62 |
+
where $||\cdot ||_H$ is the Huber-loss (or smooth $\mathrm{L}_1$ -loss) and $\mathbb{1}(\cdot)$ is a binary function indicating whether a point $x_{i}$ belongs to an object. $M$ is a normalization factor equal to the total number of points on objects. All in all, the feature backbone has two heads (Fig. 2, $\square$ ): a semantic head (which performs semantic classification of points) and a center head (which regresses object centers for each point). They are jointly supervised using the combined loss $\mathcal{L}_{\mathrm{point}}$ where $\lambda$ is a weighting factor set to 0.1:
|
| 63 |
+
|
| 64 |
+
$$
|
| 65 |
+
\mathcal{L}_{\text{point}} = \lambda \cdot \mathcal{L}_{\text{sem.pt.}} + \mathcal{L}_{\text{cent.pt.}}. \tag{2}
|
| 66 |
+
$$
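The two per-point losses above can be written down compactly. The NumPy sketch below is only illustrative: it assumes an element-wise smooth-L1 penalty summed over the three coordinates for $\|\cdot\|_H$, and uses plain arrays rather than the paper's TensorFlow graph.

```python
import numpy as np

def huber(x, delta=1.0):
    """Element-wise Huber / smooth-L1 penalty."""
    a = np.abs(x)
    return np.where(a < delta, 0.5 * a ** 2, delta * (a - 0.5 * delta))

def point_loss(points, offsets, gt_centers, on_object, sem_logits, sem_labels, lam=0.1):
    """Sketch of Eq. (1)-(2): masked center regression plus per-point semantic CE."""
    # center head: Huber penalty on the vote error, only for points on objects
    err = huber(points + offsets - gt_centers).sum(axis=1)
    l_cent = (err * on_object).sum() / max(on_object.sum(), 1)

    # semantic head: standard cross-entropy over per-point class logits
    z = sem_logits - sem_logits.max(axis=1, keepdims=True)
    logp = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    l_sem = -logp[np.arange(len(sem_labels)), sem_labels].mean()
    return lam * l_sem + l_cent
```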
|
| 67 |
+
|
| 68 |
+
Proposal Positions and Features. After each point (that belongs to an object) has voted for a center, we obtain a distribution over object centers (Fig. 3, $3^{\mathrm{rd}}$ col.). From this distribution, we randomly pick $K$ samples as proposal positions $\{y_{i} = x_{i} + \Delta x_{i}\in \mathbb{R}^{3}\}_{i = 1}^{K}$ (Fig. 3, $4^{\mathrm{th}}$ col.). We found random sampling to work better than the Farthest Point Sampling (FPS) used in [29], as FPS favors outliers far away from true object centers. Next, we define the set of associated points $s_i$ as those points that voted for centers within a radius $r$ of the sampled proposal position $y_{i}$ . The proposal
|
| 69 |
+
|
| 70 |
+
features $\{g_i\in \mathbb{R}^D\}_{i = 1}^K$ are learned using a PointNet [31] applied to the point features of the associated points $s_i$ . This corresponds to the grouping and normalization technique described in [29]. At this stage, we have $K$ proposals composed of 3D positions $y_{i}$ located near object centers, proposal features $g_{i}\in \mathbb{R}^{D}$ describing the local geometry and the semantics of the nearest objects (Fig. 2, $\square$ ), along with a set of points $s_i$ associated with each proposal.
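A minimal sketch of this sampling-and-grouping step is given below; the max-pool stands in for the PointNet that produces $g_i$, and the parameter values mirror those quoted in the text ($K \approx 500$, $r = 0.3$ m) but are otherwise our own assumptions.

```python
import numpy as np

def sample_and_group(votes, point_feats, on_object, K=500, r=0.3, seed=0):
    """Sample K proposal positions from the voted centers and group nearby votes.

    votes:       (N, 3) voted centers y = x + dx.
    point_feats: (N, F) per-point backbone features f_i.
    on_object:   (N,) bool, True for points predicted as foreground.
    """
    rng = np.random.default_rng(seed)
    fg = np.nonzero(on_object)[0]
    picks = rng.choice(fg, size=K, replace=len(fg) < K)      # random sampling, not FPS
    positions = votes[picks]

    groups, feats = [], []
    for y in positions:
        s_i = fg[np.linalg.norm(votes[fg] - y, axis=1) < r]  # points that voted near y
        groups.append(s_i)
        # stand-in for the PointNet over the grouped point features
        feats.append(point_feats[s_i].max(axis=0) if len(s_i) else np.zeros(point_feats.shape[1]))
    return positions, groups, np.stack(feats)
```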
|
| 71 |
+
|
| 72 |
+
# 3.2. Proposal Consolidation
|
| 73 |
+
|
| 74 |
+
So far, proposal features encode local information of their associated objects. During proposal consolidation, proposals become aware of their global neighborhood by explicitly modeling higher-order interactions between neighboring proposals. To this end, we define a graph convolutional network (GCN) over the proposals. While the initial point-feature backbone operates at the level of points, the GCN operates at the level of proposals. In particular, the nodes of the graph are defined by the proposal positions $y_{i}$ with associated proposal features $g_{i}$ . An edge between two nodes exists if the Euclidean distance $d$ between two 3D proposal positions $y_{\{i,j\}}$ is below $2\mathrm{m}$ . We adopt the convolutional operator from DGCNN [46] to define edge-features $e_{ij}$ between two neighboring proposals as:
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
e_{ij} = h_{\Theta}\left(\left[ y_i, g_i \right], \left[ y_j, g_j \right] - \left[ y_i, g_i \right]\right), \tag{3}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
where $h_\Theta$ is a non-linear function with learnable parameters $\Theta$ and $[\cdot, \cdot]$ denotes concatenation. The graph convolutional network consists of $l$ stacked graph convolutional layers. While our method also works without the GCN refinement (i.e. $l = 0$), we observe the best results using $l = 10$ (Sec. 4). To conclude, during proposal consolidation a GCN learns refined proposal features $\{h_i \in \mathbb{R}^{D'}\}_{i=1}^K$ given the initial proposal features $\{g_i \in \mathbb{R}^D\}_{i=1}^K$ (Fig. 2, □).
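For illustration, one edge-convolution layer over the proposal graph might look like the sketch below; the random linear layer stands in for the learned $h_\Theta$ and the max-aggregation mirrors DGCNN-style edge convolutions, but none of the shapes or weights are taken from the released model.

```python
import numpy as np

def proposal_edge_conv(positions, feats, radius=2.0, hidden=128, seed=0):
    """One DGCNN-style edge convolution over the proposal graph (cf. Eq. 3).

    positions: (K, 3) proposal positions y_i;  feats: (K, D) proposal features g_i.
    Proposals closer than `radius` metres are connected by an edge.
    """
    rng = np.random.default_rng(seed)
    x = np.concatenate([positions, feats], axis=1)          # node feature [y_i, g_i]
    K, D = x.shape
    W = rng.standard_normal((2 * D, hidden)) * 0.01         # stand-in for h_Theta

    dist = np.linalg.norm(positions[:, None] - positions[None], axis=-1)
    out = np.zeros((K, hidden))
    for i in range(K):
        nbrs = np.nonzero((dist[i] < radius) & (np.arange(K) != i))[0]
        if len(nbrs) == 0:
            continue
        e = np.concatenate([np.repeat(x[i:i + 1], len(nbrs), 0), x[nbrs] - x[i]], axis=1)
        out[i] = np.maximum(e @ W, 0).max(axis=0)           # aggregate edge features by max
    return out
```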
|
| 81 |
+
|
| 82 |
+
# 3.3. Object Generation
|
| 83 |
+
|
| 84 |
+
At this stage, we have $K$ proposals $\{(y_i,h_i,s_i)\}_{i = 1}^K$ with positions $y_{i}$, refined features $h_i$ and sets of points $s_i$. The goal is to obtain the final semantic instances (or object detections) from these proposals. To this end, we predict for every proposal a semantic class, an aggregation feature vector, an objectness score and a binary foreground-background mask over the points $s_i$ associated with the proposal. Specifically, the proposal features $h_i$ are input to an MLP with output sizes $(128,128,D_{out})$ where $D_{out} = S + E + 2$, with $S$ semantic classes, an $E$-dimensional aggregation feature and a 2D (positive, negative) objectness score (Fig. 2, $\square$).
|
| 85 |
+
|
| 86 |
+
The objectness score [29, 35] classifies proposals into positive or negative examples. It is supervised via a cross-entropy loss $\mathcal{L}_{obj}$ . Proposals near a ground truth center ( $< 0.3\mathrm{m}$ ) are classified as positive. They are classified as negative, if they are far away ( $>0.6\mathrm{m}$ ) from any ground
|
| 87 |
+
|
| 88 |
+
truth center, or if they are equally far away from two ground truth centers since then the correct ground truth object is ambiguous. This is the case when $d_1 > 0.6 \cdot d_2$ where $d_i$ is the distance to the $i^{th}$ closest ground truth center.
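In code, this assignment rule could look like the following sketch; how ties between the positive and ambiguous conditions are resolved is our assumption, since the text does not specify a precedence.

```python
import numpy as np

def objectness_labels(proposal_pos, gt_centers, near=0.3, far=0.6):
    """Assign objectness supervision: 1 positive, 0 negative, -1 not supervised."""
    d = np.linalg.norm(proposal_pos[:, None] - gt_centers[None], axis=-1)  # (K, G)
    d_sorted = np.sort(d, axis=1)
    d1 = d_sorted[:, 0]
    d2 = d_sorted[:, 1] if d.shape[1] > 1 else np.full_like(d1, np.inf)

    labels = np.full(len(proposal_pos), -1, dtype=int)
    ambiguous = d1 > 0.6 * d2                  # nearly equidistant to two GT objects
    labels[(d1 > far) | ambiguous] = 0         # negative
    labels[d1 < near] = 1                      # positive (precedence is our assumption)
    return labels
```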
|
| 89 |
+
|
| 90 |
+
Positive proposals are further supervised to predict a semantic class, aggregation features, and a binary mask. Negative ones are ignored. We use a cross-entropy loss $\mathcal{L}_{\mathrm{sem}}$ to predict the semantic label of the closest ground truth object.
|
| 91 |
+
|
| 92 |
+
Aggregation Features. Previous methods such as VoteNet [29] or 3D-BoNet [49] rely on non-maximum-suppression (NMS) to obtain the final objects. NMS iteratively selects proposals with the highest objectness score and removes all others that overlap with a certain IoU. However, this is sensitive to the quality of the objectness scores and can discard correct predictions. Instead of rejecting potentially useful information, we combine multiple proposals. To this end, we learn aggregation features for each proposal which are then clustered using DBScan [13].
|
| 93 |
+
|
| 94 |
+
All proposals whose aggregation features end up in the same cluster are aggregated together, yielding the final object detections. The points of a final object are the union over the foreground masks of combined proposals. As the number of proposals is relatively small ( $K \approx 500$ ) compared to the full point cloud ( $N \approx 10^6$ ), this step is very fast ( $\sim 8$ ms). This is a significant advantage over clustering full point clouds [10, 19], which can be prohibitively slow.
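A minimal version of this aggregation step, assuming scikit-learn's DBSCAN and an illustrative `eps` (the clustering parameters used in the paper are not stated here), is sketched below.

```python
import numpy as np
from sklearn.cluster import DBSCAN

def aggregate_proposals(agg_feats, masks, eps=0.1):
    """Cluster aggregation features and union the foreground masks per cluster.

    agg_feats: (K, E) learned aggregation features.
    masks:     list of K boolean foreground masks over the full point cloud.
    Returns one boolean instance mask per cluster.
    """
    labels = DBSCAN(eps=eps, min_samples=1).fit_predict(agg_feats)
    instances = []
    for c in np.unique(labels):
        inst = np.zeros_like(masks[0], dtype=bool)
        for i in np.nonzero(labels == c)[0]:   # union over all proposals in the cluster
            inst |= masks[i]
        instances.append(inst)
    return instances
```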
|
| 95 |
+
|
| 96 |
+
We investigate two types of aggregation features:
|
| 97 |
+
|
| 98 |
+
① Geometric features $\{\epsilon_i\in \mathbb{R}^{E = 4}\}_{i = 1}^K$ are composed of a refined 3D object center prediction $\Delta y_{i}$ and a 1D object radius estimation $r_i$ . The loss is defined as:
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\mathcal{L}_{\text{agg.}} = \left\| y_i + \Delta y_i - c_i^{*} \right\|_{H} + \left\| r_i - r_i^{*} \right\|_{H} \tag{4}
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
where $c_{i}^{*}$ is the nearest ground truth object center and $r_{i}^{*}$ the radius of the nearest ground truth object bounding sphere.
|
| 105 |
+
|
| 106 |
+
② Embedding features $\{\epsilon_{i}\in \mathbb{R}^{E}\}_{i = 1}^{K}$ are supervised with a discriminative loss function [4]. This loss was already successfully applied for 3D instance segmentation [10, 19]. It is composed of three terms: $\mathcal{L}_{\mathrm{agg.}} = \mathcal{L}_{\mathrm{var.}} + \mathcal{L}_{\mathrm{dist.}} + \gamma \cdot \mathcal{L}_{\mathrm{reg.}}$
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
\mathcal{L}_{\text{var.}} = \frac{1}{C} \sum_{c = 1}^{C} \frac{1}{N_c} \sum_{i = 1}^{N_c} \left[ \left\| \mu_c - \epsilon_i \right\| - \delta_v \right]_{+}^{2} \tag{5}
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\mathcal{L}_{\text{dist.}} = \frac{1}{C(C-1)} \sum_{\substack{c_A = 1 \\ c_A \neq c_B}}^{C} \sum_{c_B = 1}^{C} \left[ 2\delta_d - \left\| \mu_{c_A} - \mu_{c_B} \right\| \right]_{+}^{2} \tag{6}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\mathcal{L}_{\text{reg.}} = \frac{1}{C} \sum_{c = 1}^{C} \left\| \mu_c \right\| \tag{7}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
In our experiments, we set $\gamma = 0.001$ and $\delta_v = \delta_d = 0.1$. $C$ is the total number of ground truth objects and $N_c$ is the number of proposals belonging to one object. $\mathcal{L}_{\mathrm{var.}}$ pulls features that belong to the same instance towards their mean, $\mathcal{L}_{\mathrm{dist.}}$
|
| 121 |
+
|
| 122 |
+
pushes clusters with different instance labels apart, and $\mathcal{L}_{\mathrm{reg.}}$ is a regularization term pulling the means towards the origin. Further details and intuitions are available in the original work by De Brabandere et al. [4]. In Sec. 4, we show that geometric features outperform embedding features.
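For reference, the three terms follow directly from Eqs. (5)-(7); the sketch below is a plain NumPy rendering over proposal embeddings, not the paper's TensorFlow implementation.

```python
import numpy as np

def discriminative_loss(emb, inst_ids, delta_v=0.1, delta_d=0.1, gamma=1e-3):
    """Eqs. (5)-(7): variance, distance and regularization terms on embeddings.

    emb:      (K, E) embedding features of positive proposals.
    inst_ids: (K,) ground-truth object id per proposal.
    """
    ids = np.unique(inst_ids)
    means = np.stack([emb[inst_ids == c].mean(axis=0) for c in ids])   # mu_c

    # pull embeddings towards their instance mean
    l_var = np.mean([np.mean(np.maximum(
        np.linalg.norm(emb[inst_ids == c] - means[k], axis=1) - delta_v, 0) ** 2)
        for k, c in enumerate(ids)])

    # push means of different instances apart
    C, l_dist = len(ids), 0.0
    if C > 1:
        for a in range(C):
            for b in range(C):
                if a != b:
                    l_dist += max(2 * delta_d - np.linalg.norm(means[a] - means[b]), 0) ** 2
        l_dist /= C * (C - 1)

    l_reg = np.mean(np.linalg.norm(means, axis=1))                      # keep means near origin
    return l_var + l_dist + gamma * l_reg
```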
|
| 123 |
+
|
| 124 |
+
Mask Prediction. Each positive proposal predicts a class-agnostic binary segmentation mask over the points $s_i$ associated with that proposal, where the number of points per proposal $i$ is $|s_i| = n_i$ (Fig. 2, $\square$ ). Prior approaches obtain masks by segmenting 2D regions of interest (RoI) (Mask-RCNN [17]) or 3D bounding boxes (3D-BoNet [49]). Since we adopt an object-centric approach, mask segmentation can directly be performed on the points $s_i$ associated with a proposal. In particular, for each proposal, we select the per-point features $f_i$ of points that voted for a center within a distance $r$ of the proposal position $y_i$ . Formally, the set of selected per-point features is defined as $M_f = \{f_i \mid \| (x_i + \Delta x_i) - y_i \|_2 < r\}$ with $r = 0.3\mathrm{m}$ . The selected features $M_f$ are passed to a PointNet [32] for binary segmentation, i.e., we apply a shared MLP on each per-point feature, compute max-pooling over all feature channels, and concatenate the result to each feature before passing it through another MLP with feature sizes (256, 128, 64, 32, 2). Points that have the same ground truth instance label as the closest ground truth object instance label are supervised as foreground, while all others are background. Similar to [49], the mask loss $\mathcal{L}_{\mathrm{mask}}$ is implemented as FocalLoss [23] instead of a cross-entropy loss to cope with the foreground-background class imbalance.
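The point-selection and focal-loss pieces of this step are simple to write down; the sketch below uses the common focal-loss defaults ($\alpha = 0.25$, $\gamma = 2$), which are our assumption rather than values reported in the paper.

```python
import numpy as np

def select_mask_points(votes, proposal_pos, r=0.3):
    """Indices of points whose vote x_i + dx_i lands within r of the proposal position."""
    return np.nonzero(np.linalg.norm(votes - proposal_pos, axis=1) < r)[0]

def focal_loss(p_fg, target, alpha=0.25, gamma=2.0):
    """Binary focal loss for the foreground/background mask (alpha, gamma are illustrative)."""
    p_t = np.where(target == 1, p_fg, 1.0 - p_fg)
    a_t = np.where(target == 1, alpha, 1.0 - alpha)
    return float(np.mean(-a_t * (1.0 - p_t) ** gamma * np.log(np.clip(p_t, 1e-7, 1.0))))
```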
|
| 125 |
+
|
| 126 |
+
# 3.4. Training Details
|
| 127 |
+
|
| 128 |
+
The model is trained end-to-end from scratch using the multi-task loss $\mathcal{L} = \mathcal{L}_{\mathrm{point}} + \mathcal{L}_{\mathrm{obj.}} + 0.1 \cdot \mathcal{L}_{\mathrm{sem.}} + \mathcal{L}_{\mathrm{mask}} + \mathcal{L}_{\mathrm{agg.}}$. The batch size is 4 and the initial learning rate is 0.1, which is halved every $2 \cdot 10^4$ iterations; the model is trained for $15 \cdot 10^4$ iterations in total. Our model is implemented in TensorFlow and runs on an Nvidia TitanXp GPU (12GB).
|
| 129 |
+
|
| 130 |
+
Input and data augmentation. Our network is trained on $3\mathrm{m}\times 3\mathrm{m}$ point cloud crops of $N$ points sampled from the surface of a 3D mesh. During test time, we evaluate on full scenes. Input features are the 3D position, color and normal assigned to each point. Data augmentation is performed by randomly rotating the scene by $\mathrm{Uniform}[-180^{\circ},180^{\circ}]$ around the upright axis and $\mathrm{Uniform}[-10^{\circ},10^{\circ}]$ around the other axis. The scenes are randomly flipped in both horizontal directions and randomly scaled by $\mathrm{Uniform}[0.9,1.1]$ .
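A minimal sketch of this augmentation pipeline for point positions is shown below; which secondary axis receives the $\pm 10^{\circ}$ tilt and how flips are sampled are our assumptions.

```python
import numpy as np

def augment(points, rng=None):
    """Augment an (N, 3) point cloud as described: rotate, flip, scale."""
    rng = rng or np.random.default_rng()
    a = np.deg2rad(rng.uniform(-180, 180))   # rotation around the upright (z) axis
    b = np.deg2rad(rng.uniform(-10, 10))     # small tilt around a horizontal axis (assumed x)
    Rz = np.array([[np.cos(a), -np.sin(a), 0], [np.sin(a), np.cos(a), 0], [0, 0, 1]])
    Rx = np.array([[1, 0, 0], [0, np.cos(b), -np.sin(b)], [0, np.sin(b), np.cos(b)]])
    pts = points @ (Rz @ Rx).T

    if rng.random() < 0.5:                   # random flips in both horizontal directions
        pts[:, 0] *= -1
    if rng.random() < 0.5:
        pts[:, 1] *= -1
    return pts * rng.uniform(0.9, 1.1)       # global scale in [0.9, 1.1]
```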
|
| 131 |
+
|
| 132 |
+
# 4. Experiments
|
| 133 |
+
|
| 134 |
+
We compare our approach to previous state-of-the-art methods on two large-scale 3D indoor datasets (Sec. 4.1). Our ablation study analyzes the contribution of each component of our model and shows in particular the improvement of aggregating proposals over NMS (Sec. 4.2).
|
| 135 |
+
|
| 136 |
+
<table><tr><td colspan="3">3D Object Detection</td></tr><tr><td>ScanNetV2</td><td>mAP@25%</td><td>mAP@50%</td></tr><tr><td>DSS [37]</td><td>15.2</td><td>6.8</td></tr><tr><td>MRCNN 2D-3D [17]</td><td>17.3</td><td>10.5</td></tr><tr><td>F-PointNet [30]</td><td>19.8</td><td>10.8</td></tr><tr><td>GSPN [50]</td><td>30.6</td><td>17.7</td></tr><tr><td>3D-SIS [18]</td><td>40.2</td><td>22.5</td></tr><tr><td>VoteNet [29]</td><td>58.6</td><td>33.5</td></tr><tr><td>3D-MPA (Ours)</td><td>64.2</td><td>49.2</td></tr></table>
|
| 137 |
+
|
| 138 |
+
Table 1: 3D object detection scores on ScanNetV2 [6] validation set. We report per-class mean average precision (mAP) with an IoU of $25\%$ and $50\%$ . The IoU is computed on bounding boxes. All other scores are as reported in [29].
|
| 139 |
+
|
| 140 |
+
<table><tr><td colspan="3">3D Instance Segmentation</td></tr><tr><td>S3DIS 6-fold CV</td><td>mAP@50%</td><td>mAR@50%</td></tr><tr><td>PartNet [26]</td><td>56.4</td><td>43.4</td></tr><tr><td>ASIS [45]</td><td>63.6</td><td>47.5</td></tr><tr><td>3D-BoNet [49]</td><td>65.6</td><td>47.6</td></tr><tr><td>3D-MPA (Ours)</td><td>66.7</td><td>64.1</td></tr><tr><td>S3DIS Area 5</td><td>mAP@50%</td><td>mAR@50%</td></tr><tr><td>ASIS [45]</td><td>55.3</td><td>42.4</td></tr><tr><td>3D-BoNet [49]</td><td>57.5</td><td>40.2</td></tr><tr><td>3D-MPA (Ours)</td><td>63.1</td><td>58.0</td></tr></table>
|
| 141 |
+
|
| 142 |
+
Table 2: 3D instance segmentation scores on S3DIS [2]. We report scores on Area 5 (bottom) and 6-fold cross validation results (top). The metric is mean average precision (mAP) and mean average recall (mAR) at an IoU threshold of $50\%$ . The IoU is computed on per-point instance masks.
|
| 143 |
+
|
| 144 |
+
<table><tr><td colspan="7">3D Instance Segmentation</td></tr><tr><td rowspan="2">ScanNetV2</td><td colspan="3">Validation Set</td><td colspan="3">Hidden Test Set</td></tr><tr><td>mAP</td><td>@50%</td><td>@25%</td><td>mAP</td><td>@50%</td><td>@25%</td></tr><tr><td>SGPN [44]</td><td>-</td><td>11.3</td><td>22.2</td><td>4.9</td><td>14.3</td><td>39.0</td></tr><tr><td>3D-BEVIS [10]</td><td>-</td><td>-</td><td>-</td><td>11.7</td><td>24.8</td><td>40.1</td></tr><tr><td>3D-SIS [18]</td><td>-</td><td>18.7</td><td>35.7</td><td>16.1</td><td>38.2</td><td>55.8</td></tr><tr><td>GSPN [50]</td><td>19.3</td><td>37.8</td><td>53.4</td><td>15.8</td><td>30.6</td><td>54.4</td></tr><tr><td>3D-BoNet [49]</td><td>-</td><td>-</td><td>-</td><td>25.3</td><td>48.8</td><td>68.7</td></tr><tr><td>MTML [19]</td><td>20.3</td><td>40.2</td><td>55.4</td><td>28.2</td><td>54.9</td><td>73.1</td></tr><tr><td>3D-MPA (Ours)</td><td>35.3</td><td>59.1</td><td>72.4</td><td>35.5</td><td>61.1</td><td>73.7</td></tr></table>
|
| 145 |
+
|
| 146 |
+
Table 3: 3D instance segmentation scores on ScanNetV2 [6]. The metric is mean average precision (mAP) at IoU thresholds of $25\%$ and $50\%$, and averaged over the range [0.5:0.95:0.05]. The IoU is computed on per-point instance masks.
|
| 147 |
+
|
| 148 |
+

|
| 149 |
+
Ground Truth Instances
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
Predicted Instances
|
| 153 |
+
Predicted Object Centers
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
Figure 3: Qualitative results and intermediate steps on ScanNetV2 [6]. First two columns: Our approach properly segments instances of vastly different sizes and makes clear decisions at object boundaries. Different colors represent separate instances (ground truth and predicted instances are not necessarily the same color). Third column: Every point on the surface of an object predicts its object center. These centers are shown as blue dots. Fourth column: Gray segments correspond to votes; they illustrate which point predicted a center. Colored spheres represent proposals. Proposals are obtained by sampling from the predicted object centers. Proposal features are learned from grouped point features that voted for the same object center. Spheres with the same color show which proposals are grouped together based on these learned proposal features.
|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
Center Votes & Aggregated Proposals
|
| 166 |
+
|
| 167 |
+

|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
|
| 175 |
+

|
| 176 |
+
|
| 177 |
+

|
| 178 |
+
Ground Truth Instances
|
| 179 |
+
|
| 180 |
+

|
| 181 |
+
Predicted Instances
|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
Predicted Object Centers
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
Input Point Cloud
|
| 188 |
+
|
| 189 |
+

|
| 190 |
+
Figure 4: Failure Cases. We show two failure cases where our method incorrectly separates single instances. However, when comparing them to the input point cloud, they are still plausible predictions.
|
| 191 |
+
|
| 192 |
+

|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
|
| 198 |
+
<table><tr><td>mAP@25 %</td><td>cab</td><td>bed</td><td>chair</td><td>sofa</td><td>tabl</td><td>door</td><td>wind</td><td>bkshf</td><td>pic</td><td>cntr</td><td>desk</td><td>curt</td><td>fridg</td><td>showr</td><td>toil</td><td>sink</td><td>bath</td><td>ofurn</td><td>avg</td></tr><tr><td>SegCluster [18]</td><td>11.8</td><td>13.5</td><td>18.9</td><td>14.6</td><td>13.8</td><td>11.1</td><td>11.5</td><td>11.7</td><td>0.0</td><td>13.7</td><td>12.2</td><td>12.4</td><td>11.2</td><td>18.0</td><td>19.5</td><td>18.9</td><td>16.4</td><td>12.2</td><td>13.4</td></tr><tr><td>MRCNN [17]</td><td>15.7</td><td>15.4</td><td>16.4</td><td>16.2</td><td>14.9</td><td>12.5</td><td>11.6</td><td>11.8</td><td>19.5</td><td>13.7</td><td>14.4</td><td>14.7</td><td>21.6</td><td>18.5</td><td>25.0</td><td>24.5</td><td>24.5</td><td>16.9</td><td>17.1</td></tr><tr><td>SGPN [44]</td><td>20.7</td><td>31.5</td><td>31.6</td><td>40.6</td><td>31.9</td><td>16.6</td><td>15.3</td><td>13.6</td><td>0.0</td><td>17.4</td><td>14.1</td><td>22.2</td><td>0.0</td><td>0.0</td><td>72.9</td><td>52.4</td><td>0.0</td><td>18.6</td><td>22.2</td></tr><tr><td>3D-SIS [18]</td><td>32.0</td><td>66.3</td><td>65.3</td><td>56.4</td><td>29.4</td><td>26.7</td><td>10.1</td><td>16.9</td><td>0.0</td><td>22.1</td><td>35.1</td><td>22.6</td><td>28.6</td><td>37.2</td><td>74.9</td><td>39.6</td><td>57.6</td><td>21.1</td><td>35.7</td></tr><tr><td>MTML [19]</td><td>34.6</td><td>80.6</td><td>87.7</td><td>80.3</td><td>67.4</td><td>45.8</td><td>47.2</td><td>45.3</td><td>19.8</td><td>9.7</td><td>49.9</td><td>54.2</td><td>44.1</td><td>74.9</td><td>98.0</td><td>44.5</td><td>79.4</td><td>33.5</td><td>55.4</td></tr><tr><td>3D-MPA (Ours)</td><td>69.9</td><td>83.4</td><td>87.6</td><td>76.1</td><td>74.8</td><td>56.6</td><td>62.2</td><td>78.3</td><td>48.0</td><td>62.5</td><td>69.2</td><td>66.0</td><td>61.4</td><td>93.1</td><td>99.2</td><td>75.2</td><td>90.3</td><td>48.6</td><td>72.4</td></tr></table>
|
| 199 |
+
|
| 200 |
+
Table 4: Per class 3D instance segmentation on ScanNetV2 [6] validation set with mAP@25% on 18 classes. Our method outperforms all other methods on all classes except for chair and sofa.
|
| 201 |
+
|
| 202 |
+
<table><tr><td>mAP@50%</td><td>cab</td><td>bed</td><td>chair</td><td>sofa</td><td>tabl</td><td>door</td><td>wind</td><td>bkshf</td><td>pic</td><td>cntr</td><td>desk</td><td>curt</td><td>fridg</td><td>showr</td><td>toil</td><td>sink</td><td>bath</td><td>ofurn</td><td>avg</td></tr><tr><td>SegCluster [18]</td><td>10.4</td><td>11.9</td><td>15.5</td><td>12.8</td><td>12.4</td><td>10.1</td><td>10.1</td><td>10.3</td><td>0.0</td><td>11.7</td><td>10.4</td><td>11.4</td><td>0.0</td><td>13.9</td><td>17.2</td><td>11.5</td><td>14.2</td><td>10.5</td><td>10.8</td></tr><tr><td>MRCNN [17]</td><td>11.2</td><td>10.6</td><td>10.6</td><td>11.4</td><td>10.8</td><td>10.3</td><td>0.0</td><td>0.0</td><td>11.1</td><td>10.1</td><td>0.0</td><td>10.0</td><td>12.8</td><td>0.0</td><td>18.9</td><td>13.1</td><td>11.8</td><td>11.6</td><td>9.1</td></tr><tr><td>SGPN [44]</td><td>10.1</td><td>16.4</td><td>20.2</td><td>20.7</td><td>14.7</td><td>11.1</td><td>11.1</td><td>0.0</td><td>0.0</td><td>10.0</td><td>10.3</td><td>12.8</td><td>0.0</td><td>0.0</td><td>48.7</td><td>16.5</td><td>0.0</td><td>0.0</td><td>11.3</td></tr><tr><td>3D-SIS [18]</td><td>19.7</td><td>37.7</td><td>40.5</td><td>31.9</td><td>15.9</td><td>18.1</td><td>0.0</td><td>11.0</td><td>0.0</td><td>0.0</td><td>10.5</td><td>11.1</td><td>18.5</td><td>24.0</td><td>45.8</td><td>15.8</td><td>23.5</td><td>12.9</td><td>18.7</td></tr><tr><td>MTML [19]</td><td>14.5</td><td>54.0</td><td>79.2</td><td>48.8</td><td>42.7</td><td>32.4</td><td>32.7</td><td>21.9</td><td>10.9</td><td>0.8</td><td>14.2</td><td>39.9</td><td>42.1</td><td>64.3</td><td>96.5</td><td>36.4</td><td>70.8</td><td>21.5</td><td>40.2</td></tr><tr><td>3D-MPA (Ours)</td><td>51.9</td><td>72.2</td><td>83.8</td><td>66.8</td><td>63.0</td><td>43.0</td><td>44.5</td><td>58.4</td><td>38.8</td><td>31.1</td><td>43.2</td><td>47.7</td><td>61.4</td><td>80.6</td><td>99.2</td><td>50.6</td><td>87.1</td><td>40.3</td><td>59.1</td></tr></table>
|
| 203 |
+
|
| 204 |
+
# 4.1. Comparison with State-of-the-art Methods
|
| 205 |
+
|
| 206 |
+
Datasets. The ScanNetV2 [6] benchmark dataset consists of richly-annotated 3D reconstructions of indoor scenes. It comprises 1201 training scenes, 312 validation scenes and 100 hidden test scenes. The benchmark is evaluated on 20 semantic classes which include 18 different object classes.
|
| 207 |
+
|
| 208 |
+
The S3DIS [2] dataset is a collection of six large-scale indoor areas annotated with 13 semantic classes and object instance labels. We follow the standard evaluation protocol and report scores on Area 5, as well as 6-fold cross validation results over all six areas.
|
| 209 |
+
|
| 210 |
+
Object detection scores are shown in Tab. 1. Object detections are obtained by fitting a tight axis-aligned bounding box around the predicted object point-masks. We compare 3D-MPA to recent approaches including VoteNet [29] on the ScanNetV2 [6] dataset. Scores are obtained by using the evaluation methodology provided by [29]. Our method outperforms all previous methods by at least $+5.8$ mAP@ $25\%$ and $+15.7$ mAP@ $50\%$ .
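For clarity, fitting a tight axis-aligned box around a predicted point-mask is just a per-instance min/max over the masked points; a short sketch (our own, not the evaluation code of [29]):

```python
import numpy as np

def boxes_from_masks(points, instance_masks):
    """Axis-aligned (min_xyz, max_xyz) boxes from boolean instance masks over points."""
    return [(points[m].min(axis=0), points[m].max(axis=0)) for m in instance_masks if m.any()]
```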
|
| 211 |
+
|
| 212 |
+
Instance segmentation scores on S3DIS [2] are shown in Tab. 2. Per-class instance segmentation results are shown in Tab. 7. We report mean average precision (mAP) and mean average recall (mAR) scores. Our scores are computed using the evaluation scripts provided by Yang et al. [49]. Our approach outperforms all previous methods. In particular, we report an increased recall of $+17.8$ mAR@50% on Area5 and $+16.5$ mAR@50% on 6-fold cross validation, which means we detect significantly more objects, while simultaneously achieving higher precision.
|
| 213 |
+
|
| 214 |
+
We show results on ScanNetV2 [6] validation and hidden test set in Tab. 3 and per-class scores with mAP@25% in Tab. 4 and mAP@50% in Tab. 5. We improve over previous methods by at least +18.1 mAP@50% and +17.0 mAP@25%. In particular, our 3D-MPA outperforms all other methods in every object class on mAP@50 (Tab. 5). On mAP@25, we outperform on all classes except chair and sofa. Qualitative results on ScanNetV2 are visualized in Fig. 3 and failure cases in Fig. 4.
|
| 215 |
+
|
| 216 |
+
# 4.2. Ablation study
|
| 217 |
+
|
| 218 |
+
In Tab. 6, we show the result of our ablation study analyzing the design choices of each component of our model. The evaluation metric is mean average precision (mAP) on the task of instance segmentation, evaluated on the ScanNetV2 validation set.
|
| 219 |
+
|
| 220 |
+
Table 5: Per class 3D instance segmentation on ScanNetV2 [6] validation set with mAP@50% on 18 classes. Our method outperforms all other methods on all classes.
|
| 221 |
+
|
| 222 |
+
<table><tr><td colspan="2">Ablation Study</td></tr><tr><td>3D Instance Segmentation (ScanNetV2 val.)</td><td>mAP@50%</td></tr><tr><td>① Proposals + NMS</td><td>47.5</td></tr><tr><td>② Agg. Props. (proposal positions)</td><td>52.4 (+4.9)</td></tr><tr><td>③ Agg. Props. (embedding features)</td><td>56.7 (+9.2)</td></tr><tr><td>④ Agg. Props. (geometric features)</td><td>57.8 (+10.3)</td></tr><tr><td>⑤ Agg. Props. (geometric features + GCN)</td><td>59.1 (+11.6)</td></tr></table>
|
| 223 |
+
|
| 224 |
+
Table 6: Ablation study. In Sec. 4.2 we discuss the results in detail. Scores are instance segmentation results on the ScanNetV2 [6] validation set and absolute improvements in mAP (in green) relative to the baseline ①.
|
| 225 |
+
|
| 226 |
+
<table><tr><td></td><td>S3DIS 6-fold CV</td><td>ceil.</td><td>floor</td><td>walls</td><td>beam</td><td>colm.</td><td>wind.</td><td>door</td><td>table</td><td>chair</td><td>sofa</td><td>bookc.</td><td>board</td><td>clut.</td><td>mean</td></tr><tr><td rowspan="2">mAP@0.5</td><td>3D-BoNet [49]</td><td>88.5</td><td>89.9</td><td>64.9</td><td>42.3</td><td>48.0</td><td>93.0</td><td>66.8</td><td>55.4</td><td>72.0</td><td>49.7</td><td>58.3</td><td>80.7</td><td>47.6</td><td>65.6</td></tr><tr><td>3D-MPA (Ours)</td><td>95.5</td><td>99.5</td><td>59.0</td><td>44.6</td><td>57.7</td><td>89.0</td><td>78.7</td><td>34.5</td><td>83.6</td><td>55.9</td><td>51.6</td><td>71.0</td><td>46.3</td><td>66.7</td></tr><tr><td rowspan="2">mAR@0.5</td><td>3D-BoNet [49]</td><td>61.8</td><td>74.6</td><td>50.0</td><td>42.2</td><td>27.2</td><td>62.4</td><td>58.5</td><td>48.6</td><td>64.9</td><td>28.8</td><td>28.4</td><td>46.5</td><td>28.6</td><td>46.7</td></tr><tr><td>3D-MPA (Ours)</td><td>68.4</td><td>96.2</td><td>51.9</td><td>58.8</td><td>77.6</td><td>79.8</td><td>69.5</td><td>32.8</td><td>75.2</td><td>71.1</td><td>46.2</td><td>68.2</td><td>38.2</td><td>64.1</td></tr></table>
|
| 227 |
+
|
| 228 |
+
Table 7: Per-class 3D instance segmentation scores on S3DIS [2]. We report per-class mean average precision (mAP) and recall (mAR) at an IoU of $50\%$. The 3D-BoNet scores are up-to-date numbers provided by the original authors. Our method detects significantly more objects (+17.4 mAR) and does so with even higher precision (+1.1 mAP).
|
| 229 |
+
|
| 230 |
+
Effect of grouping compared to NMS. The main result of this work is that grouping multiple proposals is superior to non-maximum-suppression (NMS). We demonstrate this experimentally by comparing two baseline variants of our model: In experiment ① (Tab. 6), we apply the traditional approach of predicting a number of proposals and applying NMS to obtain the final predictions. The model corresponds to the one depicted in Fig. 2 without proposal consolidation and with the aggregation replaced by NMS. NMS chooses the most confident prediction and suppresses all other predictions with an IoU larger than a specified threshold, in our case $25\%$ . For experiment ②, we perform a naive grouping of proposals by clustering the proposal positions $y_{i}$ . The final object instance masks are obtained as the union over all proposal masks in one cluster. We observe a significant increase of $+4.9$ mAP by replacing NMS with aggregation.
|
| 231 |
+
|
| 232 |
+
How important are good aggregation features? In experiment ②, we group proposals based on their position $y_{i}$ . These are still relatively simple features. In experiments ③ and ④, proposals are grouped based on learned embedding features and learned geometric features, respectively. These features are described in Sec. 3.3. Again, we observe a notable improvement of +5.4 mAP compared to experiment ② and even +10.3 mAP over ①. In our experiments, the geometric features performed better than the embedding features (+1.1 mAP). One possible explanation could be that the geometric features have an explicit meaning and are therefore easier to train than the 5-dimensional embedding space used in experiment ③. Therefore, for the next experiment in the ablation study and our final model, we make use of the geometric features. In summary, the quality of the aggregation features has a significant impact.
|
| 233 |
+
|
| 234 |
+
Does the graph convolutional network help? The graph convolutional network (GCN) defined on top of proposals enables higher-order interaction between proposals. Experiment ⑤ corresponds to the model depicted in Fig. 2 with a 10 layer GCN. Experiment ④ differs from experiment ⑤ in that it does not include the GCN for proposal consolidation. Adding the GCN results in another improvement of
|
| 235 |
+
|
| 236 |
+
+1.3 mAP. In total, by incorporating the GCN and replacing NMS with multi-proposal aggregation, we observe an improvement of +11.6 mAP over the same network architecture without those changes.
|
| 237 |
+
|
| 238 |
+
# 5. Conclusion
|
| 239 |
+
|
| 240 |
+
In this work, we introduced 3D-MPA, a new method for 3D semantic instance segmentation. Our core idea is to combine the benefits of both top-down and bottom-up object detection strategies. That is, we first produce a number of proposals using an object-centric voting scheme based on a sparse volumetric backbone. Each object may receive multiple proposals, which makes our method robust to potential outliers in the object proposal stage, while we still obtain only a handful of proposals, so clustering them remains computationally inexpensive. Before aggregation, we allow higher-order feature interactions between proposals via a graph convolutional network. We then aggregate proposals based on the resulting graph relationships and proposal feature similarities. We show that graph convolutions help to achieve high evaluation scores, although the largest improvement originates from our multi-proposal aggregation strategy. Our combined approach achieves state-of-the-art instance segmentation and object detection results on the popular ScanNetV2 and S3DIS datasets, thus validating our algorithm design.
|
| 241 |
+
|
| 242 |
+
Overall, we believe that multi-proposal aggregation is a promising direction for object detection, in particular in the 3D domain. Many interesting future avenues remain, for instance combining detection with tracking in semi-dynamic sequences, where proposals could be distributed in 4D space and accumulated along the temporal axis.
|
| 243 |
+
|
| 244 |
+
Acknowledgements. We would like to thank Theodora Kontogianni, Jonas Schult, Jonathon Luiten, Mats Steinweg, Ali Athar, Dan Jia and Sabarinath Mahadevan for helpful feedback as well as Angela Dai for help with the video. This work was funded by the ERC Consolidator Grant DeeViSe (ERC-2017-COG-773161) and the ERC Starting Grant Scan2CAD (804724).
|
| 245 |
+
|
| 246 |
+
# References
|
| 247 |
+
|
| 248 |
+
[1] I. Armeni, Z.-Y. He, J. Gwak, A. R. Zamir, M. Fischer, J. Malik, and S. Savarese. 3D Scene Graph: A Structure for Unified Semantics, 3D Space, and Camera. In IEEE International Conference on Computer Vision (ICCV), 2019. 2
|
| 249 |
+
[2] I. Armeni, O. Sener, A. R. Zamir, H. Jiang, I. Brilakis, M. Fischer, and S. Savarese. 3D Semantic Parsing of Large-Scale Indoor Spaces. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 5, 7, 8
|
| 250 |
+
[3] A. Behl, D. Paschalidou, S. Donné, and A. Geiger. PointFlowNet: Learning Representations for Rigid Motion Estimation from Point Clouds. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2
|
| 251 |
+
[4] B. D. Brabandere, D. Neven, and L. V. Gool. Semantic Instance Segmentation with a Discriminative Loss Function. In IEEE Conference on Computer Vision and Pattern Recognition Workshop (CVPR'W), 2017. 2, 4, 5
|
| 252 |
+
[5] C. Choy, J. Gwak, and S. Savarese. 4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 3
|
| 253 |
+
[6] A. Dai, A. X. Chang, M. Savva, M. Halber, T. Funkhouser, and M. Nießner. ScanNet: Richly-annotated 3D Reconstructions of Indoor Scenes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 5, 6, 7
|
| 254 |
+
[7] A. Dai and M. Nießner. 3DMV: Joint 3D-Multi-View Prediction for 3D Semantic Scene Segmentation. In European Conference on Computer Vision (ECCV), 2018. 1, 2
|
| 255 |
+
[8] A. Dai, M. Nießner, M. Zollhöfer, S. Izadi, and C. Theobalt. Bundlefusion: Real-time Globally Consistent 3D Reconstruction Using On-the-fly Surface Reintegration. ACM Transactions on Graphics (TOG), 2017. 1
|
| 256 |
+
[9] D. K. Duvenaud, D. Maclaurin, J. Iparraguirre, R. Bombarell, T. Hirzel, A. Aspuru-Guzik, and R. P. Adams. Convolutional Networks on Graphs for Learning Molecular Fingerprints. In Neural Information Processing Systems (NIPS), 2015. 2
|
| 257 |
+
[10] C. Elich, F. Engelmann, J. Schult, T. Kontogianni, and B. Leibe. 3D-BEVIS: Birds-Eye-View Instance Segmentation. In German Conference on Pattern Recognition (GCPR), 2019. 2, 4, 5
|
| 258 |
+
[11] F. Engelmann, T. Kontogianni, and B. Leibe. Dilated Point Convolutions: On the Receptive Field Size of Point Convolutions on 3D Point Clouds. In International Conference on Robotics and Automation (ICRA), 2020. 1
|
| 259 |
+
[12] F. Engelmann, T. Kontogianni, J. Schult, and B. Leibe. Know What Your Neighbors Do: 3D Semantic Segmentation of Point Clouds. In European Conference on Computer Vision Workshop (ECCV'W), 2018. 2
|
| 260 |
+
[13] M. Ester, H.-P. Kriegel, J. Sander, and X. Xu. A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise. In ACM International Conference on Knowledge Discovery & Data Mining (KDD), 1996. 4
|
| 261 |
+
[14] A. Fathi, Z. Wojna, V. Rathod, P. Wang, H. O. Song, S. Guadarrama, and K. P. Murphy. Semantic Instance Segmentation via Deep Metric Learning. CoRR, abs/1703.10277, 2017. 2
|
| 262 |
+
|
| 263 |
+
[15] B. Graham, M. Engelcke, and L. van der Maaten. 3D Semantic Segmentation with Submanifold Sparse Convolutional Networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2, 3
|
| 264 |
+
[16] R. Hanocka, A. Hertz, N. Fish, R. Giryes, S. Fleishman, and D. Cohen-Or. MeshCNN: A Network with an Edge. ACM Transactions on Graphics (TOG), 2019. 1, 2
|
| 265 |
+
[17] K. He, G. Gkioxari, P. Dollar, and R. B. Girshick. Mask R-CNN. In IEEE International Conference on Computer Vision (ICCV), 2017. 2, 5, 7
|
| 266 |
+
[18] J. Hou, A. Dai, and M. Nießner. 3D-SIS: 3D Semantic Instance Segmentation of RGB-D Scans. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 1, 2, 5, 7
|
| 267 |
+
[19] J. Lahoud, B. Ghanem, M. Pollefeys, and M. R. Oswald. 3D Instance Segmentation via Multi-Task Metric Learning. In IEEE International Conference on Computer Vision (ICCV), 2019. 1, 2, 4, 5, 7
|
| 268 |
+
[20] L. Landrieu and M. Boussaha. Point Cloud Oversegmentation with Graph-Structured Deep Metric Learning. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2
|
| 269 |
+
[21] L. Landrieu and M. Simonovsky. Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2
|
| 270 |
+
[22] Y. Li, D. Tarlow, M. Brockschmidt, and R. Zemel. Gated Graph Sequence Neural Networks. In International Conference on Learning Representations (ICLR), 2017. 2
|
| 271 |
+
[23] T.-Y. Lin, P. Goyal, R. B. Girshick, K. He, and P. Dollar. Focal Loss for Dense Object Detection. In IEEE International Conference on Computer Vision (ICCV), 2017. 5
|
| 272 |
+
[24] C. Liu and Y. Furukawa. MASC: Multi-scale Affinity with Sparse Convolution for 3D Instance Segmentation. CoRR, abs/1902.04478, 2019. 2
|
| 273 |
+
[25] X. Liu, C. R. Qi, and L. J. Guibas. FlowNet3D: Learning Scene Flow in 3D Point Clouds. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2
|
| 274 |
+
[26] K. Mo, S. Zhu, A. X. Chang, L. Yi, S. Tripathi, L. J. Guibas, and H. Su. PartNet: A Large-Scale Benchmark for Fine-Grained and Hierarchical Part-Level 3D Object Understanding. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 5
|
| 275 |
+
[27] R. A. Newcombe, S. Izadi, O. Hilliges, D. Molyneaux, D. Kim, A. J. Davison, P. Kohli, J. Shotton, S. Hodges, and A. W. Fitzgibbon. KinectFusion: Real-time Dense Surface Mapping and Tracking. In International Symposium on Mixed and Augmented Reality (ISMAR), 2011. 1
|
| 276 |
+
[28] M. Nießner, M. Zollhöfer, S. Izadi, and M. Stamminger. Real-time 3D Reconstruction at Scale using Voxel Hashing. ACM Transactions on Graphics (TOG), 2013. 1
|
| 277 |
+
[29] C. R. Qi, O. Litany, K. He, and L. J. Guibas. Deep Hough Voting for 3D Object Detection in Point Clouds. In IEEE International Conference on Computer Vision (ICCV), 2019. 1, 2, 3, 4, 5, 7
|
| 278 |
+
|
| 279 |
+
[30] C. R. Qi, W. Liu, C. Wu, H. Su, and L. J. Guibas. Frustum PointNets for 3D Object Detection from RGB-D Data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 5
|
| 280 |
+
[31] C. R. Qi, H. Su, K. Mo, and L. J. Guibas. PointNet: Deep Learning on Point Sets for 3D Classification and Segmentation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017. 1, 2, 4
|
| 281 |
+
[32] C. R. Qi, H. Su, M. Nießner, A. Dai, M. Yan, and L. J. Guibas. Volumetric and Multi-View CNNs for Object Classification on 3D Data. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 1, 5
|
| 282 |
+
[33] C. R. Qi, L. Yi, H. Su, and L. J. Guibas. PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space. In Neural Information Processing Systems (NIPS), 2017. 1, 2
|
| 283 |
+
[34] X. Qi, R. Liao, J. Jia, S. Fidler, and R. Urtasun. 3D Graph Neural Networks for RGBD Semantic Segmentation. In IEEE International Conference on Computer Vision (ICCV), 2017. 2
|
| 284 |
+
[35] S. Ren, K. He, R. Girshick, and J. Sun. Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks. In Neural Information Processing Systems (NIPS), 2015. 1, 2, 4
|
| 285 |
+
[36] J. Schult, F. Engelmann, T. Kontogianni, and B. Leibe. DualConvMesh-Net: Joint Geodesic and Euclidean Convolutions on 3D Meshes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2020. 1, 2
|
| 286 |
+
[37] S. Song and J. Xiao. Deep Sliding Shapes for Amodal 3D Object Detection in RGB-D Images. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 1, 5
|
| 287 |
+
[38] Y. Song, C. Yang, Y. Shen, P. Wang, Q. Huang, and C. J. Kuo. SPG-Net: Segmentation Prediction and Guidance Network for Image Inpainting. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 1
|
| 288 |
+
[39] H. Su, S. Maji, E. Kalogerakis, and E. Learned-Miller. Multi-view Convolutional Neural Networks for 3D Shape Recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 1
|
| 289 |
+
[40] M. Tatarchenko, J. Park, V. Koltun, and Q.-Y. Zhou. Tangent Convolutions for Dense Prediction in 3D. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2
|
| 290 |
+
|
| 291 |
+
[41] T. N. Kipf and M. Welling. Semi-Supervised Classification with Graph Convolutional Networks. In International Conference on Learning Representations (ICLR), 2017. 2
|
| 292 |
+
[42] J. Wald, A. Avetisyan, N. Navab, F. Tombari, and M. Nießner. RIO: 3D Object Instance Re-Localization in Changing Indoor Environments. In IEEE International Conference on Computer Vision (ICCV), 2019. 2
|
| 293 |
+
[43] S. Wang, S. Suo, W. Ma, A. Pokrovsky, and R. Urtasun. Deep Parametric Continuous Convolutional Neural Networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 2
|
| 294 |
+
[44] W. Wang, R. Yu, Q. Huang, and U. Neumann. SGPN: Similarity Group Proposal Network for 3D Point Cloud Instance Segmentation. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 5, 7
|
| 295 |
+
[45] X. Wang, S. Liu, X. Shen, C. Shen, and J. Jia. Associatively Segmenting Instances and Semantics in Point Clouds. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5
|
| 296 |
+
[46] Y. Wang, Y. Sun, Z. Liu, S. E. Sarma, M. M. Bronstein, and J. M. Solomon. Dynamic Graph CNN for Learning on Point Clouds. In ACM Transactions on Graphics (TOG), 2019. 2, 4
|
| 297 |
+
[47] T. Whelan, S. Leutenegger, R. Salas-Moreno, B. Glocker, and A. Davison. ElasticFusion: Dense SLAM without a Pose Graph. In Robotics: Science and Systems (RSS), 2015. 1
|
| 298 |
+
[48] Z. Wu, S. Song, A. Khosla, F. Yu, L. Zhang, X. Tang, and J. Xiao. 3D ShapeNets: A Deep Representation for Volumetric Shapes. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015. 1
|
| 299 |
+
[49] B. Yang, J. Wang, R. Clark, Q. Hu, S. Wang, A. Markham, and N. Trigoni. Learning Object Bounding Boxes for 3D Instance Segmentation on Point Clouds. In Neural Information Processing Systems (NIPS), 2019. 1, 2, 4, 5, 7, 8
|
| 300 |
+
[50] L. Yi, W. Zhao, H. Wang, M. Sung, and L. Guibas. GSPN: Generative Shape Proposal Network for 3D Instance Segmentation in Point Cloud. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2, 5
|
| 301 |
+
[51] Y. Zhou and O. Tuzel. VoxelNet: End-to-End Learning for Point Cloud Based 3D Object Detection. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2018. 1, 2
|
3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b743d83e2803847dc6ce3747ef255fb40f6d7c394d8ca56cf6729f4a9c6dc74d
|
| 3 |
+
size 719561
|
3dmpamultiproposalaggregationfor3dsemanticinstancesegmentation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4ad23e9ade7e652c85c711ff9dd69f61095472a7dbde59808a2584b026e515fb
|
| 3 |
+
size 424676
|
3dpackingforselfsupervisedmonoculardepthestimation/98cb13b1-a587-4d6d-b10f-2fb39663ea60_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5d173f81ffa17e63a0c69431b430bad14161f740c51b379b5549c3cc644206fc
|
| 3 |
+
size 79463
|
3dpackingforselfsupervisedmonoculardepthestimation/98cb13b1-a587-4d6d-b10f-2fb39663ea60_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:78f5878969f622d2f7ee688df532bc10484ce1aa53e1ba1ce31e1010b8d608b8
|
| 3 |
+
size 97207
|
3dpackingforselfsupervisedmonoculardepthestimation/98cb13b1-a587-4d6d-b10f-2fb39663ea60_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:554e10d071b2f8425790a2e1c4801be4868ffd27a3d8da6c246a771e8fc31836
|
| 3 |
+
size 3456914
|
3dpackingforselfsupervisedmonoculardepthestimation/full.md
ADDED
|
@@ -0,0 +1,280 @@
| 1 |
+
# 3D Packing for Self-Supervised Monocular Depth Estimation
|
| 2 |
+
|
| 3 |
+
Vitor Guizilini Rares Ambrus Sudeep Pillai Allan Raventos Adrien Gaidon Toyota Research Institute (TRI)
|
| 4 |
+
|
| 5 |
+
firstname.lastname@tri.global
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Although cameras are ubiquitous, robotic platforms typically rely on active sensors like LiDAR for direct 3D perception. In this work, we propose a novel self-supervised monocular depth estimation method combining geometry with a new deep network, PackNet, learned only from unlabeled monocular videos. Our architecture leverages novel symmetrical packing and unpacking blocks to jointly learn to compress and decompress detail-preserving representations using 3D convolutions. Although self-supervised, our method outperforms other self, semi, and fully supervised methods on the KITTI benchmark. The 3D inductive bias in PackNet enables it to scale with input resolution and number of parameters without overfitting, generalizing better on out-of-domain data such as the NuScenes dataset. Furthermore, it does not require large-scale supervised pretraining on ImageNet and can run in real-time. Finally, we release DDAD (Dense Depth for Automated Driving), a new urban driving dataset with more challenging and accurate depth evaluation, thanks to longer-range and denser ground-truth depth generated from high-density LiDARs mounted on a fleet of self-driving cars operating world-wide. $^{\dagger}$
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
Accurate depth estimation is a key prerequisite in many robotics tasks, including perception, navigation, and planning. Depth from monocular camera configurations can provide useful cues for a wide array of tasks [23, 30, 34, 36], producing dense depth maps that could complement or eventually replace expensive range sensors. However, learning monocular depth via direct supervision requires ground-truth information from additional sensors and precise cross-calibration. Self-supervised methods do not suffer from these limitations, as they use geometrical constraints on image sequences as the sole source of supervision. In this work, we address the problem of jointly estimating scene structure and camera motion across RGB image sequences using a self-supervised deep network.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: Example metrically accurate PackNet prediction (map and textured point cloud) on our DDAD dataset.
|
| 19 |
+
|
| 20 |
+
While recent works in self-supervised monocular depth estimation have mostly focused on engineering the loss function [5, 33, 47, 53], we show that performance critically depends on the model architecture, in line with the observations of [27] for other self-supervised tasks. Going beyond image classification models like ResNet [20], our main contribution is a new convolutional network architecture, called PackNet, for high-resolution self-supervised monocular depth estimation. We propose new packing and unpacking blocks that jointly leverage 3D convolutions to learn representations that maximally propagate dense appearance and geometric information while still being able to run in real time. Our second contribution is a novel loss that can optionally leverage the camera's velocity when available (e.g., from cars, robots, mobile phones) to solve the inherent scale ambiguity in monocular vision. Our third contribution is a new dataset: Dense Depth for Automated Driving (DDAD). It leverages diverse logs from a fleet of well-calibrated self-driving cars equipped with cameras and high-accuracy long-range LiDARs. Compared to existing benchmarks, DDAD enables much more accurate depth evaluation at range, which is key for high resolution monocular depth estimation methods (cf. Figure 1).
|
| 21 |
+
|
| 22 |
+
Our experiments on the standard KITTI benchmark [16], the recent NuScenes dataset [4], and our new proposed DDAD benchmark show that our self-supervised monocular approach $i)$ improves on the state of the art, especially at longer ranges; $ii)$ is competitive with fully supervised methods; $iii)$ generalizes better on unseen data; $iv)$ scales better with number of parameters, input resolution, and more unlabeled training data; $v)$ can run in real time at high resolution; and $vi)$ does not require supervised pretraining on ImageNet to achieve state-of-the-art results; or test-time ground-truth scaling if velocity information is available at training time.
|
| 23 |
+
|
| 24 |
+
# 2. Related Work
|
| 25 |
+
|
| 26 |
+
Depth estimation from a single image poses several challenges due to its ill-posed and ambiguous nature. However, modern convolutional networks have shown that it is possible to successfully leverage appearance-based patterns in large scale datasets in order to make accurate predictions.
|
| 27 |
+
|
| 28 |
+
Depth Network Architectures Eigen et al. [13] proposed one of the earliest works in convolutional-based depth estimation using a multi-scale deep network trained on RGB-D sensor data to regress the depth directly from single images. Subsequent works extended these network architectures to perform two-view stereo disparity estimation [35] using techniques developed in the flow estimation literature [12]. Following [12, 35], Ummenhofer et al. [42] applied these concepts to simultaneously train a depth and pose network to predict depth and camera ego-motion between successive unconstrained image pairs.
|
| 29 |
+
|
| 30 |
+
Independently, dense pixel-prediction networks [2, 31, 48] have made significant progress towards improving the flow of information between encoding and decoding layers. Fractional pooling [19] was introduced to amortize the rapid spatial reduction during downsampling. Lee et al. [29] generalized the pooling function to allow the learning of more complex patterns, including linear combinations and learnable pooling operations. Shi et al. [39] used sub-pixel convolutions to perform Single-Image-Super-Resolution, synthesizing and super-resolving images beyond their input resolutions, while still operating at lower resolutions. Recent works [38, 51] in self-supervised monocular depth estimation use this concept to super-resolve estimates and further improve performance. Here, we go one step further and introduce new operations relying on 3D convolutions for learning to preserve and process spatial information in the features of encoding and decoding layers.
|
| 31 |
+
|
| 32 |
+
Self-Supervised Monocular Depth and Pose As supervised techniques for depth estimation advanced rapidly, the limited availability of target depth labels became a challenge, especially for outdoor applications.
|
| 33 |
+
|
| 34 |
+
To this end, [15, 17] provided an alternative strategy involving training a monocular depth network with stereo cameras, without requiring ground-truth depth labels. By leveraging Spatial Transformer Networks [22], Godard et al. [17] use stereo imagery to geometrically transform the right image plus a predicted depth of the left image into a synthesized left image. The loss between the resulting synthesized and original left images is then defined in a fully-differentiable manner, using a Structural Similarity [44] term and additional depth regularization terms, thus allowing the depth network to be self-supervised in an end-to-end fashion.
|
| 35 |
+
|
| 36 |
+
Following [17] and [42], Zhou et al. [52] generalize this to self-supervised training in the purely monocular setting, where a depth and pose network are simultaneously learned from unlabeled monocular videos. Several methods [5, 26, 33, 43, 46, 47, 51, 53] have advanced this line of work by incorporating additional loss terms and constraints. All, however, take advantage of constraints in monocular Structure-from-Motion (SfM) training that only allow the estimation of depth and pose up to an unknown scale factor, and rely on ground-truth LiDAR measurements to scale their depth estimates appropriately for evaluation purposes [52]. Instead, in this work we show that, by simply using the instantaneous velocity of the camera during training, we are able to learn a scale-aware depth and pose model, alleviating the impractical need to use LiDAR ground-truth depth measurements at test-time.
|
| 37 |
+
|
| 38 |
+
# 3. Self-Supervised Scale-Aware SfM
|
| 39 |
+
|
| 40 |
+
In self-supervised monocular SfM training (Fig. 2), we aim to learn: (i) a monocular depth model $f_{D}: I \to D$ , that predicts the scale-ambiguous depth $\hat{D} = f_{D}(I(p))$ for every pixel $p$ in the target image $I$ ; and (ii) a monocular ego-motion estimator $f_{\mathbf{x}}: (I_t, I_S) \to \mathbf{x}_{t \to S}$ , that predicts the set of 6-DoF rigid transformations for all $s \in S$ given by $\mathbf{x}_{t \to s} = \begin{pmatrix} \mathbf{R} & \mathbf{t} \\ \mathbf{0} & \mathbf{1} \end{pmatrix} \in \mathrm{SE}(3)$ , between the target image $I_t$ and the set of source images $I_s \in I_S$ considered as part of the temporal context. In practice, we use the frames $I_{t-1}$ and $I_{t+1}$ as source images, although using a larger context is possible. Note that in the case of monocular SfM both depth and pose are estimated up to an unknown scale factor, due to the inherent ambiguity of the photometric loss.
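To make the view synthesis underlying this objective concrete, the following is a minimal, hedged PyTorch sketch of warping a source image into the target frame given a predicted depth map, a predicted relative pose $\mathbf{x}_{t \to s}$ (as a $4\times4$ matrix), and known intrinsics $K$; function and variable names are illustrative assumptions rather than the authors' implementation.

```python
import torch
import torch.nn.functional as F

def warp_source_to_target(source, depth, T_t2s, K, K_inv):
    """source: Bx3xHxW, depth: Bx1xHxW (target frame), T_t2s: Bx4x4, K / K_inv: Bx3x3."""
    b, _, h, w = depth.shape
    v, u = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
    pix = torch.stack((u, v, torch.ones_like(u))).float().view(1, 3, -1).expand(b, -1, -1)
    cam = K_inv @ pix * depth.view(b, 1, -1)                  # back-project target pixels to 3D
    cam_h = torch.cat((cam, torch.ones(b, 1, h * w)), dim=1)  # homogeneous coordinates
    src = K @ (T_t2s @ cam_h)[:, :3]                          # transform into source frame, project
    uv = src[:, :2] / src[:, 2:].clamp(min=1e-6)
    grid = torch.stack((2 * uv[:, 0] / (w - 1) - 1,           # normalize to [-1, 1] for grid_sample
                        2 * uv[:, 1] / (h - 1) - 1), dim=-1).view(b, h, w, 2)
    return F.grid_sample(source, grid, padding_mode="border", align_corners=True)
```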
|
| 41 |
+
|
| 42 |
+
# 3.1. Self-Supervised Objective
|
| 43 |
+
|
| 44 |
+
Following the work of Zhou et al. [52], we train the depth and pose network simultaneously in a self-supervised manner. In this work, however, we learn to recover the inverse-depth $f_{d}: I \to f_{D}^{-1}(I)$ instead, along with the ego-motion estimator $f_{\mathbf{x}}$ . Similar to [52], the overall self-supervised objective consists of an appearance matching loss term $\mathcal{L}_p$ that is imposed between the synthesized target image $\hat{I}_t$ and the target image $I_t$ , and a depth regularization term $\mathcal{L}_s$ that ensures edge-aware smoothing in the depth estimates $\hat{D}_t$ .
|
| 45 |
+
|
| 46 |
+
The objective takes the following form:
|
| 47 |
+
|
| 48 |
+
$$
|
| 49 |
+
\mathcal{L}\left(I_{t}, \hat{I}_{t}\right) = \mathcal{L}_{p}\left(I_{t}, I_{S}\right) \odot \mathcal{M}_{p} \odot \mathcal{M}_{t} + \lambda_{1} \mathcal{L}_{s}(\hat{D}_{t}) \tag{1}
|
| 50 |
+
$$
|
| 51 |
+
|
| 52 |
+
where $\mathcal{M}_t$ is a binary mask that avoids computing the photometric loss on the pixels that do not have a valid mapping, and $\odot$ denotes element-wise multiplication. Additionally, $\lambda_1$ enforces a weighted depth regularization on the objective. The overall loss in Equation 1 is averaged per-pixel, pyramid-scale and image batch during training. Fig. 2 shows a high-level overview of our training pipeline.
|
| 53 |
+
|
| 54 |
+
Appearance Matching Loss. Following [17, 52] the pixel-level similarity between the target image $I_{t}$ and the synthesized target image $\hat{I}_{t}$ is estimated using the Structural Similarity (SSIM) [44] term combined with an L1 pixel-wise loss term, inducing an overall photometric loss given by Equation 2 below.
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
\mathcal{L}_{p}\left(I_{t}, \hat{I}_{t}\right) = \alpha \, \frac{1 - \operatorname{SSIM}\left(I_{t}, \hat{I}_{t}\right)}{2} + (1 - \alpha) \left\| I_{t} - \hat{I}_{t} \right\| \tag{2}
|
| 58 |
+
$$
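A minimal PyTorch-style sketch of Eq. (2) is shown below; the $3\times3$ average-pooling SSIM is a common simplification and not necessarily the exact implementation used by the authors.

```python
import torch.nn.functional as F

def ssim(x, y, c1=0.01 ** 2, c2=0.03 ** 2):
    mu_x, mu_y = F.avg_pool2d(x, 3, 1, 1), F.avg_pool2d(y, 3, 1, 1)
    sigma_x = F.avg_pool2d(x * x, 3, 1, 1) - mu_x ** 2
    sigma_y = F.avg_pool2d(y * y, 3, 1, 1) - mu_y ** 2
    sigma_xy = F.avg_pool2d(x * y, 3, 1, 1) - mu_x * mu_y
    num = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)
    den = (mu_x ** 2 + mu_y ** 2 + c1) * (sigma_x + sigma_y + c2)
    return (num / den).clamp(0, 1)

def photometric_loss(target, synthesized, alpha=0.85):
    """Per-pixel loss map (Bx1xHxW) combining SSIM and L1 terms as in Eq. (2)."""
    l1 = (target - synthesized).abs().mean(1, keepdim=True)
    return alpha * (1 - ssim(target, synthesized)).mean(1, keepdim=True) / 2 + (1 - alpha) * l1
```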
|
| 59 |
+
|
| 60 |
+
While multi-view projective geometry provides strong cues for self-supervision, errors due to parallax in the scene have an undesirable effect on the photometric loss. We mitigate these effects by calculating the minimum photometric loss per pixel for each source image in the context $I_{S}$ , as shown in [18], so that:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\mathcal{L}_{p}\left(I_{t}, I_{S}\right) = \min_{I_{S}} \mathcal{L}_{p}\left(I_{t}, \hat{I}_{t}\right) \tag{3}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
The intuition is that the same pixel will not be occluded or out-of-bounds in all context images, and that the association with minimal photometric loss should be the correct one. Furthermore, we also mask out static pixels by removing those which have a warped photometric loss $\mathcal{L}_p(I_t,\hat{I}_t)$ higher than their corresponding unwarped photometric loss $\mathcal{L}_p(I_t,I_s)$ , calculated using the original source image without view synthesis. Introduced in [18], this auto-mask removes pixels whose appearance does not change between frames, which includes static scenes and dynamic objects with no relative motion, since these will have a smaller photometric loss when assuming no ego-motion.
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
\mathcal{M}_{p} = \min_{I_{S}} \mathcal{L}_{p}\left(I_{t}, I_{s}\right) > \min_{I_{S}} \mathcal{L}_{p}\left(I_{t}, \hat{I}_{t}\right) \tag{4}
|
| 70 |
+
$$
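The per-pixel minimum over source images (Eq. 3) and the auto-mask (Eq. 4) can then be sketched as follows, reusing the `photometric_loss` sketch above; shapes and names are assumptions.

```python
import torch

def min_reprojection_with_automask(target, warped_sources, unwarped_sources, alpha=0.85):
    """warped_sources / unwarped_sources: lists of Bx3xHxW images; returns a masked Bx1xHxW loss map."""
    warped = torch.stack([photometric_loss(target, s, alpha) for s in warped_sources])
    unwarped = torch.stack([photometric_loss(target, s, alpha) for s in unwarped_sources])
    loss_p = warped.min(dim=0).values                         # Eq. (3): per-pixel minimum over sources
    mask = (unwarped.min(dim=0).values > loss_p).float()      # Eq. (4): keep pixels that improve when warped
    return loss_p * mask
```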
|
| 71 |
+
|
| 72 |
+
Depth Smoothness Loss. In order to regularize the depth in texture-less low-image gradient regions, we incorporate an edge-aware term (Equation 5), similar to [17]. The loss is weighted for each of the pyramid-levels, and is decayed by a factor of 2 on down-sampling, starting with a weight of 1 for the $0^{\text{th}}$ pyramid level.
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
\mathcal{L}_{s}(\hat{D}_{t}) = \left| \delta_{x} \hat{D}_{t} \right| e^{-\left| \delta_{x} I_{t} \right|} + \left| \delta_{y} \hat{D}_{t} \right| e^{-\left| \delta_{y} I_{t} \right|} \tag{5}
|
| 76 |
+
$$
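A corresponding sketch of the edge-aware smoothness term in Eq. (5), operating on the predicted inverse depth and the target image (the per-scale weight decay is applied outside this function), under the same caveats:

```python
import torch

def smoothness_loss(disp, image):
    dx_d = (disp[:, :, :, :-1] - disp[:, :, :, 1:]).abs()
    dy_d = (disp[:, :, :-1, :] - disp[:, :, 1:, :]).abs()
    dx_i = (image[:, :, :, :-1] - image[:, :, :, 1:]).abs().mean(1, keepdim=True)
    dy_i = (image[:, :, :-1, :] - image[:, :, 1:, :]).abs().mean(1, keepdim=True)
    return (dx_d * torch.exp(-dx_i)).mean() + (dy_d * torch.exp(-dy_i)).mean()
```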
|
| 77 |
+
|
| 78 |
+

|
| 79 |
+
Figure 2: PackNet-SfM: Our proposed scale-aware self-supervised monocular structure-from-motion architecture. We introduce PackNet as a novel depth network, and optionally include weak velocity supervision at training time to produce scale-aware depth and pose models.
|
| 80 |
+
|
| 81 |
+
# 3.2. Scale-Aware SfM
|
| 82 |
+
|
| 83 |
+
As previously mentioned, both the monocular depth and ego-motion estimators $f_{d}$ and $f_{\mathbf{x}}$ predict scale-ambiguous values, due to the limitations of the monocular SfM training objective. In other words, the scene depth and the camera ego-motion can only be estimated up to an unknown and ambiguous scale factor. This is also reflected in the overall learning objective, where the photometric loss is agnostic to the metric depth of the scene. Furthermore, we note that all previous approaches which operate in the self-supervised monocular regime [5, 15, 17, 33] suffer from this limitation, and resort to artificially incorporating this scale factor at test-time, using LiDAR measurements.
|
| 84 |
+
|
| 85 |
+
Velocity Supervision Loss. Since instantaneous velocity measurements are ubiquitous in most mobile systems today, we show that they can be directly incorporated in our self-supervised objective to learn a metrically accurate and scale-aware monocular depth estimator. During training, we impose an additional loss $\mathcal{L}_v$ between the magnitude of the pose-translation component of the pose network prediction $\hat{\mathbf{t}}$ and the measured instantaneous velocity scalar $v$ multiplied by the time difference between target and source frames $\Delta T_{t\rightarrow s}$ , as shown below:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\mathcal{L}_{v}\left(\hat{\mathbf{t}}_{t \rightarrow s}, v\right) = \left| \left\| \hat{\mathbf{t}}_{t \rightarrow s} \right\| - |v| \, \Delta T_{t \rightarrow s} \right| \tag{6}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
Our final scale-aware self-supervised objective loss $\mathcal{L}_{\mathrm{scale}}$ from Equation 1 becomes:
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
\mathcal{L}_{\text{scale}}\left(I_{t}, \hat{I}_{t}, v\right) = \mathcal{L}\left(I_{t}, \hat{I}_{t}\right) + \lambda_{2} \mathcal{L}_{v}\left(\hat{\mathbf{t}}_{t \rightarrow s}, v\right) \tag{7}
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
where $\lambda_{2}$ is a weight used to balance the different loss terms. This additional velocity loss allows the pose network to make metrically accurate predictions, subsequently resulting in the depth network also learning metrically accurate estimates to maintain consistency (cf. Section 5.4).
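A short sketch of Eqs. (6)-(7), where `t_hat` is the translation predicted by the pose network, `v` the measured speed, and `dt` the time difference $\Delta T_{t \to s}$; names are illustrative assumptions.

```python
def velocity_loss(t_hat, v, dt):
    """Eq. (6): match the norm of the predicted translation to the integrated speed."""
    return (t_hat.norm(dim=-1) - v.abs() * dt).abs().mean()

def scale_aware_loss(photometric_and_smoothness, t_hat, v, dt, lambda2=0.05):
    """Eq. (7): add the velocity term to the self-supervised objective of Eq. (1)."""
    return photometric_and_smoothness + lambda2 * velocity_loss(t_hat, v, dt)
```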
|
| 98 |
+
|
| 99 |
+

|
| 100 |
+
Figure 3: Proposed 3D packing and unpacking blocks. Packing replaces striding and pooling, while unpacking is its symmetrical feature upsampling mechanism.
|
| 101 |
+
|
| 102 |
+
# 4. PackNet: 3D Packing for Depth Estimation
|
| 103 |
+
|
| 104 |
+
Standard convolutional architectures use aggressive striding and pooling to increase their receptive field size. However, this potentially decreases model performance for tasks requiring fine-grained representations [19, 49]. Similarly, traditional upsampling strategies [6, 11] fail to propagate and preserve sufficient details at the decoder layers to recover accurate depth predictions. In contrast, we propose a novel encoder-decoder architecture, called PackNet, that introduces new 3D packing and unpacking blocks to learn to jointly preserve and recover important spatial information for depth estimation. This is in alignment with recent observations that information loss is not a necessary condition to learn representations capable of generalizing to different scenarios [21]. In fact, progressive expansion and contraction in a fully invertible manner, without discarding "uninformative" input variability, has been shown to increase performance in a wide variety of tasks [3, 10, 25]. We first describe the different blocks of our proposed architecture, and then proceed to show how they are integrated together in a single model for monocular depth estimation.
|
| 105 |
+
|
| 106 |
+
# 4.1. Packing Block
|
| 107 |
+
|
| 108 |
+
The packing block (Fig. 3a) starts by folding the spatial dimensions of convolutional feature maps into extra feature channels via a Space2Depth operation [39]. The resulting tensor is at a reduced resolution, but in contrast to striding or pooling, this transformation is invertible and comes at no loss. Next, we learn to compress this concatenated feature space in order to reduce its dimensionality to a desired number of output channels. As we show in our experiments (cf. Section 5.6), 2D convolutions are not designed to directly leverage the tiled structure of this feature space. Instead, we propose to first learn to expand this structured representation via a 3D convolutional layer.
|
| 109 |
+
|
| 110 |
+
<table><tr><td></td><td>Layer Description</td><td>K</td><td>Output Tensor Dim.</td></tr><tr><td>#0</td><td>Input RGB image</td><td></td><td>3×H×W</td></tr><tr><td colspan="4">Encoding Layers</td></tr><tr><td>#1</td><td>Conv2d</td><td>5</td><td>64×H×W</td></tr><tr><td>#2</td><td>Conv2d → Packing</td><td>7</td><td>64×H/2×W/2</td></tr><tr><td>#3</td><td>ResidualBlock (x2) → Packing</td><td>3</td><td>64×H/4×W/4</td></tr><tr><td>#4</td><td>ResidualBlock (x2) → Packing</td><td>3</td><td>128×H/8×W/8</td></tr><tr><td>#5</td><td>ResidualBlock (x3) → Packing</td><td>3</td><td>256×H/16×W/16</td></tr><tr><td>#6</td><td>ResidualBlock (x3) → Packing</td><td>3</td><td>512×H/32×W/32</td></tr><tr><td colspan="4">Decoding Layers</td></tr><tr><td>#7</td><td>Unpacking (#6) → Conv2d (⊕ #5)</td><td>3</td><td>512×H/16×W/16</td></tr><tr><td>#8</td><td>Unpacking (#7) → Conv2d (⊕ #4)</td><td>3</td><td>256×H/8×W/8</td></tr><tr><td>#9</td><td>InvDepth (#8)</td><td>3</td><td>1×H/8×W/8</td></tr><tr><td>#10</td><td>Unpacking (#8) → Conv2d (⊕ #3 ⊕ Upsample(#9))</td><td>3</td><td>128×H/4×W/4</td></tr><tr><td>#11</td><td>InvDepth (#10)</td><td>3</td><td>1×H/4×W/4</td></tr><tr><td>#12</td><td>Unpacking (#10) → Conv2d (⊕ #2 ⊕ Upsample(#11))</td><td>3</td><td>64×H/2×W/2</td></tr><tr><td>#13</td><td>InvDepth (#12)</td><td>3</td><td>1×H/2×W/2</td></tr><tr><td>#14</td><td>Unpacking (#12) → Conv2d (⊕ #1 ⊕ Upsample(#13))</td><td>3</td><td>64×H×W</td></tr><tr><td>#15</td><td>InvDepth (#14)</td><td>3</td><td>1×H×W</td></tr></table>
|
| 111 |
+
|
| 112 |
+
Table 1: Summary of our PackNet architecture for self-supervised monocular depth estimation. The Packing and Unpacking blocks are described in Fig. 3, with kernel size $K = 3$ and $D = 8$ . Conv2d blocks include Group-Norm [45] with $G = 16$ and ELU non-linearities [7]. In-vDepth blocks include a 2D convolutional layer with $K = 3$ and sigmoid non-linearities. Each ResidualBlock is a sequence of 3 2D convolutional layers with $K = 3/3/1$ and ELU non-linearities, followed by GroupNorm with $G = 16$ and Dropout [40] of 0.5 in the final layer. Upsample is a nearest-neighbor resizing operation. Numbers in parentheses indicate input layers, with $\oplus$ as channel concatenation. Bold numbers indicate the four inverse depth output scales.
|
| 113 |
+
|
| 114 |
+
The resulting higher-dimensional feature space is then flattened (by simple reshaping) before a final 2D convolutional contraction layer. This structured feature expansion-contraction, inspired by invertible networks [3, 21] (although we do not ensure invertibility), allows our architecture to dedicate more parameters to learn how to compress key spatial details that need to be preserved for high resolution depth decoding.
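A minimal PyTorch sketch of such a packing block is given below (channel bookkeeping and defaults are assumptions, not the authors' reference code): Space2Depth folds space into channels, a 3D convolution expands the packed representation, and a final 2D convolution contracts it to the desired width.

```python
import torch
import torch.nn as nn

class PackingBlock(nn.Module):
    """Space2Depth -> 3D conv expansion -> flatten -> 2D conv contraction (sketch)."""
    def __init__(self, in_channels, out_channels, r=2, d=8, k=3):
        super().__init__()
        self.space2depth = nn.PixelUnshuffle(r)                         # lossless spatial folding
        self.conv3d = nn.Conv3d(1, d, kernel_size=k, padding=k // 2)    # learned expansion
        self.conv2d = nn.Conv2d(in_channels * r * r * d, out_channels, kernel_size=k, padding=k // 2)

    def forward(self, x):
        x = self.space2depth(x)                  # B x (C*r^2) x H/r x W/r
        x = self.conv3d(x.unsqueeze(1))          # treat packed channels as a depth axis, expand to D volumes
        b, d, c, h, w = x.shape
        x = x.reshape(b, d * c, h, w)            # flatten back to 2D feature maps
        return self.conv2d(x)                    # contract to the desired number of channels
```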
|
| 115 |
+
|
| 116 |
+
# 4.2. Unpacking Block
|
| 117 |
+
|
| 118 |
+
Symmetrically, the unpacking block (Fig. 3b) learns to decompress and unfold packed convolutional feature channels back into higher resolution spatial dimensions during the decoding process. The unpacking block replaces convolutional feature upsampling, typically performed via nearest-neighbor or with learnable transposed convolutional weights. It is inspired by sub-pixel convolutions [39], but adapted to reverse the 3D packing process that the features went through in the encoder. First, we use a 2D convolutional layer to produce the required number of feature channels for a following 3D convolutional layer. Second, this 3D convolution learns to expand back the compressed spatial features. Third, these unpacked features are converted back to spatial details via a reshape and Depth2Space operation [39] to obtain a tensor with the desired number of output channels and target higher resolution.
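A symmetric sketch of the unpacking block, under the same assumptions (the output channel count must be divisible so that Depth2Space can unfold $r^2$ spatial positions per channel):

```python
import torch
import torch.nn as nn

class UnpackingBlock(nn.Module):
    """2D conv -> 3D conv expansion -> reshape -> Depth2Space upsampling (sketch)."""
    def __init__(self, in_channels, out_channels, r=2, d=8, k=3):
        super().__init__()
        # after the 3D conv multiplies channels by D, Depth2Space consumes r*r of them per output channel
        self.conv2d = nn.Conv2d(in_channels, out_channels * r * r // d, kernel_size=k, padding=k // 2)
        self.conv3d = nn.Conv3d(1, d, kernel_size=k, padding=k // 2)
        self.depth2space = nn.PixelShuffle(r)

    def forward(self, x):
        x = self.conv2d(x)                       # B x (C_out*r^2/D) x H x W
        x = self.conv3d(x.unsqueeze(1))          # expand packed features with 3D convolutions
        b, d, c, h, w = x.shape
        x = x.reshape(b, d * c, h, w)            # B x (C_out*r^2) x H x W
        return self.depth2space(x)               # B x C_out x (H*r) x (W*r)
```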
|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
(a) Input Image
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
(b) Max Pooling + Bilinear Upsample
|
| 125 |
+
Figure 4: Image reconstruction using different encoder-decoders: (b) standard max pooling and bilinear upsampling, each followed by 2D convolutions; (c) one packing-unpacking combination (cf. Fig. 3) with $D = 2$ . All kernel sizes are $K = 3$ and $C = 4$ for intermediate channels.
|
| 126 |
+
|
| 127 |
+

|
| 128 |
+
(c) Pack + Unpack
|
| 129 |
+
|
| 130 |
+
# 4.3. Detail-Preserving Properties
|
| 131 |
+
|
| 132 |
+
In Fig. 4, we illustrate the detail-preserving properties of our packing / unpacking combination, showing we can get a near-lossless encoder-decoder for single image reconstruction by minimizing the L1 loss. We train a simple network composed of one packing layer followed by a symmetrical unpacking one and show it is able to almost exactly reconstruct the input image (final loss of 0.0079), including sharp edges and finer details. In contrast, a comparable baseline replacing packing / unpacking with max pooling / bilinear upsampling (and keeping the 2D convolutions) is only able to learn a blurry reconstruction (final loss of 0.063). This highlights how PackNet is able to learn more complex features by preserving spatial and appearance information end-to-end throughout the network.
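A toy version of this reconstruction probe, reusing the `PackingBlock` and `UnpackingBlock` sketches above (image size and hyperparameters are stand-ins), could look as follows:

```python
import torch
import torch.nn.functional as F

pack, unpack = PackingBlock(3, 4, d=2), UnpackingBlock(4, 3, d=2)
optimizer = torch.optim.Adam(list(pack.parameters()) + list(unpack.parameters()), lr=1e-3)
image = torch.rand(1, 3, 64, 64)                 # stand-in for a single training image
for _ in range(100):
    recon = unpack(pack(image))
    loss = F.l1_loss(recon, image)               # L1 reconstruction loss, as in this section
    optimizer.zero_grad(); loss.backward(); optimizer.step()
```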
|
| 133 |
+
|
| 134 |
+
# 4.4. Model Architecture
|
| 135 |
+
|
| 136 |
+
Our PackNet architecture for self-supervised monocular depth estimation is detailed in Table 1. Our symmetrical encoder-decoder architecture incorporates several packing and unpacking blocks, and is supplemented with skip connections [35] to facilitate the flow of information and gradients throughout the network. The decoder produces intermediate inverse depth maps that are upsampled before being concatenated with their corresponding skip connections and unpacked feature maps. These intermediate inverse depth maps are also used at training time in the loss calculation, after being upsampled to the full output resolution using nearest neighbors interpolation.
|
| 137 |
+
|
| 138 |
+
# 5. Experiments
|
| 139 |
+
|
| 140 |
+
# 5.1. Datasets
|
| 141 |
+
|
| 142 |
+
KITTI [16]. The KITTI benchmark is the de facto standard for depth evaluation. More specifically, we adopt the training protocol used in Eigen et al. [13], with Zhou et al.'s [52] pre-processing to remove static frames. This results in 39810 images for training, 4424 for validation and 697 for evaluation. We also consider the improved ground-truth depth maps from [41] for evaluation, which uses 5 consecutive frames to accumulate LiDAR points and stereo information to handle moving objects, resulting in 652 high-quality depth maps.
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
DDAD (Dense Depth for Automated Driving). As one of our contributions, we release a diverse dataset of urban, highway, and residential scenes curated from a global fleet of self-driving cars. It contains 17,050 training and 4,150 evaluation frames with ground-truth depth maps generated from dense LiDAR measurements using the Luminar-H2 sensor. This new dataset is a more realistic and challenging benchmark for depth estimation, as it is diverse and captures precise structure across images (30k points per frame) at longer ranges (up to $200m$ vs $80m$ for previous datasets). See supplementary material for more details.
|
| 147 |
+
|
| 148 |
+
NuScenes [4]. To assess the generalization capability of our approach w.r.t. previous ones, we evaluate KITTI models (without fine-tuning) on the official NuScenes validation dataset of 6019 front-facing images with ground-truth depth maps generated by LiDAR reprojection.
|
| 149 |
+
|
| 150 |
+
CityScapes [8]. We also experiment with pretraining our monocular networks on the CityScapes dataset, before finetuning on the KITTI dataset. This also allows us to explore the scalability and generalization performance of different models, as they are trained with increasing amounts of unlabeled data. A total of 88250 images were considered as the training split for the CityScapes dataset, using the same training parameters as KITTI for 20 epochs.
|
| 151 |
+
|
| 152 |
+
# 5.2. Implementation Details
|
| 153 |
+
|
| 154 |
+
We use PyTorch [37] with all models trained across 8 Titan V100 GPUs. We use the Adam optimizer [24], with $\beta_{1} = 0.9$ and $\beta_{2} = 0.999$ . The monocular depth and pose networks are trained for 100 epochs, with a batch size of 4 and initial depth and pose learning rates of $2 \cdot 10^{-4}$ and $5 \cdot 10^{-4}$ respectively. Training sequences are generated using a stride of 1, meaning that the previous $t - 1$ , current $t$ , and posterior $t + 1$ images are used in the loss calculation. As training proceeds, the learning rate is decayed every 40 epochs by a factor of 2. We set the SSIM weight to $\alpha = 0.85$ , the depth regularization weight to $\lambda_{1} = 0.001$ and, where applicable, the velocity-scaling weight to $\lambda_{2} = 0.05$ .
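A hedged sketch of this optimization setup is shown below; `PackNet()` and `PoseNet()` are hypothetical constructors standing in for the two networks.

```python
import torch

depth_net, pose_net = PackNet(), PoseNet()       # hypothetical constructors for the two networks
optimizer = torch.optim.Adam([
    {"params": depth_net.parameters(), "lr": 2e-4},
    {"params": pose_net.parameters(),  "lr": 5e-4},
], betas=(0.9, 0.999))
# decay both learning rates by a factor of 2 every 40 epochs
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=40, gamma=0.5)
```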
|
| 155 |
+
|
| 156 |
+
Depth Network. Unless noted otherwise, we use our PackNet architecture as specified in Table 1. During training, all four inverse depth output scales are used in the loss calculation, and at test-time only the final output scale is used, after being resized to the full ground-truth depth map resolution using nearest neighbor interpolation.
|
| 157 |
+
|
| 158 |
+
Pose Network. We use the architecture proposed by [52] without the explainability mask, which we found not to improve results. The pose network consists of 7 convolutional layers followed by a final $1 \times 1$ convolutional layer. The input to the network consists of the target view $I_{t}$ and the context views $I_{S}$ , and the output is the set of 6 DOF transformations between $I_{t}$ and $I_{s}$ , for $s \in S$ .
|
| 159 |
+
|
| 160 |
+
# 5.3. Depth Estimation Performance
|
| 161 |
+
|
| 162 |
+
First, we report the performance of our proposed monocular depth estimation method when considering longer distances, which is now possible due to the introduction of our new DDAD dataset. Depth estimation results using this dataset for training and evaluation, considering cumulative distances up to $200\mathrm{m}$ , can be found in Fig. 5 and Table 2. Additionally, in Fig. 6 we present results for different depth intervals calculated independently. From these results we can see that our PackNet-SfM approach significantly outperforms the state of the art [18], which is based on the ResNet family, with the performance gap consistently increasing as larger distances are considered.
|
| 163 |
+
|
| 164 |
+
Second, we evaluate depth predictions on KITTI using the metrics described in Eigen et al. [13]. We summarize our results in Table 3, for the original depth maps from [13] and the accumulated depth maps from [41], and illustrate their performance qualitatively in Fig. 7. In contrast to previous methods [5, 18] that predominantly focus on modifying the training objective, we show that our proposed PackNet architecture can by itself bolster performance and establish a new state of the art for the task of monocular depth estimation, trained in the self-supervised monocular setting.
|
| 165 |
+
|
| 166 |
+
Furthermore, we show that by simply introducing an additional source of unlabeled videos, such as the publicly available CityScapes dataset (CS+K) [8], we are able to further improve monocular depth estimation performance. As indicated by Pillai et al. [38], we also observe an improvement in performance at higher image resolutions, which we attribute to the proposed network's ability to properly preserve and process spatial information end-to-end. Our best results are achieved when injecting both more unlabeled data at training time and processing higher resolution input images, achieving performance comparable to semi-supervised [28] and fully supervised [14] methods.
|
| 167 |
+
|
| 168 |
+
# 5.4. Scale-Aware Depth Estimation Performance
|
| 169 |
+
|
| 170 |
+
Due to their inherent scale ambiguity, self-supervised monocular methods [18, 33, 52] evaluate depth by scaling their estimates to the median ground-truth as measured via LiDAR. In Section 3.2 we propose to also recover the metric scale of the scene from a single image by imposing a loss on the magnitude of the translation for the pose network output. Table 3 shows that introducing this weak velocity supervision at training time allows the generation of scale-aware depth models with similar performance as their unscaled counterparts, with the added benefit of not requiring ground-truth depth scaling (or even velocity information) at test-time. Another benefit of scale-awareness is that we can compose metrically accurate trajectories directly from the output of the pose network. Due to space constraints, we report pose estimation results in supplementary material.
|
| 171 |
+
|
| 172 |
+

|
| 173 |
+
Figure 5: PackNet pointcloud reconstructions on DDAD.
|
| 174 |
+
|
| 175 |
+
<table><tr><td>Method</td><td>Abs Rel</td><td>Sq Rel</td><td>RMSE</td><td>\( RMSE_{log} \)</td><td>\( \delta_{1.25} \)</td></tr><tr><td>Monodepth2 (R18)</td><td>0.381</td><td>8.387</td><td>21.277</td><td>0.371</td><td>0.587</td></tr><tr><td>Monodepth2‡ (R18)</td><td>0.213</td><td>4.975</td><td>18.051</td><td>0.340</td><td>0.761</td></tr><tr><td>Monodepth2 (R50)</td><td>0.324</td><td>7.348</td><td>20.538</td><td>0.344</td><td>0.615</td></tr><tr><td>Monodepth2‡ (R50)</td><td>0.198</td><td>4.504</td><td>16.641</td><td>0.318</td><td>0.781</td></tr><tr><td>PackNet-SfM</td><td>0.162</td><td>3.917</td><td>13.452</td><td>0.269</td><td>0.823</td></tr></table>
|
| 176 |
+
|
| 177 |
+
Table 2: Depth Evaluation on DDAD, for $640 \times 384$ resolution and distances up to $200\mathrm{m}$ . While the ResNet family heavily relies on large-scale supervised ImageNet [9] pretraining (denoted by $\ddagger$ ), PackNet achieves significantly better results despite being trained from scratch.
|
| 178 |
+
|
| 179 |
+

|
| 180 |
+
Figure 6: Depth Evaluation on DDAD binned at different intervals, calculated independently by only considering ground-truth depth pixels in that range (0-20m, 20-40m, ...).
|
| 181 |
+
|
| 182 |
+
# 5.5. Network Complexity
|
| 183 |
+
|
| 184 |
+
The introduction of packing and unpacking as alternatives to standard downsampling and upsampling operations increases the complexity of the network, due to the number of added parameters. To ensure that the gain in performance shown in our experiments is not only due to an increase in model capacity, we compare different variations of our PackNet architecture (obtained by modifying the number of layers and feature channels) against available ResNet architectures. These results are depicted in Fig. 8 and show that, while the ResNet family stabilizes with diminishing returns as the number of parameters increase, the PackNet family matches its performance at around 70M parameters and further improves as more complexity is added. Finally, the proposed architecture (Table 1) reaches around 128M parameters with an inference time of 60ms on a Titan V100 GPU, which can be further improved to $< 30$ ms using TensorRT [1], making it suitable for real-time applications.
|
| 185 |
+
|
| 186 |
+
<table><tr><td></td><td>Method</td><td>Supervision</td><td>Resolution</td><td>Dataset</td><td>Abs Rel</td><td>Sq Rel</td><td>RMSE</td><td>RMSElog</td><td>δ < 1.25</td><td>δ < 1.252</td><td>δ < 1.253</td></tr><tr><td rowspan="15">Original [13]</td><td>SfMLearner [52]</td><td>M</td><td>416 x 128</td><td>CS + K</td><td>0.198</td><td>1.836</td><td>6.565</td><td>0.275</td><td>0.718</td><td>0.901</td><td>0.960</td></tr><tr><td>Vid2Depth [33]</td><td>M</td><td>416 x 128</td><td>CS + K</td><td>0.159</td><td>1.231</td><td>5.912</td><td>0.243</td><td>0.784</td><td>0.923</td><td>0.970</td></tr><tr><td>DF-Net [53]</td><td>M</td><td>576 x 160</td><td>CS + K</td><td>0.146</td><td>1.182</td><td>5.215</td><td>0.213</td><td>0.818</td><td>0.943</td><td>0.978</td></tr><tr><td>Struct2Depth [5]</td><td>M</td><td>416 x 128</td><td>K</td><td>0.141</td><td>1.026</td><td>5.291</td><td>0.215</td><td>0.816</td><td>0.945</td><td>0.979</td></tr><tr><td>Zhou et al.‡ [50]</td><td>M</td><td>1248 x 384</td><td>K</td><td>0.121</td><td>0.837</td><td>4.945</td><td>0.197</td><td>0.853</td><td>0.955</td><td>0.982</td></tr><tr><td>Monodepth2‡ [18]</td><td>M</td><td>640 x 192</td><td>K</td><td>0.115</td><td>0.903</td><td>4.863</td><td>0.193</td><td>0.877</td><td>0.959</td><td>0.981</td></tr><tr><td>Monodepth2‡ [18]</td><td>M</td><td>1024 x 320</td><td>K</td><td>0.115</td><td>0.882</td><td>4.701</td><td>0.190</td><td>0.879</td><td>0.961</td><td>0.982</td></tr><tr><td>PackNet-SfM</td><td>M</td><td>640 x 192</td><td>K</td><td>0.111</td><td>0.785</td><td>4.601</td><td>0.189</td><td>0.878</td><td>0.960</td><td>0.982</td></tr><tr><td>PackNet-SfM</td><td>M+v</td><td>640 x 192</td><td>K</td><td>0.111</td><td>0.829</td><td>4.788</td><td>0.199</td><td>0.864</td><td>0.954</td><td>0.980</td></tr><tr><td>PackNet-SfM</td><td>M</td><td>640 x 192</td><td>CS + K</td><td>0.108</td><td>0.727</td><td>4.426</td><td>0.184</td><td>0.885</td><td>0.963</td><td>0.983</td></tr><tr><td>PackNet-SfM</td><td>M+v</td><td>640 x 192</td><td>CS + K</td><td>0.108</td><td>0.803</td><td>4.642</td><td>0.195</td><td>0.875</td><td>0.958</td><td>0.980</td></tr><tr><td>PackNet-SfM</td><td>M</td><td>1280 x 384</td><td>K</td><td>0.107</td><td>0.802</td><td>4.538</td><td>0.186</td><td>0.889</td><td>0.962</td><td>0.981</td></tr><tr><td>PackNet-SfM</td><td>M+v</td><td>1280 x 384</td><td>K</td><td>0.107</td><td>0.803</td><td>4.566</td><td>0.197</td><td>0.876</td><td>0.957</td><td>0.979</td></tr><tr><td>PackNet-SfM</td><td>M</td><td>1280 x 384</td><td>CS + K</td><td>0.104</td><td>0.758</td><td>4.386</td><td>0.182</td><td>0.895</td><td>0.964</td><td>0.982</td></tr><tr><td>PackNet-SfM</td><td>M+v</td><td>1280 x 384</td><td>CS + K</td><td>0.103</td><td>0.796</td><td>4.404</td><td>0.189</td><td>0.881</td><td>0.959</td><td>0.980</td></tr><tr><td rowspan="11">Improved [41]</td><td>SfMLeaner [52]</td><td>M</td><td>416 x 128</td><td>CS + K</td><td>0.176</td><td>1.532</td><td>6.129</td><td>0.244</td><td>0.758</td><td>0.921</td><td>0.971</td></tr><tr><td>Vid2Depth [33]</td><td>M</td><td>416 x 128</td><td>CS + K</td><td>0.134</td><td>0.983</td><td>5.501</td><td>0.203</td><td>0.827</td><td>0.944</td><td>0.981</td></tr><tr><td>GeoNet [47]</td><td>M</td><td>416 x 128</td><td>CS + K</td><td>0.132</td><td>0.994</td><td>5.240</td><td>0.193</td><td>0.883</td><td>0.953</td><td>0.985</td></tr><tr><td>DDVO [43]</td><td>M</td><td>416 x 128</td><td>CS + K</td><td>0.126</td><td>0.866</td><td>4.932</td><td>0.185</td><td>0.851</td><td>0.958</td><td>0.986</td></tr><tr><td>EPC++ [32]</td><td>M</td><td>640 
x 192</td><td>K</td><td>0.120</td><td>0.789</td><td>4.755</td><td>0.177</td><td>0.856</td><td>0.961</td><td>0.987</td></tr><tr><td>Monodepth2‡ [18]</td><td>M</td><td>640 x 192</td><td>K</td><td>0.090</td><td>0.545</td><td>3.942</td><td>0.137</td><td>0.914</td><td>0.983</td><td>0.995</td></tr><tr><td>Kuznietsov et al.‡ [28]</td><td>D</td><td>621 x 187</td><td>K</td><td>0.089</td><td>0.478</td><td>3.610</td><td>0.138</td><td>0.906</td><td>0.980</td><td>0.995</td></tr><tr><td>DORN‡ [14]</td><td>D</td><td>513 x 385</td><td>K</td><td>0.072</td><td>0.307</td><td>2.727</td><td>0.120</td><td>0.932</td><td>0.984</td><td>0.995</td></tr><tr><td>PackNet-SfM</td><td>M</td><td>640 x 192</td><td>K</td><td>0.078</td><td>0.420</td><td>3.485</td><td>0.121</td><td>0.931</td><td>0.986</td><td>0.996</td></tr><tr><td>PackNet-SfM</td><td>M</td><td>1280 x 384</td><td>CS + K</td><td>0.071</td><td>0.359</td><td>3.153</td><td>0.109</td><td>0.944</td><td>0.990</td><td>0.997</td></tr><tr><td>PackNet-SfM</td><td>M+v</td><td>1280 x 384</td><td>CS + K</td><td>0.075</td><td>0.384</td><td>3.293</td><td>0.114</td><td>0.938</td><td>0.984</td><td>0.995</td></tr></table>
|
| 187 |
+
|
| 188 |
+
Table 3: Quantitative performance comparison of PackNet-SfM on the KITTI dataset for distances up to $80\mathrm{m}$ . For Abs Rel, Sq Rel, RMSE and $\mathrm{RMSE}_{log}$ lower is better, and for $\delta < 1.25$ , $\delta < 1.25^2$ and $\delta < 1.25^3$ higher is better. In the Dataset column, CS+K refers to pretraining on CityScapes (CS) and fine-tuning on KITTI (K). M refers to methods that train using monocular (M) images, and M+v refers to added velocity weak supervision (v), as shown in Section 3.2. $\ddagger$ indicates ImageNet [9] pretraining. Original uses raw depth maps from [13] for evaluation, and Improved uses annotated depth maps from [41]. At test-time, all monocular methods (M) scale estimated depths with median ground-truth LiDAR information. Velocity-scaled (M+v) and supervised (D) methods are not scaled in such way, since they are already metrically accurate.
|
| 189 |
+
|
| 190 |
+

|
| 191 |
+
Figure 7: Qualitative monocular depth estimation performance comparing PackNet with previous methods, on frames from the KITTI dataset (Eigen test split). Our method is able to capture sharper details and structure (e.g., on vehicles, pedestrians, and thin poles) thanks to the learned preservation of spatial information.
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
Figure 8: Performance of different depth network architectures for varying numbers of parameters on the original KITTI Eigen split [13] with resolutions of $640 \times 192$ (MR) and $1280 \times 384$ (HR). While the ResNet family plateaus at 70M parameters, the PackNet family matches its performance at the same number of parameters for MR, outperforms it clearly for HR, and improves significantly with more parameters in both cases without overfitting.
|
| 195 |
+
|
| 196 |
+
The PackNet family is also consistently better at higher resolution, as it properly preserves and propagates spatial information between layers. In contrast, as reported in prior works [18], ResNet architectures do not scale well, with only minor improvements at higher resolution.
|
| 197 |
+
|
| 198 |
+
# 5.6. Ablation Studies
|
| 199 |
+
|
| 200 |
+
To further study the performance improvements that PackNet provides, we perform an ablative analysis on the different architectural components introduced, as depicted in Table 4. We show that the base architecture, without the proposed packing and unpacking blocks, already produces a strong baseline for the monocular depth estimation task. The introduction of packing and unpacking boosts depth estimation performance, especially as more 3D convolutional filters are added, with new state-of-the-art results being achieved by the architecture described in Table 1.
|
| 201 |
+
|
| 202 |
+
As mentioned in [14, 18], ResNet architectures highly benefit from ImageNet pretraining, since they were originally developed for classification tasks. Interestingly, we also noticed that the performance of pretrained ResNet architectures degrades in longer training periods, due to catastrophic forgetting that leads to overfitting. The proposed PackNet architecture, on the other hand, achieves state-of-the-art results from randomly initialized weights, and can be further improved by self-supervised pretraining on other datasets, thus properly leveraging the large-scale availability of unlabeled information thanks to its structure.
|
| 203 |
+
|
| 204 |
+
# 5.7. Generalization Capability
|
| 205 |
+
|
| 206 |
+
We also investigate the generalization performance of PackNet, as evidence that it does not simply memorize training data but learns transferable discriminative features. To assess this, we evaluate models trained on a combination of CityScapes and KITTI $(\mathrm{CS} + \mathrm{K})$ on the recent NuScenes dataset [4], without any fine-tuning. Results in Table 5 show PackNet indeed generalizes better across a large spectrum of
|
| 207 |
+
|
| 208 |
+
<table><tr><td>Depth Network</td><td>Abs Rel</td><td>Sq Rel</td><td>RMSE</td><td>$\mathrm{RMSE}_{log}$</td><td>$\delta_{1.25}$</td></tr><tr><td>ResNet18</td><td>0.133</td><td>1.023</td><td>5.123</td><td>0.211</td><td>0.845</td></tr><tr><td>ResNet18$^{\ddagger}$</td><td>0.120</td><td>0.896</td><td>4.869</td><td>0.198</td><td>0.868</td></tr><tr><td>ResNet50</td><td>0.127</td><td>0.977</td><td>5.023</td><td>0.205</td><td>0.856</td></tr><tr><td>ResNet50$^{\ddagger}$</td><td>0.117</td><td>0.900</td><td>4.826</td><td>0.196</td><td>0.873</td></tr><tr><td>PackNet (w/o pack/unpack)</td><td>0.122</td><td>0.880</td><td>4.816</td><td>0.198</td><td>0.864</td></tr><tr><td>PackNet (D=0)</td><td>0.121</td><td>0.922</td><td>4.831</td><td>0.195</td><td>0.869</td></tr><tr><td>PackNet (D=2)</td><td>0.118</td><td>0.802</td><td>4.656</td><td>0.194</td><td>0.868</td></tr><tr><td>PackNet (D=4)</td><td>0.113</td><td>0.818</td><td>4.621</td><td>0.190</td><td>0.875</td></tr><tr><td>PackNet (D=8)</td><td>0.111</td><td>0.785</td><td>4.601</td><td>0.189</td><td>0.878</td></tr></table>
|
| 209 |
+
|
| 210 |
+
Table 4: Ablation study on the PackNet architecture, on the standard KITTI benchmark for $640 \times 192$ resolution. ResNetXX indicates the use of that specific architecture [20] as encoder, with and without ImageNet [9] pretraining (denoted with $\ddagger$ ). We also show results with the proposed PackNet architecture, first without packing and unpacking (replaced respectively with convolutional striding and bilinear upsampling) and then with increasing numbers of 3D convolutional filters ( $D = 0$ indicates no 3D convolutions and the corresponding reshape operations).
|
| 211 |
+
|
| 212 |
+
<table><tr><td>Method</td><td>Abs Rel</td><td>Sq Rel</td><td>RMSE</td><td>$\mathrm{RMSE}_{log}$</td><td>$\delta_{1.25}$</td></tr><tr><td>ResNet18</td><td>0.218</td><td>2.053</td><td>8.154</td><td>0.355</td><td>0.650</td></tr><tr><td>ResNet18$^{\ddagger}$</td><td>0.212</td><td>1.918</td><td>7.958</td><td>0.323</td><td>0.674</td></tr><tr><td>ResNet50</td><td>0.216</td><td>2.165</td><td>8.477</td><td>0.371</td><td>0.637</td></tr><tr><td>ResNet50$^{\ddagger}$</td><td>0.210</td><td>2.017</td><td>8.111</td><td>0.328</td><td>0.697</td></tr><tr><td>PackNet</td><td>0.187</td><td>1.852</td><td>7.636</td><td>0.289</td><td>0.742</td></tr></table>
|
| 213 |
+
|
| 214 |
+
Table 5: Generalization capability of different depth networks, trained on both KITTI and CityScapes and evaluated on NuScenes [4], for $640 \times 192$ resolution and distances up to $80\mathrm{m}$ . $\ddagger$ denotes ImageNet [9] pretraining.
|
| 215 |
+
|
| 216 |
+
vehicles and countries (Germany for $\mathrm{CS + K}$ , USA + Singapore for NuScenes), outperforming standard architectures in all considered metrics without the need for large-scale supervised pretraining on ImageNet.
|
| 217 |
+
|
| 218 |
+
# 6. Conclusion
|
| 219 |
+
|
| 220 |
+
We propose a new convolutional network architecture for self-supervised monocular depth estimation: PackNet. It leverages novel, symmetrical, detail-preserving packing and unpacking blocks that jointly learn to compress and decompress high resolution visual information for fine-grained predictions. Although purely trained on unlabeled monocular videos, our approach outperforms other existing self- and semi-supervised methods and is even competitive with fully-supervised methods while being able to run in real time. It also generalizes better to different datasets and unseen environments without the need for ImageNet pretraining, especially when considering longer depth ranges, as assessed up to $200\mathrm{m}$ on our new DDAD dataset. Additionally, by leveraging only weak velocity information during training, we make our model scale-aware, i.e., able to produce metrically accurate depth maps from a single image.
|
| 221 |
+
|
| 222 |
+
# References
|
| 223 |
+
|
| 224 |
+
[1] TensorRT Python library. https://developer.nvidia.com/tensorrt. Accessed: 2019-11-09. 6
|
| 225 |
+
[2] Aayush Bansal, Xinlei Chen, Bryan Russell, Abhinav Gupta, and Deva Ramanan. Pixelnet: Representation of the pixels, by the pixels, and for the pixels. arXiv preprint arXiv:1702.06506, 2017. 2
|
| 226 |
+
[3] Jens Behrmann, Will Grathwohl, Ricky TQ Chen, David Duvenaud, and Jörn-Henrik Jacobsen. Invertible residual networks. arXiv preprint arXiv:1811.00995, 2018. 4
|
| 227 |
+
[4] Holger Caesar, Varun Bankiti, Alex H. Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuScenes: A multimodal dataset for autonomous driving. CoRR, 2019. 2, 5, 8
|
| 228 |
+
[5] Vincent Casser, Soeren Pirk, Reza Mahjourian, and Anelia Angelova. Depth prediction without the sensors: Leveraging structure for unsupervised learning from monocular videos. In AAAI, 2019. 1, 2, 3, 6, 7
|
| 229 |
+
[6] Yunjin Chen and Thomas Pock. Trainable nonlinear reaction diffusion: A flexible framework for fast and effective image restoration. IEEE Transactions on Pattern Analysis and Machine Intelligence, 39:1256-1272, 2017. 4
|
| 230 |
+
[7] Djork-Arné Clevert, Thomas Unterthiner, and Sepp Hochreiter. Fast and accurate deep network learning by exponential linear units (elus). In ICLR, 2016. 4
|
| 231 |
+
[8] Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In IEEE conference on computer vision and pattern recognition, pages 3213-3223, 2016. 5, 6
|
| 232 |
+
[9] Jia Deng, Wei Dong, Richard Socher, Li jia Li, Kai Li, and Li Fei-fei. Imagenet: A large-scale hierarchical image database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2009. 6, 7, 8
|
| 233 |
+
[10] Laurent Dinh, Jascha Sohl-Dickstein, and Samy Bengio. Density estimation using real nvp. In *ICLR*, 2017. 4
|
| 234 |
+
[11] Chao Dong, Chen Change Loy, Kaiming He, and Xiaoou Tang. Image super-resolution using deep convolutional networks. IEEE Trans. Pattern Anal. Mach. Intell., 38(2):295-307, Feb. 2016. 4
|
| 235 |
+
[12] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 2758-2766, 2015. 2
|
| 236 |
+
[13] David Eigen, Christian Puhrsch, and Rob Fergus. Depth map prediction from a single image using a multi-scale deep network. In Advances in neural information processing systems, pages 2366-2374, 2014. 2, 5, 6, 7, 8
|
| 237 |
+
[14] Huan Fu, Mingming Gong, Chaohui Wang, Kayhan Batmanghelich, and Dacheng Tao. Deep ordinal regression network for monocular depth estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2002-2011, 2018. 6, 7, 8
|
| 238 |
+
|
| 239 |
+
[15] Ravi Garg, Vijay Kumar BG, Gustavo Carneiro, and Ian Reid. Unsupervised cnn for single view depth estimation: Geometry to the rescue. In European Conference on Computer Vision, pages 740-756. Springer, 2016. 2, 3
|
| 240 |
+
[16] Andreas Geiger, Philip Lenz, Christoph Stiller, and Raquel Urtasun. Vision meets robotics: The kitti dataset. The International Journal of Robotics Research, 32(11):1231-1237, 2013. 2, 5
|
| 241 |
+
[17] Clément Godard, Oisin Mac Aodha, and Gabriel J Brostow. Unsupervised monocular depth estimation with left-right consistency. In CVPR, volume 2, page 7, 2017. 2, 3
|
| 242 |
+
[18] Clément Godard, Oisin Mac Aodha, Michael Firman, and Gabriel J. Brostow. Digging into self-supervised monocular depth prediction. In ICCV, 2019. 3, 6, 7, 8
|
| 243 |
+
[19] Benjamin Graham. Fractional max-pooling. arXiv:1412.6071, 2015. 2, 4
|
| 244 |
+
[20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 1, 8
|
| 245 |
+
[21] Jörn-Henrik Jacobsen, Arnold W.M. Smeulders, and Edouard Oyallon. i-revnet: Deep invertible networks. In International Conference on Learning Representations, 2018. 4
|
| 246 |
+
[22] Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. In Advances in neural information processing systems, pages 2017-2025, 2015. 2
|
| 247 |
+
[23] Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7482-7491, 2018. 1
|
| 248 |
+
[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 5
|
| 249 |
+
[25] Durk P Kingma and Prafulla Dhariwal. Glow: Generative flow with invertible 1x1 convolutions. In Advances in Neural Information Processing Systems, 2018. 4
|
| 250 |
+
[26] Maria Klodt and Andrea Vedaldi. Supervising the new with the old: Learning sfm from sfm. In European Conference on Computer Vision, pages 713-728. Springer, 2018. 2
|
| 251 |
+
[27] Alexander Kolesnikov, Xiaohua Zhai, and Lucas Beyer. Revisiting self-supervised visual representation learning. arXiv preprint arXiv:1901.09005, 2019. 1
|
| 252 |
+
[28] Yevhen Kuznietsov, Jörg Stuckler, and Bastian Leibe. Semi-supervised deep learning for monocular depth map prediction. In IEEE Conference on Computer Vision and Pattern Recognition, pages 6647-6655, 2017. 6, 7
|
| 253 |
+
[29] Chen-Yu Lee, Patrick Gallagher, and Zhuowen Tu. Generalizing pooling functions in convolutional neural networks: Mixed, gated, and tree. In International Conference on Artificial Intelligence and Statistics (AISTATS), 2016. 2
|
| 254 |
+
[30] Kuan-Hui Lee, German Ros, Jie Li, and Adrien Gaidon. Spigan: Privileged adversarial learning from simulation. In ICLR, 2019. 1
|
| 255 |
+
[31] Jonathan Long, Evan Shelhamer, and Trevor Darrell. Fully convolutional networks for semantic segmentation. In Pro
|
| 256 |
+
|
| 257 |
+
ceedings of the IEEE conference on computer vision and pattern recognition, pages 3431-3440, 2015. 2
|
| 258 |
+
[32] C. Luo, Z. Yang, P. Wang, Y. Wang, W. Xu, R. Nevatia, and A. Yuille. Every pixel counts++: Joint learning of geometry and motion with 3d holistic understanding. arXiv preprint arXiv:1810.06125, 2018. 7
|
| 259 |
+
[33] Reza Mahjourian, Martin Wicke, and Anelia Angelova. Unsupervised learning of depth and ego-motion from monocular video using 3d geometric constraints. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5667-5675, 2018. 1, 2, 3, 6, 7
|
| 260 |
+
[34] Fabian Manhardt, Wadim Kehl, and Adrien Gaidon. Roi10d: Monocular lifting of 2d detection to 6d pose and metric shape. IEEE Conference on Computer Vision and Pattern Recognition, 2018. 1
|
| 261 |
+
[35] Nikolaus Mayer, Eddy Ilg, Philip Hausser, Philipp Fischer, Daniel Cremers, Alexey Dosovitskiy, and Thomas Brox. A large dataset to train convolutional networks for disparity, optical flow, and scene flow estimation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4040-4048, 2016. 2, 5
|
| 262 |
+
[36] Jeff Michels, Ashutosh Saxena, and Andrew Y Ng. High speed obstacle avoidance using monocular vision and reinforcement learning. In 22nd international conference on Machine learning, pages 593-600. ACM, 2005. 1
|
| 263 |
+
[37] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. In NIPS-W, 2017. 5
|
| 264 |
+
[38] Sudeep Pillai, Rares Ambrus, and Adrien Gaidon. Superdepth: Self-supervised, super-resolved monocular depth estimation. In Robotics and Automation (ICRA), 2019 IEEE International Conference on, 2018. 2, 6
|
| 265 |
+
[39] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 2, 4
|
| 266 |
+
[40] Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, and Ruslan Salakhutdinov. Dropout: A simple way to prevent neural networks from overfitting. Journal of Machine Learning Research, 15:1929-1958, 2014. 4
|
| 267 |
+
[41] J. Uhrig, N. Schneider, L. Schneider, U. Franke, T. Brox, and A. Geiger. Sparsity invariant cnns. 3DV, 2017. 5, 6, 7
|
| 268 |
+
|
| 269 |
+
[42] Benjamin Ummenhofer, Huizhong Zhou, Jonas Uhrig, Nikolaus Mayer, Eddy Ilg, Alexey Dosovitskiy, and Thomas Brox. Demon: Depth and motion network for learning monocular stereo. In IEEE Conference on computer vision and pattern recognition (CVPR), volume 5, page 6, 2017. 2
|
| 270 |
+
[43] Chaoyang Wang, José Miguel Buenaposada, Rui Zhu, and Simon Lucey. Learning depth from monocular videos using direct methods. In IEEE Conference on Computer Vision and Pattern Recognition, pages 2022–2030, 2018. 2, 7
|
| 271 |
+
[44] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 2, 3
|
| 272 |
+
[45] Yuxin Wu and Kaiming He. Group normalization. In Computer Vision - ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part XIII, pages 3-19, 2018. 4
|
| 273 |
+
[46] Nan Yang, Rui Wang, Jörg Stuckler, and Daniel Cremers. Deep virtual stereo odometry: Leveraging deep depth prediction for monocular direct sparse odometry. arXiv preprint arXiv:1807.02570, 2018. 2
|
| 274 |
+
[47] Zhichao Yin and Jianping Shi. Geonet: Unsupervised learning of dense depth, optical flow and camera pose. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), volume 2, 2018. 1, 2, 7
|
| 275 |
+
[48] Fisher Yu, Vladlen Koltun, and Thomas Funkhouser. Dilated residual networks. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), July 2017. 2
|
| 276 |
+
[49] Hao Zhang and Jianwei Ma. Hartley spectral pooling for deep learning. Computing Research Repository, abs/1810.04028, 2018. 4
|
| 277 |
+
[50] Junsheng Zhou, Yuwang Wang, Naiyan Wang, and Wenjun Zeng. Unsupervised high-resolution depth learning from videos with dual networks. In International Conference on Computer Vision. IEEE, 2019. 7
|
| 278 |
+
[51] Lipu Zhou, Jiamin Ye, Montiel Abello, Shengze Wang, and Michael Kaess. Unsupervised learning of monocular depth estimation with bundle adjustment, super-resolution and clip loss. arXiv preprint arXiv:1812.03368, 2018. 2
|
| 279 |
+
[52] Tinghui Zhou, Matthew Brown, Noah Snavely, and David G Lowe. Unsupervised learning of depth and ego-motion from video. In CVPR, volume 2, page 7, 2017. 2, 3, 5, 6, 7
|
| 280 |
+
[53] Yuliang Zou, Zelun Luo, and Jia-Bin Huang. Df-net: Unsupervised joint learning of depth and flow using cross-task consistency. In ECCV, 2018. 1, 2, 7
|
3dpackingforselfsupervisedmonoculardepthestimation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f4227fa3267f5ebef9454d4e57a0e108a780069482d4395dd38201a016e14c41
|
| 3 |
+
size 737295
|
3dpackingforselfsupervisedmonoculardepthestimation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:559a527ec74105f83c7afb5316374d80279100b3e82fdd51374f007859ab4070
|
| 3 |
+
size 371359
|
3dpartguidedimageeditingforfinegrainedobjectunderstanding/48654681-1e22-4cee-a2f1-9f712cc0b228_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dabaed30c3026d4c8c2a7390c1d58dd6b033b42dc00afe13b44c728fd8dc7591
|
| 3 |
+
size 78995
|
3dpartguidedimageeditingforfinegrainedobjectunderstanding/48654681-1e22-4cee-a2f1-9f712cc0b228_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:80747bd61b0d5e266c824f1e6c083c59c628fd601409cf5cdd55d8852a94878b
|
| 3 |
+
size 95578
|
3dpartguidedimageeditingforfinegrainedobjectunderstanding/48654681-1e22-4cee-a2f1-9f712cc0b228_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:246b55adc7fc93c92129b38e0a0861ac5ed5427b5ff063b1c5b369b481f7339d
|
| 3 |
+
size 7346149
|
3dpartguidedimageeditingforfinegrainedobjectunderstanding/full.md
ADDED
|
@@ -0,0 +1,322 @@
| 1 |
+
# 3D Part Guided Image Editing for Fine-grained Object Understanding
|
| 2 |
+
|
| 3 |
+
Zongdai Liu $^{\dagger 1}$ , Feixiang Lu $^{\dagger 2,6}$ , Peng Wang $^{\S 2,5}$ , Hui Miao $^{1}$ , Liangjun Zhang $^{2,6}$ , Ruigang Yang $^{2,3,6}$ and Bin Zhou $^{*1,4}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ State Key Laboratory of Virtual Reality Technology and Systems, Beihang University $^{2}$ Robotics and Autonomous Driving Laboratory, Baidu Research $^{3}$ University of Kentucky $^{4}$ Peng Cheng Laboratory, Shenzhen, China $^{5}$ ByteDance Research $^{6}$ National Engineering Laboratory of Deep Learning Technology and Application, China
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Holistically understanding an object with its 3D movable parts is essential for the visual models of a robot interacting with the world. For example, only by understanding the many possible part dynamics of other vehicles (e.g., a door or trunk opening, a taillight blinking before a lane change) can a self-driving vehicle succeed in dealing with emergency cases. However, existing visual models rarely tackle these situations, focusing instead on bounding box detection. In this paper, we fill this important missing piece in autonomous driving by solving two critical issues. First, to deal with data scarcity, we propose an effective training data generation process that fits a 3D car model with dynamic parts to cars in real images. This allows us to directly edit the real images using the aligned 3D parts, yielding effective training data for learning robust deep neural networks (DNNs). Second, to benchmark the quality of 3D part understanding, we collected a large dataset of real driving scenes with cars in uncommon states (CUS), i.e., with a door or trunk opened etc., which demonstrates that our network trained with edited images largely outperforms other baselines in terms of 2D detection and instance segmentation accuracy.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
An object, e.g. a car or a person, is commonly composed of articulated and movable 3D parts [47, 24]. Understanding an object with its 3D parts and their future states within images or videos is essential for the vision/perception system of a robot to decide its actions when interacting with the world. For example, in the popular au
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1: Fine-grained parsing of cars in uncommon states on various datasets. The results include 2D detection (red bounding box), instance segmentation (orange mask), dynamic part segmentation (blue mask), and state description. Note that common-state cars are shown in green.
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
|
| 30 |
+
tonomous driving (AD) scenario, when a car parked on the road has its door opened, it is very likely that someone is about to get off. In response, the autonomous vehicle should immediately slow down, turn the steering wheel, and change lanes. Though this case is not common, it can be deadly without such understanding, and in real driving scenarios there are many such cases, as illustrated in Fig. 1.
|
| 31 |
+
|
| 32 |
+
However, the dominant visual perception systems based on deep neural networks, though they have achieved great success in 2D/3D detection [34, 12], instance segmentation [17] and pose estimation [4, 22, 39], rely on a coarse understanding of objects with bounding boxes or masks. In our opin
|
| 33 |
+
|
| 34 |
+
ion, this is not sufficient for performing actions with respect to the 3D part dynamics of vehicles on the street.
|
| 35 |
+
|
| 36 |
+
This paper is a step forward to fill this missing piece, especially in the AD scenario, by providing a model that enables detailed 3D part parsing of an object. To perform such a task, we first look through many popular AD datasets, such as KITTI [14], CityScapes [6] and ApolloScape [20, 46]. As shown in Fig. 1, we found, first, that cases where a car has a part moved, as discussed above, do exist in real driving scenarios. Second, such cases are too scarce, e.g. only tens of cars, to train an effective model for 3D part understanding when these cases happen.
|
| 37 |
+
|
| 38 |
+
To generate a sufficient amount of data for training a model to understand 3D parts, the common strategy is to manually crowd-source a large amount of real images [15], which is labor-intensive, while other solutions, such as obtaining datasets from simulated environments and computer graphics [1, 45, 31], suffer from a strong domain gap in car and scene appearance relative to realistic scenarios. To balance the two and automatically generate training data for current deep learning models [17], we propose a 3D part guided image editing strategy, as illustrated in Fig. 2: we first fit a 3D car model with dynamic parts to cars in images, then re-render the car with re-configured parts and realistic texture. Specifically, we adopt models from the ApolloCar3D [39] dataset, where each car instance is fitted with a 3D model, and we define ten common dynamic parts, i.e., bonnet, trunk, four doors, two headlights and two taillights, for each type of 3D car model. For each part, we label its motion axis, which constrains the range of possible movement. By sampling all possible motions of a 3D car instance, our strategy automatically edits the 2D car instance inside images, yielding a large number of training samples.
|
| 39 |
+
|
| 40 |
+
Based on the generated data, we design and train a multitask network performing object understanding with fine granularity, including 2D detection, instance segmentation, dynamic part segmentation, and 3D car state description. Our deep model is significantly more robust in understanding cars in AD than models trained without our generated dataset.
|
| 41 |
+
|
| 42 |
+
Finally, to benchmark our model and strategies, we construct, to our best knowledge, the first dataset with a large amount of annotated uncommon states of cars in AD, i.e. with a door or trunk open etc., which contains 1441 labelled street-view images, 1850 car instances, and 12 defined states. We evaluate part understanding quality extensively with this dataset, and show our network and training strategies yield large improvements (over $8\%$ relative) in discovering and understanding these uncommon cases.
|
| 43 |
+
|
| 44 |
+
In summary, our contributions are in three aspects:
|
| 45 |
+
|
| 46 |
+
- We present a 3D part guided image editing pipeline for automatic training data generation, which helps to learn fine-grained object understanding models in AD.
|
| 47 |
+
|
| 48 |
+
- We design a multi-task network architecture which produces outputs for both instance-level and part-level object understanding.
|
| 49 |
+
|
| 50 |
+
- To benchmark our data generation strategies, and network architectures, we build a large dataset which contains 1441 real images with fine-grained annotation of objects in many uncommon states. It demonstrates the effectiveness of our approaches.
|
| 51 |
+
|
| 52 |
+
# 2. Related Work
|
| 53 |
+
|
| 54 |
+
Fine-grained object understanding is one of the central problems in autonomous driving. Our work is mostly related to two areas: datasets for autonomous driving and fine-grained vehicle parsing. We review the related works in the following.
|
| 55 |
+
|
| 56 |
+
Datasets for Autonomous Driving. Focusing on perception in autonomous driving, several datasets have been constructed and released. The first dataset is CamVid [2], which annotates 701 images with 32 semantic classes. The later-released KITTI benchmark [14] contains multiple vision tasks (e.g., optical flow, 2D/3D detection). However, it mainly annotates 2D/3D bounding boxes for each car, resulting in 7481 training and 7518 test images. Recently, the CityScapes dataset [6] labelled vehicles with instance-level segmentation, releasing 2975 training, 500 validation, and 1525 test images. ApolloScape [20] is a large-scale AD dataset for various 2D and 3D tasks. It provides pixel-level annotations for 2D scene parsing, covering about 140K images. ApolloCar3D [39] is a 3D instance car dataset built from real images in driving scenes. For each car instance in a 2D image, the 3D model and corresponding 6-DoF pose are manually labelled. Moreover, there exist other real street-view self-driving datasets (e.g., Toronto [48], Mapillary [30], and BDD100K [51]) and synthetic datasets (e.g., SYNTHIA [37], P.F.B. [35], and Virtual KITTI [11]). However, all of these datasets only annotate common cars with 2D bounding boxes or semantic/instance segmentation, while cars in uncommon states are ignored (e.g., an opened door or trunk, or flashing headlights or taillights). In an AD scenario, this information can help predict the further actions of a vehicle, which is very important for safety.
|
| 57 |
+
|
| 58 |
+
Data Generation for Deep Network. Learning effective deep networks (e.g., AlexNet [21], VGG [38], ResNet [18], and FPN [25]) depends on a large amount of training data for each individual task. However, real data collection and annotation [8, 26, 20] are laborious. To avoid the difficulties of data labelling, synthetic data is widely used for training deep networks. Current image synthesis techniques can be roughly divided into two classes: 3D model rendering and 2D image 'cut-paste' [10]. Recently, several large-scale 3D model datasets have been released, such as ShapeNet [5], ModelNet [49] and ScanNet [7]. Researchers directly render 3D models to obtain 2D im
|
| 59 |
+
|
| 60 |
+

|
| 61 |
+
Figure 2: Overview of our data augmentation pipeline.
|
| 62 |
+
|
| 63 |
+
ages for training. However, rendering is time-consuming and requires pre-building complex realistic 3D scenes. Therefore, some works cut objects from images and then paste them onto other backgrounds to synthesize photo-realistic training data. However, the diversity of 'cut-paste' results is limited. Furthermore, this approach cannot handle the problem of occlusion.
|
| 64 |
+
|
| 65 |
+
Nevertheless, many computer vision tasks benefit from synthetic data, such as optical flow [3, 9], scene flow [28], stereo [32, 52], semantic segmentation [36, 37], 3D keypoint extraction [42], viewpoint [40], object pose [29], 3D reconstruction [16], and object detection [1, 13, 31, 45]. The key problem for these works is to bridge the appearance domain gap to realistic images. Domain randomization [43] is widely used for vehicle detection [31, 45] and achieves strong performance. Alhaija et al. [1] take advantage of an AR approach to overlay vehicle rendering results onto real street-view images, yielding augmented photo-realistic training data. Hinterstoisser et al. [19] show that freezing a pre-trained feature extractor allows training a good object detector with synthetic data only.
|
| 66 |
+
|
| 67 |
+
Fine-grained Parsing and Understanding. For AD, as discussed in Sec. 1, it is important to detect, segment, and parse the moving objects into part-level semantics. Here, state-of-the-art (SOTA) methods often rely on detect-then-understand pipelines. Specifically, an object is first localized using detectors such as one-stage methods (e.g., SSD513 [12], YOLOv3 [33]) or two-stage methods (e.g., Faster-RCNN [34], Mask-RCNN [17]); fine-grained recognition is then performed on object parts, such as part keypoint regression [39] and part segmentation [47, 50]. Most recently, Lu et al. [27] extend part-level pixel-wise annotation to the part state inference problem, such that visual models can be more instructive. Our work follows this trend while extending previous works with object part understanding in 3D to handle uncommon cases in the AD scenario.
|
| 68 |
+
|
| 69 |
+
# 3. 3D Part Guided Image Editing
|
| 70 |
+
|
| 71 |
+
In this section, we introduce how to leverage the 3D parts to automatically edit the source 2D images. To achieve this goal, four essential components are required: 1) 3D part segmentation and motion axis annotation; 2) 3D transformation and 2D projection; 3) hole filling and image filtering; 4) invisible region generation.
|
| 72 |
+
|
| 73 |
+
Recently, Song et al. [39] published a 2D-3D alignment dataset: ApolloCar3D, which annotates the 3D model and 6-DoF pose for each 2D car instance. Based on the released 3D CAD models of cars, we manually segment out the movable parts (i.e., bonnet, trunk and four doors) and the semantic parts (i.e., two headlights and two taillights), respectively. For semantic parts, we directly project them to obtain the corresponding 2D regions, which are further edited to produce yellow or red flashing effects (the third row in Fig. 3). For movable parts, we first annotate their motion axes, then transform the 3D parts to guide 2D image editing. Note that the 3D models provided by ApolloCar3D are low-quality, so it is difficult to obtain an appropriate texture map from the source image to perform photo-realistic rendering.
|
| 74 |
+
|
| 75 |
+
Instead, we render the 3D geometry of the parts to obtain the corresponding depth map $D$ , according to the global rotation $\mathbf{R_g}$ , translation $\mathbf{t_g}$ , and the camera intrinsic matrix $\mathbf{K}$ . For each 2D pixel $\mathbf{u} = (u,v)^{\top}$ with depth value $D(\mathbf{u})$ , we back-project it to the 3D point $\mathbf{P} = (x,y,z)^{\top}$ through
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\mathbf{P} = \mathbf{R}_{\mathbf{g}}^{-1} \cdot \left( D(\mathbf{u}) \cdot \mathbf{K}^{-1} \cdot \dot{\mathbf{u}} - \mathbf{t}_{\mathbf{g}} \right). \tag{1}
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
Here, $\dot{\mathbf{u}}$ is a homogeneous vector: $\dot{\mathbf{u}} = (\mathbf{u}^{\top}|1)^{\top}$ .
|
| 82 |
+
|
| 83 |
+
Assuming the part is locally transformed by a 3D rotation $\mathbf{R_o}$ about its motion axis, with $\mathbf{t_o}$ the translation of the axis in the global coordinate frame, we compute the pixel's new position $\mathbf{u}'$ in the image domain as:
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\mathbf{u}^{\prime} = \left\lfloor \pi\left(\mathbf{K} \cdot \left(\mathbf{R}_{\mathbf{g}}\left(\mathbf{R}_{\mathbf{o}}\left(\mathbf{P} - \mathbf{t}_{\mathbf{o}}\right) + \mathbf{t}_{\mathbf{o}}\right) + \mathbf{t}_{\mathbf{g}}\right)\right) \right\rfloor. \tag{2}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+

|
| 90 |
+
Figure 3: The generated cars in uncommon states by our approach. The editing results of movable parts (i.e., trunk, bonnet, and four doors) are shown in the 1st row and the 2nd row. And the editing results of semantic parts (i.e., two headlights and two taillights) are shown in the 3rd row.
|
| 91 |
+
|
| 92 |
+

|
| 93 |
+
|
| 94 |
+

|
| 95 |
+
|
| 96 |
+

|
| 97 |
+
Figure 4: The architecture of our two-backbone network, which can output 2D detection, instance-level segmentation, dynamic part segmentation, and state description.
|
| 98 |
+
|
| 99 |
+
Here, the function $\mathbf{u} = \pi (\mathbf{P})$ performs perspective projection of $\mathbf{P}\in \mathbb{R}^3 = (x,y,z)^\top$ including dehomogenisation to obtain $\mathbf{u}\in \mathbb{R}^2 = (x / z,y / z)^\top$ .
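The editing step implied by Eqs. (1)-(2) can be sketched as follows: for every pixel of the rendered part, back-project it with its depth, articulate it about the motion axis, and re-project it into the image. The function name, the dense per-pixel loop, and the nearest-pixel rounding are illustrative; the actual pipeline additionally fills holes and filters the result, as described below.

```python
import numpy as np

def remap_part_pixels(depth, part_mask, K, R_g, t_g, R_o, t_o):
    """Sketch of the per-pixel remapping of Eqs. (1)-(2)."""
    new_positions = {}
    K_inv = np.linalg.inv(K)
    for v, u in zip(*np.nonzero(part_mask)):
        d = depth[v, u]
        # Eq. (1): pixel + rendered depth -> 3D point in the object frame
        P = np.linalg.inv(R_g) @ (d * K_inv @ np.array([u, v, 1.0]) - t_g)
        # Eq. (2): rotate the part about its axis, then project back
        p = K @ (R_g @ (R_o @ (P - t_o) + t_o) + t_g)
        new_positions[(u, v)] = (int(p[0] / p[2]), int(p[1] / p[2]))
    return new_positions
```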
|
| 100 |
+
|
| 101 |
+
Note that the transformed pixels are always sparse and noisy in the part region (Fig. 2 (e)). Here, we call a non-valued pixel a 'hole'. In order to fill these holes, we apply the linear blending algorithm [41] to obtain the RGB values. After interpolating the non-valued pixels, we apply a bilateral filter [44] on the edited images. The smoothed results are shown in Fig. 2 (f) and Fig. 3.
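As a rough sketch of this post-processing (with OpenCV inpainting standing in for the linear blending of [41], and filter parameters chosen purely for illustration):

```python
import cv2

edited = cv2.imread("edited_car.png")                   # hypothetical edited crop
hole_mask = (edited.sum(axis=2) == 0).astype("uint8")   # non-valued pixels ("holes")
filled = cv2.inpaint(edited, hole_mask, inpaintRadius=3, flags=cv2.INPAINT_TELEA)
smoothed = cv2.bilateralFilter(filled, d=9, sigmaColor=75, sigmaSpace=75)
cv2.imwrite("edited_car_smoothed.png", smoothed)
```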
|
| 102 |
+
|
| 103 |
+
Invisible region generation. For the case of an opening door, we can generate visually compelling results if the car faces the camera. When the car faces the opposite direction, opening a door introduces some invisible regions in the original image. These invisible regions can be roughly divided into two classes: one is the reverse side of the part, the other is the vehicle interior (e.g., seat, steering wheel and engine). Empirically, the interior regions are usually dark due to inadequate illumination. Therefore, we directly fill interior regions with a gray color. We have also tried random colors and patches from real images; however, according to the experimental results, we find no obvious differences among them.
|
| 104 |
+
|
| 105 |
+
Compared with the interior regions, coloring the reverse side of a part is rather complex. As shown in Fig. 2, it is not appropriate to directly fill it with a pure color. Thus, we adopt a photo-realistic rendering pipeline to generate high-fidelity results for the reverse side. Considering the low-quality models provided by ApolloCar3D, we first construct a small expert-designed 3D model database for movable parts. Each part is designed by a professional artist with the commercial software 3dsMax. The part materials are manually labelled and BRDF parameters are predefined. As shown in Fig. 2 (h), we use the environment map estimated [23] from ApolloCar3D to perform photo-realistic rendering. Our editing results are shown in Fig. 3.
|
| 106 |
+
|
| 107 |
+

|
| 108 |
+
|
| 109 |
+
# 4. Network Architectures
|
| 110 |
+
|
| 111 |
+
We propose a novel multi-task deep neural network architecture shown in Fig. 4, which is used for fine-grained object understanding. In this section, we discuss the modules of our network and training settings in detail.
|
| 112 |
+
|
| 113 |
+
# 4.1. Two Backbones
|
| 114 |
+
|
| 115 |
+
We aim to detect cars in uncommon states from real street-view images while training only on edited images. To achieve this transfer from synthetic data to real data, our network contains two ResNet50-FPN [25] backbones. We pre-train the main backbone on both the ApolloCar3D [39] and CityScapes [6] benchmarks using Mask-RCNN to extract car body features, guided by a car detection task. Simultaneously, we pre-train the auxiliary backbone on the COCO dataset to learn general features of the edited regions (e.g., the rendered parts), guided by a general detection task. Finally, we fix the parameters of these two backbones and train the network on the editing data. Indeed, experimental results in Sec. 6.4 demonstrate that we obtain the best performance by freezing both backbones.
|
| 116 |
+
|
| 117 |
+
# 4.2. Dynamic Part Segmentation
|
| 118 |
+
|
| 119 |
+
We adopt Mask-RCNN [17] to implement dynamic part segmentation. In Mask-RCNN, the mask branch outputs a $Km^2$ -dimensional binary mask for each RoI-aligned feature map, where $K$ is the number of classes and $m$ is the resolution. We add dynamic part segmentation as a new channel, resulting in an output containing a $(K + 1)m^2$ -dimensional binary mask. Specifically, we feed the $14 \times 14$ RoI-aligned feature map to four sequential 256-d $3 \times 3$ convolution layers. A $2 \times 2$ deconvolution layer is used to up-sample the output to $28 \times 28$ . Finally, we define $L_{\text{part}}$ as the average per-pixel sigmoid cross-entropy loss.
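A minimal PyTorch sketch of this extended mask branch is given below; the final 1x1 predictor layer and the default class count are assumptions made for illustration.

```python
import torch
import torch.nn as nn

class PartMaskHead(nn.Module):
    """Mask branch with K class masks plus one dynamic-part channel (sketch)."""
    def __init__(self, in_channels=256, num_classes=80):
        super().__init__()
        convs = []
        for _ in range(4):                       # four 256-d 3x3 conv layers
            convs += [nn.Conv2d(in_channels, 256, 3, padding=1), nn.ReLU(inplace=True)]
            in_channels = 256
        self.convs = nn.Sequential(*convs)
        self.upsample = nn.ConvTranspose2d(256, 256, kernel_size=2, stride=2)
        self.predictor = nn.Conv2d(256, num_classes + 1, kernel_size=1)

    def forward(self, roi_feat):                 # roi_feat: N x 256 x 14 x 14
        x = self.upsample(self.convs(roi_feat))  # N x 256 x 28 x 28
        return self.predictor(x)                 # N x (K+1) x 28 x 28

logits = PartMaskHead()(torch.randn(2, 256, 14, 14))
```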
|
| 120 |
+
|
| 121 |
+
<table><tr><td rowspan="2">Datasets</td><td>Bonnet</td><td>Trunk</td><td colspan="4">Doors</td><td colspan="2">Headlights</td><td colspan="4">Taillights</td><td rowspan="2">Total</td></tr><tr><td>lifted</td><td>lifted</td><td>fl-o.</td><td>fr-o.</td><td>bl-o.</td><td>br-o.</td><td>l-tu.</td><td>r-tu.</td><td>l-tu.</td><td>r-tu.</td><td>stop</td><td>alarm</td></tr><tr><td>KITTI</td><td>1</td><td>9</td><td>1</td><td>0</td><td>0</td><td>5</td><td>1</td><td>0</td><td>2</td><td>1</td><td>8</td><td>0</td><td>28</td></tr><tr><td>CityScapes</td><td>0</td><td>0</td><td>14</td><td>5</td><td>8</td><td>4</td><td>3</td><td>2</td><td>4</td><td>0</td><td>15</td><td>0</td><td>55</td></tr><tr><td>ApolloScape</td><td>0</td><td>23</td><td>29</td><td>0</td><td>59</td><td>157</td><td>15</td><td>18</td><td>23</td><td>27</td><td>33</td><td>16</td><td>400</td></tr><tr><td>ApolloCar3D</td><td>0</td><td>13</td><td>19</td><td>1</td><td>0</td><td>11</td><td>3</td><td>5</td><td>12</td><td>9</td><td>21</td><td>0</td><td>94</td></tr><tr><td>Capt. Images</td><td>15</td><td>405</td><td>232</td><td>66</td><td>79</td><td>346</td><td>19</td><td>17</td><td>25</td><td>18</td><td>44</td><td>7</td><td>1273</td></tr><tr><td>CUS Dataset</td><td>16</td><td>450</td><td>295</td><td>72</td><td>146</td><td>523</td><td>41</td><td>42</td><td>66</td><td>55</td><td>121</td><td>23</td><td>1850</td></tr></table>
|
| 122 |
+
|
| 123 |
+
Table 1: The constructed CUS dataset, which annotates 1850 car instances in uncommon states from 1441 street-view images. 'fl-o. (br-o.)' indicates the opened front-left (back-right) part, and 'l-tu. (r-tu.)' indicates turning left (right).
|
| 124 |
+
|
| 125 |
+
# 4.3. State Description
|
| 126 |
+
|
| 127 |
+
We use a binary variable to represent the existence of a particular part state (i.e., 1 if the state is present and 0 otherwise). Then, we define the 'part state vector' as the concatenation of all binary variables. Our method regresses the part state vector through the sequential convolution layers and a fully connected layer in the mask branch. Similarly, we define $L_{state}$ as the average sigmoid cross-entropy loss.
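For illustration, $L_{state}$ over a batch of RoIs might be computed as below; the 12-state width follows Tab. 1, and the dummy tensors stand in for the mask-branch features and their annotations.

```python
import torch
import torch.nn as nn

num_states = 12                                      # binary part states (Tab. 1)
state_logits = torch.randn(8, num_states)            # regressed part state vectors
state_gt = torch.randint(0, 2, (8, num_states)).float()
L_state = nn.BCEWithLogitsLoss()(state_logits, state_gt)  # mean sigmoid cross-entropy
```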
|
| 128 |
+
|
| 129 |
+
# 4.4. Training Details
|
| 130 |
+
|
| 131 |
+
At first, we pre-train a Mask-RCNN with ResNet50-FPN backbone both on ApolloCar3D [39] benchmark and CityScapes [6] benchmark through a car instance segmentation task. Then, we initialize the main backbone by copying the parameters of the pre-trained network. Simultaneously, we pre-train the auxiliary backbone on COCO dataset using the same network architecture. Finally, we fix the parameters of these two backbones to train the network on the editing data. The multi-task loss is defined as:
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
\begin{aligned} L = {} & L_{\text{class}}^{p} + L_{\text{reg}}^{p} + L_{\text{class}}^{r} + L_{\text{box}}^{r} \\ & + L_{\text{mask}}^{r} + L_{\text{state}}^{r} + L_{\text{part}}^{r}, \end{aligned} \tag{3}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
where $(.)^p$ and $(.)^r$ indicate the RPN and RCNN, respectively. The subscripts state and part denote the losses of the state vector and the part mask, respectively. We minimize our loss function using SGD with a weight decay of 0.0001 and a momentum of 0.9. The learning rate is initially set to 0.002 and scaled by 0.1 every 5 epochs.
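A sketch of this optimization setup follows; the placeholder module stands in for the two-backbone network, whose frozen backbone parameters would have requires_grad set to False.

```python
import torch

model = torch.nn.Linear(8, 8)   # placeholder for the two-backbone network
trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(trainable, lr=0.002, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.1)

for epoch in range(15):
    # ... forward pass, compute the multi-task loss L of Eq. (3), backward, step ...
    scheduler.step()            # learning rate scaled by 0.1 every 5 epochs
```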
|
| 138 |
+
|
| 139 |
+
# 5. CUS Dataset
|
| 140 |
+
|
| 141 |
+
To our best knowledge, none of the existing datasets provides detailed annotations of cars in uncommon states (CUS). To evaluate the quality of the edited data and benchmark network performance, we construct a CUS dataset from annotated real street-view images. Specifically, we first look through the existing AD-oriented datasets, including KITTI [14], CityScapes [6], ApolloScape [20], and ApolloCar3D [39]. These four datasets have a total of 80,000
|
| 142 |
+
|
| 143 |
+
images, which label over 1 million car instances. However, very few of them are in uncommon states (Tab. 1).
|
| 144 |
+
|
| 145 |
+
To add more CUS data, we drive a car to capture images at various sites (i.e., hospital, park, school, and urban road) and at different times (i.e., morning, noon, and afternoon). Consequently, we capture about 150,000 images in total. After removing blurred and overexposed images, we finally collect 1273 car instances to label.
|
| 146 |
+
|
| 147 |
+
As shown in Tab. 1, our dataset covers 10 dynamic parts (i.e., bonnet, trunk, four doors, two headlights, and two taillights) and 12 uncommon states, annotating 1850 car instances from 1441 images. For each car instance, we manually labelled the 2D bounding box, instance segmentation, dynamic part segmentation, and state description. Notice that our trained deep model is tested directly on the CUS dataset without any 'domain adaptation' or 'fine-tuning' strategies. We believe the constructed benchmark can effectively verify the quality of the editing data and quantitatively evaluate the network performance.
|
| 148 |
+
|
| 149 |
+
# 6. Results
|
| 150 |
+
|
| 151 |
+
# 6.1. Experimental Settings
|
| 152 |
+
|
| 153 |
+
Our network is trained on a 64-bit workstation with an 8-core 3.4 GHz CPU, 4 Nvidia Titan XP graphics cards, and Ubuntu 16.04. The generated training data mostly comes from the ApolloCar3D dataset, which labels the 3D model and 6-DoF pose for each car instance. Considering the obvious domain gap among different datasets, we further annotate 100 common car instances with 2D-3D alignment in KITTI, CityScapes, ApolloScape, and our captured images, respectively. Then we apply the proposed editing approach to generate CUS data for training. The editing time for each car is about 3 seconds: specifically, 0.5s for 3D point transformation and projection, 0.5s for hole filling and filtering, and 2s for invisible region generation.
|
| 154 |
+
|
| 155 |
+
The training time of our network depends on the amount of data. In general, training on 25K images takes about 24 hours. In the testing phase, we directly use the trained model to perform fine-grained understanding on the CUS dataset. As shown
|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
(a)
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
Figure 5: The training data of different approaches: (a) raw images; (b) rendering data; (c) editing data by our approach.
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
Figure 6: Visualization results on 2D detection and instance segmentation (five baseline methods vs. ours).
|
| 171 |
+
|
| 172 |
+
# 6.2. Evaluation Metric
|
| 173 |
+
|
| 174 |
+
In Sec. 6.3, our network is compared with Mask-RCNN. Note that the proposed benchmark focuses only on CUS, while Mask-RCNN cannot distinguish cars in common and uncommon states, both of which exist in the testing data. If we used the $AP$ metric to evaluate this experiment, the detected common-state cars would decrease the precision, resulting in an inaccurate $AP$ value. Therefore, we compute the maximum IoU between the ground truth and the predictions to evaluate the network performance.
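A sketch of this evaluation for 2D boxes is shown below (averaging over ground-truth CUS instances is an assumption; the same max-IoU idea applies to instance masks):

```python
import numpy as np

def iou(a, b):
    """IoU of two (x1, y1, x2, y2) boxes."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / (union + 1e-9)

def max_iou_score(gt_boxes, pred_boxes):
    """Mean over ground-truth CUS boxes of the best-matching prediction's IoU."""
    return float(np.mean([max(iou(g, p) for p in pred_boxes) for g in gt_boxes]))
```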
|
| 175 |
+
|
| 176 |
+
Different from Mask-RCNN, our two-backbone network can correctly detect cars in uncommon states. For the ablation study in Sec. 6.4, we choose the $mAP$ metric to evaluate the performance of 2D detection, instance segmentation, and part segmentation. For state description, we compute the match rate of each binary item between the predicted state vectors and the ground truth.
|
| 177 |
+
|
| 178 |
+
<table><tr><td>Methods</td><td>2D Detection (IoU)</td><td>Ins. Seg. (IoU)</td></tr><tr><td>Baseline 1</td><td>0.751</td><td>0.704</td></tr><tr><td>Baseline 2</td><td>0.758</td><td>0.712</td></tr><tr><td>Baseline 3</td><td>0.775</td><td>0.721</td></tr><tr><td>Baseline 4</td><td>0.766</td><td>0.713</td></tr><tr><td>Baseline 5</td><td>0.772</td><td>0.719</td></tr><tr><td>Ours</td><td>0.862</td><td>0.815</td></tr></table>
|
| 179 |
+
|
| 180 |
+
Table 2: 2D detection and instance segmentation evaluation results with different approaches on CUS dataset.
|
| 181 |
+
|
| 182 |
+

|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
in Fig. 1, Fig. 7 (c) and Fig. 10, our network outputs holistic parsing results, including 2D detection, instance-level segmentation, dynamic part segmentation, and state description. Source code, data and more results can be found on the project page (https://github.com/zongdai/EditingForDNN).
|
| 188 |
+
|
| 189 |
+

|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
|
| 195 |
+
# 6.3. Comparison with Baseline Methods
|
| 196 |
+
|
| 197 |
+
To demonstrate that our method can effectively improve the performance of 2D detection and instance-level segmentation, the following baseline methods are compared (Tab. 2):
|
| 198 |
+
|
| 199 |
+
Baseline 1: Mask-RCNN + Existing Datasets. We train the Mask-RCNN network on the existing datasets (i.e., KITTI, CityScapes, ApolloScape, and ApolloCar3D), which only annotate the common-state cars (Fig. 5 (a)). In the testing phase, we directly output the results of 2D detection and instance-level segmentation on CUS dataset.
|
| 200 |
+
|
| 201 |
+
Baseline 2: Mask-RCNN + Rendering Data. We implement the rendering-based data generation pipeline adopted in most image synthesis works [40, 31, 45]. Following [1], we construct 50 high-quality textured CAD models of cars, labelled with dynamic parts and motion axes. We transform the car models according to the 6-DoF pose and operate the dynamic parts to generate uncommon states. Here, we use the Blender software to obtain rendering results, which are further overlaid onto the background (Fig. 5 (b)). Consequently, we build a rendering dataset consisting of 25K images. We train the Mask-RCNN network on the rendering data and test on the CUS dataset.
|
| 202 |
+
|
| 203 |
+
Baseline 3: Mask-RCNN + Editing Data. We then train the Mask-RCNN network using our editing data (Fig. 5 (c)), which has the same number of images as the rendering data. We evaluate the trained Mask-RCNN network on the CUS dataset.
|
| 204 |
+
|
| 205 |
+
Baseline 4: Our Network + Existing Datasets. We train our two-backbone network using the existing datasets which are introduced in Baseline 1.
|
| 206 |
+
|
| 207 |
+
Baseline 5: Our Network + Rendering Data. We train the proposed two-backbone network using the rendering data which is illustrated in Baseline 2.
|
| 208 |
+
|
| 209 |
+
Our method: Our Network + Editing Data. At last, we train our two-backbone network using the editing data. The quantitative results of these approaches are listed in Tab. 2.
|
| 210 |
+
|
| 211 |
+
<table><tr><td>Methods</td><td>2D Detection (mAP)</td><td>Instance Seg. (mAP)</td><td>Part Seg. (mAP)</td><td>State Description</td></tr><tr><td>Single Backbone Re-trained</td><td>0.136</td><td>0.114</td><td>0.144</td><td>0.149</td></tr><tr><td>Single Backbone Frozen</td><td>0.672</td><td>0.516</td><td>0.273</td><td>0.837</td></tr><tr><td>Two Backbones Frozen</td><td>0.701</td><td>0.563</td><td>0.314</td><td>0.874</td></tr></table>
|
| 212 |
+
|
| 213 |
+
Table 3: Ablation study of our network on 2D detection, instance segmentation, part segmentation, and state description.
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
(a)
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
(b)
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
(c)
|
| 223 |
+
|
| 224 |
+
The results of Baseline 1 indicate that the Mask-RCNN model trained on common-state cars can detect and segment the car body; however, the dynamic parts are always ignored (Fig. 6 (a)). The results of Baseline 2 show that rendering data improves the network performance compared with Baseline 1, although the rendering data (Fig. 5 (b)) has a natural domain gap with the real captured images (Fig. 5 (a)). In addition, 3D rendering costs roughly 10x more time than the editing-based approach. The results of Baseline 3 show that Mask-RCNN trained on editing data outperforms both existing datasets and rendering data. However, when we visualize the detection and segmentation results in Fig. 6 (c), it is clear that the visible parts are handled well while the reverse side of a dynamic part suffers from errors.
|
| 225 |
+
|
| 226 |
+
Baseline 4 and Baseline 5 use our two-backbone network trained on the existing datasets and the rendering data, respectively. However, the performance of both baselines is not improved significantly. Here, we emphasize that our two-backbone network is carefully designed to learn from the editing data, especially the dynamic parts; directly using our network cannot effectively learn from other data, because it lies in a different domain. Consequently, our two-backbone network trained on editing data achieves the best performance, surpassing the other methods by over 8 percent on both tasks (Tab. 2). The main improvement comes from the invisible regions (Fig. 6 (f)).
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
Figure 7: Visual results on ablation study of our network: (a) single backbone re-trained; (b) single backbone frozen; (c) two backbones frozen. The words with red/green color indicate the wrong/correct state descriptions.
|
| 230 |
+
Figure 8: The performance of our two-backbone network with different number of training data.
|
| 231 |
+
|
| 232 |
+
# 6.4. Performance Analysis
|
| 233 |
+
|
| 234 |
+
The impact of our network structure. Besides 2D detection and instance segmentation, our network can detect cars in uncommon states, segment dynamic parts, and describe states. To illustrate the impact of our network structure, we conduct an ablation study, shown in Tab. 3, using a constant amount of training data (i.e., 25K). We first retrain the single backbone, which is a common strategy in most deep networks (e.g., [17]). The results show that it can hardly predict the correct class of CUS, leading to bad performance on these tasks (Fig. 7 (a)). We then freeze the single backbone pre-trained on COCO during training on the editing data. The performance improves because the over-fitting problem is relieved; however, the frozen backbone cannot extract adequate features (Fig. 7 (b)). In contrast, our two backbones, pre-trained on a car detection task and a general detection task, can not only extract adequate features but also avoid the over-fitting problem, achieving the best performance on these tasks (Fig. 7 (c)).
|
| 235 |
+
|
| 236 |
+
The impact of the amount of training data. Empirically, the performance of a deep network largely relies on the amount of training data. Here, we conduct an experiment to study the relationship between the amount of data and network performance. Thanks to the fully automatic editing-based approach, we vary the amount of training data from 5K to 40K images with an interval of 5K. Fig. 8 shows the network performance on multiple tasks with respect to
|
| 237 |
+
|
| 238 |
+

|
| 239 |
+
Figure 9: The rendering results with (a) and without (b) environment map.
|
| 240 |
+
|
| 241 |
+
<table><tr><td>Tasks</td><td>w/o Env. Map</td><td>with Env. Map</td></tr><tr><td>2D Detection (mAP)</td><td>0.688</td><td>0.701</td></tr><tr><td>Ins. Seg. (mAP)</td><td>0.538</td><td>0.563</td></tr><tr><td>Part Seg. (mAP)</td><td>0.221</td><td>0.314</td></tr><tr><td>State Description</td><td>0.844</td><td>0.874</td></tr></table>
|
| 242 |
+
|
| 243 |
+
Table 4: The impact of environment map.
|
| 244 |
+
|
| 245 |
+
different amounts of training data. We find that from 5K to 25K, the network performance improves significantly, while from 25K to 40K it is not sensitive to the amount. In practice, we set the amount of training data to 25K, which is a good compromise between efficiency and accuracy.
|
| 246 |
+
|
| 247 |
+
The impact of the environment map. In the proposed data generation pipeline, we render the reverse side of dynamic parts to generate the invisible-region data. In the wild, illumination (or the environment map) plays an important role in determining whether the rendered region is compatible with its surroundings. Here, we conduct an experiment to study the effectiveness of the environment map. We utilize the same amount of reverse-side data with/without the environment map (shown in Fig. 9) to train our network, and evaluate on the proposed CUS dataset. As shown in Tab. 4, the data rendered with the environment map significantly improves the network performance. In particular, dynamic part segmentation gets a 9.3 percent improvement.
|
| 248 |
+
|
| 249 |
+
# 6.5. Application
|
| 250 |
+
|
| 251 |
+
Our results can effectively support a number of high-level vision tasks. As shown in Fig. 10, we integrate a human detection task into our network. Intuitively, there exist rich semantics between humans and the dynamic parts. For example, if someone stands near a lifted trunk, it is very likely that he/she is taking luggage. If someone bends to push a car door, it implies he/she is getting off. Besides action reasoning and interaction understanding, we can even infer people's identities from the uncommon cases. For instance, if the front-left door is opened, the person near the door is usually the driver.
|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
Figure 10: The applications on action reasoning and people identity inference by understanding CUS.
|
| 255 |
+
|
| 256 |
+
# 7. Conclusion and Limitation
|
| 257 |
+
|
| 258 |
+
In this paper, we make the first attempt to analyse cars in uncommon states (CUS). Instead of annotating a large amount of images, we present an editing-based data generation approach which takes advantage of 3D parts. Our method is lightweight yet highly efficient, and it surpasses rendering-based methods by a large margin. To perform holistic understanding of CUS, we propose a multi-task deep network which can simultaneously output 2D detection, instance-level segmentation, dynamic part segmentation, and state description. To benchmark the performance, we construct a CUS dataset which contains 1441 real images (1850 car instances) with fine-grained annotations. The experimental results show that our editing data and deep network perform well on CUS.
|
| 259 |
+
|
| 260 |
+
Nevertheless, there are a number of limitations, which point out directions for future work. First, AD is a huge and complex problem, and the uncommon states analysed in this paper are restricted to cars; we will pay more attention to other objects, such as humans and the road. Second, the outputs of our network are mostly 2D results; we will extend this work to 3D space, such as 3D detection, 3D localization, and 3D reconstruction. Third, we will study CUS on video sequences. Lastly, we will fuse multiple sensors (e.g., RGB camera, stereo camera, Lidar, and Radar) to study the CUS problem.
# Acknowledgement
We thank the anonymous reviewers for their valuable comments. This work was supported in part by National Natural Science Foundation of China (U1736217 and 61932003), National Key R&D Program of China (2019YFF0302902), and Pre-research Project of the Manned Space Flight (060601).
3dpartguidedimageeditingforfinegrainedobjectunderstanding/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:45e06f9a7237ad17fca5540733966ee8fa446baa9c02cd99c204cd215703e4b8
size 732082
3dpartguidedimageeditingforfinegrainedobjectunderstanding/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb6e5f35caa886e01ad0b8965098f64f8bdea353e2b01dbe8534a974c2ab86a2
size 362140
3dphotographyusingcontextawarelayereddepthinpainting/d1b0b4a6-ad26-4d18-aac0-e2f991cc179b_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cac912472aee7c984e0a7716d3359ea94e27dd498e212339e00548aeeea75765
size 96236
3dphotographyusingcontextawarelayereddepthinpainting/d1b0b4a6-ad26-4d18-aac0-e2f991cc179b_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2356c8ba8e3bdc7409767bc80717bee9789aa89e1450849aaa248186c3a8eb18
size 122135
3dphotographyusingcontextawarelayereddepthinpainting/d1b0b4a6-ad26-4d18-aac0-e2f991cc179b_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6f32bc7ac201efae11a7ecef171a0a2d77b93d995b87424a509c9619e2621caf
size 5176667
3dphotographyusingcontextawarelayereddepthinpainting/full.md
ADDED
@@ -0,0 +1,491 @@
# 3D Photography using Context-aware Layered Depth Inpainting
Meng-Li Shih<sup>1,2</sup> (shihsml@gapp.nthu.edu.tw), Shih-Yang Su (shihyang@vt.edu), Johannes Kopf<sup>3</sup> (jkopf@fb.com), Jia-Bin Huang (jbhuang@vt.edu)

<sup>1</sup>Virginia Tech <sup>2</sup>National Tsing Hua University <sup>3</sup>Facebook

https://shihmengli.github.io/3D-Photo-Inpainting

(a) Depth-warping (holes)

(b) Depth-warping (stretching)

(c) Facebook 3D photo
Figure 1. 3D photography from a single RGB-D image. Naive methods either produce holes (a) or stretch content (b) at disocclusions. Color and depth inpainting using diffusion is better, but provides a too smooth appearance (c). Our approach is capable of synthesizing new color/depth texture and structures, leading to more photorealistic novel views (d).

(d) Our result
# Abstract
We propose a method for converting a single RGB-D input image into a 3D photo — a multi-layer representation for novel view synthesis that contains hallucinated color and depth structures in regions occluded in the original view. We use a Layered Depth Image with explicit pixel connectivity as the underlying representation, and present a learning-based inpainting model that synthesizes new local color-and-depth content into the occluded region in a spatial context-aware manner. The resulting 3D photos can be efficiently rendered with motion parallax using standard graphics engines. We validate the effectiveness of our method on a wide range of challenging everyday scenes and show fewer artifacts compared with the state of the art.
# 1. Introduction
3D photography—capturing views of the world with a camera and using image-based rendering techniques for novel view synthesis—is a fascinating way to record and reproduce visual perception. It provides a dramatically more immersive experience than old 2D photography: almost lifelike in Virtual Reality, and even to some degree on normal flat displays when displayed with parallax.
Classic image-based reconstruction and rendering techniques, however, require elaborate capture setups involving many images with large baselines [17, 59, 26, 45, 19, 12], and/or special hardware (e.g., Lytro Immerge, Facebook Manifold camera $^{1}$).
Recently, we have seen work to make capture for 3D photography more effortless by using cell phone cameras and lowering baseline requirements [17, 18]. In the most extreme cases, novel techniques such as Facebook 3D Photos<sup>2</sup> now just require capturing a single snapshot with a dual lens camera phone, which essentially provides an RGB-D (color and depth) input image.
In this work we are interested in rendering novel views from such an RGB-D input. The most salient features in rendered novel views are the disocclusions due to parallax: naive depth-based warping techniques either produce gaps here (Figure 1a) or stretched content (1b). Recent methods try to provide better extrapolations.
Stereo magnification [72] and recent variants [52, 39] use a fronto-parallel multi-plane representation (MPI), which is synthesized from the small-baseline dual camera stereo input. However, MPI produces artifacts on sloped surfaces. Besides, the excessive redundancy in the multi-plane representation makes it memory- and storage-inefficient and costly to render.
Facebook 3D Photos use a layered depth image (LDI) representation [48], which is more compact due to its sparsity, and can be converted into a light-weight mesh representation for rendering. The color and depth in occluded regions are synthesized using heuristics that are optimized for fast runtime on mobile devices. In particular, it uses an isotropic diffusion algorithm for inpainting colors, which produces overly smooth results and is unable to extrapolate texture and structures (Figure 1c).
Several recent learning-based methods also use similar multi-layer image representations [7, 56]. However, these methods use "rigid" layer structures, in the sense that every pixel in the image has the same (fixed and predetermined) number of layers. At every pixel, they store the nearest surface in the first layer, the second-nearest in the next layer, etc. This is problematic, because across depth discontinuities the content within a layer changes abruptly, which destroys locality in receptive fields of convolution kernels.
In this work we present a new learning-based method that generates a 3D photo from an RGB-D input. The depth can either come from dual camera cell phone stereo, or be estimated from a single RGB image [30, 28, 13]. We use the LDI representation (similar to Facebook 3D Photos) because it is compact and allows us to handle situations of arbitrary depth-complexity. Unlike the "rigid" layer structures described above, we explicitly store connectivity across pixels in our representation. However, as a result it is more difficult to apply a global CNN to the problem, because our topology is more complex than a standard tensor. Instead, we break the problem into many local inpainting sub-problems, which we solve iteratively. Each problem is locally like an image, so we can apply standard CNN. We use an inpainting model that is conditioned on spatially-adaptive context regions, which are extracted from the local connectivity of the LDI. After synthesis we fuse the inpainted regions back into the LDI, leading to a recursive algorithm that proceeds until all depth edges are treated.
The results of our algorithm are 3D photos with synthesized texture and structures in occluded regions (Figure 1d). Unlike most previous approaches, we do not require predetermining a fixed number of layers. Instead, our algorithm adapts by design to the local depth-complexity of the input and generates a varying number of layers across the image. We have validated our approach on a wide variety of photos captured in different situations.
# 2. Related Work
Representation for novel view synthesis. Different types of representations have been explored for novel view synthesis, including light fields [15, 29, 2], multi-plane images [72, 52, 39], and layered depth images [48, 55, 7, 56, 17, 18, 6, 42]. Light fields enable photorealistic rendering of novel views, but generally require many input images to achieve good results. The multi-plane image representation [72, 52, 39] stores multiple layers of RGB-$\alpha$ images at fixed depths. The main advantage of this representation is its ability to capture semi-reflective or semi-transparent surfaces. However, due to the fixed depth discretization, sloped surfaces often do not reproduce well, unless an excessive number of planes is used. Many variants of layered depth image representations have been used over time. Representations with a fixed number of layers everywhere have recently been used [7, 56], but they do not preserve locality well, as described in the previous section. Other recent work [17, 18] extends the original work of Shade et al. [48] to explicitly store connectivity information. This representation can locally adapt to any depth-complexity and can be easily converted into a textured mesh for efficient rendering. Our work uses this representation as well.
Image-based rendering. Image-based rendering techniques enable photorealistic synthesis of novel views from a collection of posed images. These methods work best when the images have sufficiently large baselines (so that multi-view stereo algorithms can work well) or are captured with depth sensors. Recent advances include learning-based blending [19], soft 3D reconstruction [45], handling reflection [49, 26], relighting [63], and reconstructing mirror and glass surfaces [59]. Our focus in this work lies in novel view synthesis from one single image.
Learning-based view synthesis. CNN-based methods have been applied to synthesizing novel views from sparse light field data [23] or two or more posed images [12, 19, 4]. Several recent methods explore view synthesis from a single image. These methods, however, often focus on a specific domain [53, 60], synthetic 3D scenes/objects [73, 43, 54, 6, 7, 11], hallucinating only one specific view [61, 68], or assuming piecewise planar scenes [32, 34].
Many of these learning-based view synthesis methods require running a forward pass of the pre-trained network to synthesize the image of a given viewpoint. This makes these approaches less applicable to display on resource-constrained devices. Our representation, on the other hand, can be easily converted into a textured mesh and efficiently rendered with standard graphics engines.
Image inpainting. The task of image inpainting aims to fill missing regions in images with plausible content. Inspired by the success of texture synthesis [9, 8], example-based methods complete the missing regions by transferring the contents from the known regions of the image, either through non-parametric patch-based synthesis [58, 1, 5, 20] or by solving a Markov Random Field model using belief propagation [25] or graph cut [46, 27, 16]. Driven by the progress of convolutional neural networks, CNN-based methods have received considerable attention due to their ability to predict semantically meaningful contents that are not available in the known regions [44, 51, 21, 65, 66]. Recent efforts include designing CNN architectures to better handle holes with irregular shapes [33, 67, 64] and two-stage methods with structure-content disentanglement, e.g., predicting structure (e.g., contours/edges in the missing regions) followed by content completion conditioned on the predicted structures [41, 62, 47].
Our inpainting model builds upon the recent two-stage approaches [41, 62, 47] but with two key differences. First, unlike existing image inpainting algorithms where the hole and the available context are static (e.g., the known regions in the entire input image), we apply the inpainting locally around each depth discontinuity with adaptive hole and context regions. Second, in addition to inpainting the color image, we also inpaint the depth values as well as the depth discontinuities in the missing regions.
Depth inpainting. Depth inpainting has applications in filling missing depth values where commodity-grade depth cameras fail (e.g., transparent/reflective/distant surfaces) [35, 70, 36] or performing image editing tasks such as object removal on stereo images [57, 40]. The goal of these algorithms, however, is to inpaint the depth of the visible surfaces. In contrast, our focus is on recovering the depth of the hidden surface.
CNN-based single-image depth estimation. CNN-based methods have recently demonstrated promising results on estimating depth from a single image. Due to the difficulty of collecting labeled datasets, earlier approaches often focus on specific visual domains such as indoor scenes [10] or street view [14, 71]. While the accuracy of these approaches is not yet competitive with multi-view stereo algorithms, this line of research is particularly promising due to the availability of larger and more diverse training datasets from relative depth annotations [3], multi-view stereo [30], 3D movies [28], and synthetic data [42].
For cases where only a single color image is available, we obtain the depth estimate through a pre-trained depth estimation model [30, 28]. Removing the dependency on stereo or multiple images as input makes our method more widely applicable to all existing photos.
# 3. Method
Layered depth image. Our method takes as input an RGB-D image (i.e., an aligned color-and-depth image pair) and generates a Layered Depth Image (LDI, [48]) with inpainted color and depth in parts that were occluded in the input.
An LDI is similar to a regular 4-connected image, except that at every position in the pixel lattice it can hold any number of pixels, from zero to many. Each LDI pixel stores a color and a depth value. Unlike the original LDI work [48], we explicitly represent the local connectivity of pixels: each pixel stores pointers to zero or one direct neighbor in each of the four cardinal directions (left, right, top, bottom). LDI pixels are 4-connected like normal image pixels within smooth regions, but do not have neighbors across depth discontinuities.
LDIs are a useful representation for 3D photography, because (1) they naturally handle an arbitrary number of layers, i.e., can adapt to depth-complex situations as necessary, and (2) they are sparse, i.e., memory and storage efficient, and can be converted into a light-weight textured mesh representation that renders fast.
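To make the representation concrete, here is a minimal Python sketch of an LDI with explicit pixel connectivity, written against the description above. The class and field names are our own illustration (inputs are assumed to be an HxWx3 color array and an HxW depth array), not the authors' released data structures.

```python
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple

@dataclass
class LDIPixel:
    """One LDI sample: color, depth, and explicit links to its cardinal neighbors."""
    color: Tuple[float, float, float]
    depth: float
    # At most one neighbor per direction; None across a depth discontinuity.
    neighbors: Dict[str, Optional[int]] = field(
        default_factory=lambda: {"left": None, "right": None, "up": None, "down": None})

class LDI:
    """Layered depth image: every (y, x) lattice position holds zero or more pixels."""
    def __init__(self, height: int, width: int):
        self.height, self.width = height, width
        self.pixels: List[LDIPixel] = []  # flat pixel storage
        self.grid: List[List[List[int]]] = [[[] for _ in range(width)] for _ in range(height)]

    def add_pixel(self, y: int, x: int, color, depth) -> int:
        idx = len(self.pixels)
        self.pixels.append(LDIPixel(tuple(color), float(depth)))
        self.grid[y][x].append(idx)  # an arbitrary number of layers per position
        return idx

def lift_rgbd(image, depth):
    """Initialize a trivial, fully 4-connected single-layer LDI from an RGB-D image."""
    h, w = depth.shape
    ldi = LDI(h, w)
    idx = [[ldi.add_pixel(y, x, image[y, x], depth[y, x]) for x in range(w)] for y in range(h)]
    for y in range(h):
        for x in range(w):
            n = ldi.pixels[idx[y][x]].neighbors
            if x > 0:
                n["left"] = idx[y][x - 1]
            if x < w - 1:
                n["right"] = idx[y][x + 1]
            if y > 0:
                n["up"] = idx[y - 1][x]
            if y < h - 1:
                n["down"] = idx[y + 1][x]
    return ldi
```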
The quality of the depth input to our method does not need to be perfect, as long as discontinuities are reasonably well aligned in the color and depth channels. In practice, we have successfully used our method with inputs from dual camera cell phones as well as with estimated depth maps from learning-based methods [30, 28].
Method overview. Given an input RGB-D image, our method proceeds as follows. We first initialize a trivial LDI, which uses a single layer everywhere and is fully 4-connected. In a pre-process we detect major depth discontinuities and group them into simple connected depth edges (Section 3.1). These form the basic units for our main algorithm below. In the core part of our algorithm, we iteratively select a depth edge for inpainting. We then disconnect the LDI pixels across the edge and only consider the background pixels of the edge for inpainting. We extract a local context region from the "known" side of the edge, and generate a synthesis region on the "unknown" side (Section 3.2). The synthesis region is a contiguous 2D region of new pixels, whose color and depth values we generate from the given context using a learning-based method (Section 3.3). Once inpainted, we merge the synthesized pixels back into the LDI (Section 3.4). Our method iteratively proceeds in this manner until all depth edges have been treated.
# 3.1. Image preprocessing
The only input to our method is a single RGB-D image. Every step of the algorithm below proceeds fully automatically. We normalize the depth channel by mapping the min and max disparity values (i.e., $1 / \mathrm{depth}$) to 0 and 1, respectively. All parameters related to spatial dimensions below are tuned for images with 1024 pixels along the longer dimension, and should be adjusted proportionally for images of different sizes.
We start by lifting the image onto an LDI, i.e., creating a single layer everywhere and connecting every LDI pixel to its four cardinal neighbors. Since our goal is to inpaint the occluded parts of the scene, we need to find depth discontinuities, since these are the places where we need to extend the existing content. In most depth maps produced by stereo methods (dual camera cell phones) or depth estimation networks, discontinuities are blurred across multiple pixels (Figure 2c), making it difficult to precisely localize them. We therefore sharpen the depth maps using a bilateral median filter [37] (Figure 2d) with a $7 \times 7$ window size, $\sigma_{\text{spatial}} = 4.0$, and $\sigma_{\text{intensity}} = 0.5$.
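As a rough illustration of this preprocessing step, the NumPy sketch below normalizes disparity and applies a brute-force weighted-median sharpening pass with the window size and sigmas quoted above; the exact filter of [37] and its implementation details may differ.

```python
import numpy as np

def normalize_disparity(depth, eps=1e-6):
    """Map min/max disparity (1/depth) to [0, 1], as described in Section 3.1."""
    disp = 1.0 / np.maximum(depth, eps)
    return (disp - disp.min()) / (disp.max() - disp.min() + eps)

def bilateral_median_filter(disp, size=7, sigma_spatial=4.0, sigma_intensity=0.5):
    """Sharpen a normalized disparity map with a brute-force weighted median filter.

    Weights combine spatial distance and disparity difference, so the median snaps
    to one side of a blurred discontinuity instead of averaging across it.
    """
    h, w = disp.shape
    r = size // 2
    yy, xx = np.mgrid[-r:r + 1, -r:r + 1]
    w_spatial = np.exp(-(xx ** 2 + yy ** 2) / (2 * sigma_spatial ** 2))
    padded = np.pad(disp, r, mode="edge")
    out = np.empty_like(disp)
    for y in range(h):
        for x in range(w):
            patch = padded[y:y + size, x:x + size]
            w_range = np.exp(-((patch - disp[y, x]) ** 2) / (2 * sigma_intensity ** 2))
            weights = (w_spatial * w_range).ravel()
            values = patch.ravel()
            order = np.argsort(values)
            cum = np.cumsum(weights[order])
            # weighted median: first value whose cumulative weight reaches half the total
            out[y, x] = values[order][np.searchsorted(cum, 0.5 * cum[-1])]
    return out
```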
After sharpening the depth map, we find discontinuities by thresholding the disparity difference between neighboring pixels. This results in many spurious responses, such as isolated speckles and short segments dangling off longer edges (Figure 2e).

(a) Color

(b) Raw / filtered depth

(c) Raw

(d) Filtered

(e) Raw discontinuities

(f) Linked depth edges

Figure 2. Preprocessing. Preprocessing of the color and depth input (a-b). We use a bilateral median filter to sharpen the input depth maps (c-d), detect raw discontinuities using disparity thresholds (e), and clean up spurious threshold responses and link discontinuities into connected depth edges (f). These linked depth edges form the basic unit for our inpainting process.
(a) Initial LDI (fully connected)

(b) Cut across discontinuity

(c) Context / synthesis regions

(d) Inpainted

Figure 3. Conceptual illustration of the LDI inpainting algorithm. (a) The initial LDI is fully connected. A depth edge (discontinuity) is marked in gray. (b) We first cut the LDI pixel connections across the depth, forming a foreground silhouette (green) and a background silhouette (red). (c) For the background silhouette we spawn a context region (blue) and a synthesis region (red) of new LDI pixels. (d) The synthesized pixels have been merged into the LDI.
Figure 4. Context/synthesis regions. Context regions (blue) and synthesis regions (red) for three example connected depth edges (black) from Figure 2(f).


We clean this up as follows: First, we create a binary map by labeling depth discontinuities as 1 (and all other pixels as 0). Next, we use connected component analysis to merge adjacent discontinuities into a collection of "linked depth edges". To avoid merging edges at junctions, we separate them based on the local connectivity of the LDI. Finally, we remove short segments (< 10 pixels), including both isolated and dangling ones. We determine the threshold of 10 by conducting five-fold cross-validation with the LPIPS [69] metric on 50 samples randomly selected from the RealEstate10K training set. The final edges (Figure 2f) form the basic unit of our iterative inpainting procedure, which is described in the following sections.
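The cleanup just described could be approximated as follows; the disparity-jump threshold is an assumed placeholder, and splitting at junctions via LDI connectivity is omitted for brevity.

```python
import numpy as np
from scipy import ndimage

def find_depth_edges(disp, disparity_threshold=0.04, min_length=10):
    """Detect depth discontinuities and group them into linked depth edges.

    Simplified version of the cleanup above: threshold disparity jumps between
    4-neighbors, merge adjacent responses with connected-component analysis, and
    drop segments shorter than `min_length` pixels.
    """
    jump_x = np.abs(np.diff(disp, axis=1)) > disparity_threshold
    jump_y = np.abs(np.diff(disp, axis=0)) > disparity_threshold
    edge = np.zeros(disp.shape, dtype=bool)
    edge[:, :-1] |= jump_x   # mark both pixels adjacent to a horizontal jump
    edge[:, 1:] |= jump_x
    edge[:-1, :] |= jump_y   # and both pixels adjacent to a vertical jump
    edge[1:, :] |= jump_y

    labels, num = ndimage.label(edge, structure=np.ones((3, 3)))
    sizes = ndimage.sum(edge, labels, index=np.arange(1, num + 1))
    keep = [i + 1 for i, s in enumerate(sizes) if s >= min_length]
    cleaned = np.isin(labels, keep)
    return cleaned, labels * cleaned

# Example (using the normalization sketch above):
# edges, edge_ids = find_depth_edges(bilateral_median_filter(normalize_disparity(depth)))
```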

Input
Figure 5. Handling imperfect depth edges. As the detected depth edges may not align well around occlusion boundaries, we dilate the synthesis region by 5 pixels. This strategy helps reduce artifacts in the inpainted regions.

context/synthesis

w/o dilation

w/dilation
# 3.2. Context and synthesis regions
Our inpainting algorithm operates on one of the previously computed depth edges at a time. Given one of these edges (Figure 3a), the goal is to synthesize new color and depth content in the adjacent occluded region. We start by disconnecting the LDI pixels across the discontinuity (Figure 3b). We call the pixels that became disconnected (i.e., are now missing a neighbor) silhouette pixels. We see in Figure 3b that a foreground silhouette (marked green) and a background silhouette (marked red) forms. Only the background silhouette requires inpainting. We are interested in extending its surrounding content into the occluded region.
We start by generating a synthesis region, a contiguous region of new pixels (Figure 3c, red pixels). These are essentially just 2D pixel coordinates at this point. We initialize the color and depth values in the synthesis region using a simple iterative flood-fill-like algorithm. It starts by stepping from all silhouette pixels one step in the direction where they are disconnected. These pixels form the initial synthesis region. We then iteratively expand (for 40 iterations) all pixels of the region by stepping left/right/up/down and adding any pixels that have not been visited before. In each iteration, we expand the context and synthesis regions alternately, so a pixel only belongs to one of the two regions. Additionally, we do not step back across the silhouette, so the synthesis region remains strictly in the occluded part of the image. Figure 4 shows a few examples.
We describe our learning-based technique for inpainting the synthesis region in the next section. Similar techniques [33, 41] were previously used for filling holes in images. One important difference to our work is that these image holes were always fully surrounded by known content, which constrained the synthesis. In our case, however, the inpainting is performed on a connected layer of LDI pixels, and it should only be constrained by surrounding pixels that are directly connected to it. Any other region in the LDI, for example on another foreground or background layer, is entirely irrelevant for this synthesis unit, and should not constrain or influence it in any way.
We achieve this behavior by explicitly defining a context region (Figure 3c, blue region) for the synthesis. Our inpainting networks only consider the content in the context region and do not see any other parts of the LDI. The context region is generated using a similar flood-fill-like algorithm. One difference, however, is that this algorithm selects actual LDI pixels and follows their connection links, so the context region expansion halts at silhouettes. We run this algorithm for 100 iterations, as we found that synthesis performs better with slightly larger context regions. In practice, the silhouette pixels may not align well with the actual occluding boundaries due to imperfect depth estimation. To tackle this issue, we dilate the synthesis region near the depth edge by 5 pixels (and erode the context region correspondingly). Figure 5 shows the effect of this heuristic.
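A possible sketch of the alternating region growth is given below. The callbacks `step_direction`, `connected`, and `in_bounds` are assumed interfaces to the LDI described earlier, not functions from the released code.

```python
def grow_regions(background_silhouette, step_direction, connected, in_bounds,
                 synthesis_iters=40, context_iters=100):
    """Alternately grow the synthesis and context regions from a background silhouette.

    `background_silhouette` is an iterable of (y, x) positions on the background side of
    the cut, `step_direction[(y, x)]` gives the (dy, dx) pointing into the occluded side,
    `connected(p, q)` says whether two LDI pixels are still linked, and `in_bounds(q)`
    checks the pixel lattice.
    """
    synthesis = set()
    for (y, x) in background_silhouette:
        dy, dx = step_direction[(y, x)]       # initial synthesis pixels: one step into
        q = (y + dy, x + dx)                  # the direction that was disconnected
        if in_bounds(q):
            synthesis.add(q)
    context = set(background_silhouette)

    steps = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    for it in range(max(synthesis_iters, context_iters)):
        if it < synthesis_iters:
            # Synthesis growth: plain 2D flood fill over new pixel coordinates; excluding
            # context pixels approximates "never step back across the silhouette".
            new = set()
            for (y, x) in synthesis:
                for dy, dx in steps:
                    q = (y + dy, x + dx)
                    if in_bounds(q) and q not in synthesis and q not in context:
                        new.add(q)
            synthesis |= new
        if it < context_iters:
            # Context growth: follow actual LDI connectivity only, so it halts at silhouettes.
            new = set()
            for p in context:
                for dy, dx in steps:
                    q = (p[0] + dy, p[1] + dx)
                    if in_bounds(q) and q not in context and q not in synthesis and connected(p, q):
                        new.add(q)
            context |= new
    return context, synthesis
```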
# 3.3. Context-aware color and depth inpainting
Model. Given the context and synthesis regions, our next goal is to synthesize color and depth values. Even though we perform the synthesis on an LDI, the extracted context and synthesis regions are locally like images, so we can use standard network architectures designed for images. Specifically, we build our color and depth inpainting models upon image inpainting methods in [41, 33, 62].
One straightforward approach is to inpaint the color image and depth map independently. The inpainted depth map, however, may not be well-aligned with the inpainted color. To address this issue, we design our color and depth inpainting networks similar to [41, 62]: we break the inpainting task down into three sub-networks: (1) an edge inpainting network, (2) a color inpainting network, and (3) a depth inpainting network (Figure 6). First, given the context edges as input, we use the edge inpainting network to predict the depth edges in the synthesis region, producing the inpainted edges. Performing this step first helps infer the structure (in terms of depth edges) that can be used to constrain the content prediction (the color and depth values). We take the concatenated inpainted edges and context color as input and use the color inpainting network to produce the inpainted color. We perform the depth inpainting similarly. Figure 7 shows an example of how the edge-guided inpainting is able to extend the depth structures accurately and alleviate the color/depth misalignment issue.
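The conditioning order described above (edges first, then color and depth) might be wired up as in the following PyTorch sketch; the tiny stand-in networks and channel layouts are assumptions for illustration and do not reflect the actual architectures.

```python
import torch
import torch.nn as nn

def tiny_net(in_ch, out_ch):
    # Stand-in for the real edge/color/depth inpainting networks.
    return nn.Sequential(nn.Conv2d(in_ch, 32, 3, padding=1), nn.ReLU(),
                         nn.Conv2d(32, out_ch, 3, padding=1))

class ContextAwareInpainter(nn.Module):
    """Sketch of the conditioning order only: edges first, then color and depth."""
    def __init__(self):
        super().__init__()
        self.edge_net = tiny_net(1 + 1, 1)       # context edge map + synthesis-region mask
        self.color_net = tiny_net(3 + 1 + 1, 3)  # context color + inpainted edges + mask
        self.depth_net = tiny_net(1 + 1 + 1, 1)  # context depth + inpainted edges + mask

    def forward(self, ctx_color, ctx_depth, ctx_edges, synth_mask):
        # 1) complete the depth-edge structure inside the synthesis region
        edges = torch.sigmoid(self.edge_net(torch.cat([ctx_edges, synth_mask], dim=1)))
        # 2) color conditioned on the completed edges
        color = self.color_net(torch.cat([ctx_color, edges, synth_mask], dim=1))
        # 3) depth conditioned on the same completed edges, keeping color and depth aligned
        depth = self.depth_net(torch.cat([ctx_depth, edges, synth_mask], dim=1))
        return edges, color, depth

# Smoke test with dummy (B, C, H, W) tensors:
# model = ContextAwareInpainter()
# e, c, d = model(torch.rand(1, 3, 64, 64), torch.rand(1, 1, 64, 64),
#                 torch.rand(1, 1, 64, 64), torch.ones(1, 1, 64, 64))
```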
Multi-layer inpainting. In depth-complex scenarios, applying our inpainting model once is not sufficient as we can still see the hole through the discontinuity created by the inpainted depth edges. We thus apply our inpainting model until no further inpainted depth edges are generated. Figure 8 shows an example of the effects. Here, applying our inpainting model once fills in missing layers. However, several holes are still visible when viewed at a certain viewpoint (Figure 8b). Applying the inpainting model one more time fixes the artifacts.
Training data generation. Our proposed model can simply be trained on any image dataset without the need for annotated data. Here, we choose to use the MSCOCO dataset [31] for its wide diversity in object types and scenes. To generate the training data for the inpainting model, we create a synthetic dataset as follows. First, we apply the pre-trained MegaDepth [30] model to the COCO dataset to obtain pseudo ground truth depth maps. We extract context/synthesis regions (as described in Section 3.2) to form a pool of these regions. We then randomly sample and place these context-synthesis regions on different images in the COCO dataset. We thus obtain the ground truth content (RGB-D) for the simulated occluded region.
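A simplified version of this self-supervised data generation could look like the following; `region_pool` is a hypothetical container of precomputed context/synthesis mask pairs already placed at image resolution.

```python
import random

def make_training_example(image, depth, region_pool):
    """Simulate an occlusion on a fully visible RGB-D training image.

    `region_pool` is assumed to hold (context_mask, synthesis_mask) boolean array pairs
    extracted as in Section 3.2. The pixels under `synthesis_mask` are hidden from the
    network but kept as ground truth, so no manual annotation is needed.
    """
    ctx_mask, syn_mask = random.choice(region_pool)
    input_color, input_depth = image.copy(), depth.copy()
    input_color[syn_mask] = 0   # blank out the simulated occluded region
    input_depth[syn_mask] = 0
    return {
        "context_mask": ctx_mask, "synthesis_mask": syn_mask,
        "input_color": input_color, "input_depth": input_depth,
        "gt_color": image, "gt_depth": depth,   # ground truth comes for free
    }
```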
# 3.4. Converting to 3D textured mesh
We form the 3D textured mesh by integrating all the inpainted depth and color values back into the original LDI. Using a mesh representation for rendering allows us to quickly render novel views without performing a per-view inference step. Consequently, the 3D representation produced by our algorithm can easily be rendered using standard graphics engines on edge devices.
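To illustrate the export step, the sketch below triangulates a single fully connected layer of an RGB-D image into a textured mesh with pinhole intrinsics; a real exporter would walk the multi-layer LDI connectivity instead, so treat this as a schematic only.

```python
import numpy as np

def depth_layer_to_mesh(color, depth, fx, fy, cx, cy, max_disp_jump=0.04):
    """Triangulate one fully connected layer of an RGB-D image into a textured mesh.

    One vertex per pixel, back-projected with pinhole intrinsics, and two triangles per
    quad whose corners stay connected (no large disparity jump across the quad).
    """
    h, w = depth.shape
    ys, xs = np.mgrid[0:h, 0:w]
    z = depth.astype(np.float64)
    verts = np.stack([(xs - cx) * z / fx, (ys - cy) * z / fy, z], axis=-1).reshape(-1, 3)
    vert_colors = color.reshape(-1, color.shape[-1])

    disp = 1.0 / np.maximum(z, 1e-6)
    faces = []
    vid = lambda y, x: y * w + x
    for y in range(h - 1):
        for x in range(w - 1):
            quad = disp[y:y + 2, x:x + 2]
            if quad.max() - quad.min() > max_disp_jump:  # quad crosses a discontinuity
                continue
            a, b, c, d = vid(y, x), vid(y, x + 1), vid(y + 1, x), vid(y + 1, x + 1)
            faces.append((a, b, c))
            faces.append((b, d, c))
    return verts, vert_colors, np.array(faces, dtype=np.int64)
```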
# 4. Experimental Results
In this section, we start by describing implementation details (Section 4.1). We then show visual comparisons with state-of-the-art novel view synthesis methods (Section 4.2). We refer the readers to the supplementary material for extensive results and comparisons. Next, we follow the evaluation protocol in [72] and report quantitative comparisons on the RealEstate10K dataset (Section 4.3). We present an ablation study to justify our model design (Section 4.4). Finally, we show that our method works well with depth maps from different sources (Section 4.5).

Figure 6. Context-aware color and depth inpainting. Given the color, depth, the extracted and linked depth edges as inputs, we randomly select one of the edges as a subproblem. We start with inpainting the depth edge in the synthesis region (red) using an edge inpainting network. We then concatenate the inpainted depth edges with the context color together and apply a color inpainting network to produce the inpainted color. Similarly, we concatenate the inpainted depth edges with the context depth and apply a depth inpainting network to produce the inpainted depth.

Zoom-in Diffusion w/o edge w/ edge



Figure 7. Effect of depth inpainting. Edge-guided depth inpainting produces more accurate structure inpainting, particularly for depth-complex regions (e.g., T-junctions). Blue box: synthesized novel view.
(a) None
Figure 8. Multi-layer inpainting.

(b) Once

(c) Twice
Additional details and visual comparisons can be found in our supplementary material.
# 4.1. Implementation details
Training the inpainting model. For the edge generator, we follow the hyper-parameters in [41]. Specifically, we train the edge generator using the ADAM optimizer [24] with $\beta = 0.9$ and an initial learning rate of 0.0001. We train both the edge and depth generators on the context-synthesis region dataset built from MSCOCO for 5 epochs, and the color image generator for 10 epochs.
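Under the stated hyper-parameters, the optimizer setup might look like the minimal PyTorch sketch below; the stand-in modules, the value of beta2, and the loss weights are assumptions, since only beta1 and the learning rate are given in the text.

```python
import torch.nn as nn
from torch import optim

# Stand-in modules; the actual models are the edge/color/depth inpainting networks.
edge_net = nn.Conv2d(2, 1, 3, padding=1)
depth_net = nn.Conv2d(3, 1, 3, padding=1)
color_net = nn.Conv2d(5, 3, 3, padding=1)

def make_optimizer(model, lr=1e-4, beta1=0.9):
    # ADAM with beta1 = 0.9 and an initial learning rate of 1e-4, as stated above;
    # beta2 and the per-network loss weights are not given, so defaults are used.
    return optim.Adam(model.parameters(), lr=lr, betas=(beta1, 0.999))

optimizers = {
    "edge": make_optimizer(edge_net),    # trained for 5 epochs on the context-synthesis data
    "depth": make_optimizer(depth_net),  # 5 epochs
    "color": make_optimizer(color_net),  # 10 epochs
}
```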
Inpainting model architecture. For the edge inpainting network, we adopt the architecture provided by [41]. For the depth and color inpainting networks, we use a standard U-Net architecture with partial convolutions [33]. Due to space limitations, we leave additional implementation details (the specific network architectures, the training losses, and the weights for each network) to the supplementary material. We will make the source code and pre-trained models publicly available to foster future work.
Training data. We use the 118k images from COCO 2017 set for training. We select at most 3 pairs of regions from each image to form the context-synthesis pool. During training, we sample one pair of regions for each image, and resize it by a factor between [1.0, 1.3].
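A small sketch of this sampling step, assuming a pool of boolean context/synthesis mask pairs; the nearest-neighbor zoom keeps the rescaled masks binary.

```python
import random
from scipy.ndimage import zoom

def sample_region_pair(pool, scale_range=(1.0, 1.3)):
    """Draw one context/synthesis mask pair from the pool and rescale it."""
    ctx, syn = random.choice(pool)
    s = random.uniform(*scale_range)
    ctx = zoom(ctx.astype(float), s, order=0) > 0.5
    syn = zoom(syn.astype(float), s, order=0) > 0.5
    return ctx, syn
```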
# 4.2. Visual comparisons
Comparisons with MPI-based methods. We compare our proposed model against MPI-based approaches on the RealEstate10K dataset. We use DPSNet [22] to obtain the input depth maps for our method. We render the novel views of the MPI-based methods using the pretrained weights provided by the authors. Figure 9 shows two challenging examples with complex depth structures. Our method synthesizes plausible structures around depth boundaries; on the other hand, stereo magnification and PB-MPI produce artifacts around depth discontinuities, and LLFF [38] suffers from ghosting effects when extrapolating new views.
Figure 9. Visual comparison with MPI-based methods. Our method inpaints plausible structure and color in the occluded region. (Comparison grid; panels: Reference Frame, Zoom-in, StereoMag [72], PB-MPI [52], LLFF [39], XView [4], Ours.)

|
| 302 |
+
|
| 303 |
+

|
| 304 |
+
|
| 305 |
+

|
| 306 |
+
Facebook 3D Photo results
|
| 307 |
+
|
| 308 |
+

|
| 309 |
+
|
| 310 |
+

|
| 311 |
+
|
| 312 |
+

|
| 313 |
+
|
| 314 |
+

|
| 315 |
+
|
| 316 |
+

|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
Figure 10. Visual comparison to Facebook 3D Photos. Our approach fills plausible textures and structures at disocclusions.
|
| 320 |
+
|
| 321 |
+

|
| 322 |
+
|
| 323 |
+

|
| 324 |
+
|
| 325 |
+

|
| 326 |
+
Our results
|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
|
| 330 |
+

|
| 331 |
+
|
| 332 |
+

|
| 333 |
+
|
| 334 |
+

|
| 335 |
+
|
| 336 |
+

|
| 337 |
+
|
| 338 |
+
Comparisons with Facebook 3D Photos. Here, we aim to evaluate the capability of our method on photos taken in the wild. We extract the color images and the corresponding depth maps estimated by an iPhone X (with a dual-lens camera). We use the same set of RGB-D inputs for both Facebook 3D Photos and our algorithm. Figure 10 shows the view synthesis results in comparison with Facebook 3D Photos. The diffused color and depth values produced by the Facebook 3D Photo algorithm work well when only small or thin occluded regions are revealed at novel views. The artifacts, however, become clearly visible with larger occluded regions. Our results, on the other hand, generally fill in the synthesis regions with visually plausible content and structures.
Table 1. Quantitative comparison on the RealEstate10K dataset.
<table><tr><td>Methods</td><td>SSIM ↑</td><td>PSNR ↑</td><td>LPIPS ↓</td></tr><tr><td>Stereo-Mag [72]</td><td>0.8906</td><td>26.71</td><td>0.0826</td></tr><tr><td>PB-MPI [52]</td><td>0.8773</td><td>25.51</td><td>0.0902</td></tr><tr><td>LLFF [39]</td><td>0.8062</td><td>23.17</td><td>0.1323</td></tr><tr><td>Xview [4]</td><td>0.8628</td><td>24.75</td><td>0.0822</td></tr><tr><td>Ours</td><td>0.8887</td><td>27.29</td><td>0.0724</td></tr></table>
Table 2. Using depth edges as guidance improves the results. Blue: results in disoccluded regions.
<table><tr><td>Methods</td><td>SSIM ↑</td><td>PSNR ↑</td><td>LPIPS ↓</td></tr><tr><td>Diffusion</td><td>0.8665 (0.6237)</td><td>25.95 (18.91)</td><td>0.084</td></tr><tr><td>Inpaint w/o edge</td><td>0.8665 (0.6247)</td><td>25.96 (18.94)</td><td>0.084</td></tr><tr><td>Inpaint w/ edge (Ours)</td><td>0.8666 (0.6265)</td><td>25.97 (18.98)</td><td>0.083</td></tr></table>
Table 3. Using the color inpainting model gives better perceptual quality; our dilation heuristic further boosts the performance. Blue: results in disoccluded regions.
<table><tr><td>Methods</td><td>SSIM ↑</td><td>PSNR ↑</td><td>LPIPS ↓</td></tr><tr><td>Diffusion</td><td>0.8661 (0.6215)</td><td>25.90 (18.78)</td><td>0.088</td></tr><tr><td>Inpaint w/o dilation</td><td>0.8643 (0.5573)</td><td>25.56 (17.14)</td><td>0.085</td></tr><tr><td>Inpaint w/ dilation (Ours)</td><td>0.8666 (0.6265)</td><td>25.97 (18.98)</td><td>0.083</td></tr></table>
# 4.3. Quantitative comparisons
We evaluate how well our model can extrapolate views compared to MPI-based methods [52, 72, 4, 39]. We randomly sample 1500 video sequences from RealEstate10K to generate testing triplets. For each triplet, we set $t = 10$ for the target view, so that all methods need to extrapolate beyond the source ($t = 0$) and reference ($t = 4$) frames. We use DPSNet [22] to generate the input depth maps required for our model. We quantify the performance of each model using the SSIM and PSNR metrics between the synthesized target views and the ground truth. As these metrics do not capture the perceptual quality of the synthesized views, we also include the LPIPS [69] metric to quantify how well the generated views align with human perception. For PB-MPI, we set the number of depth layers to 64, as this yields the best result. We report the evaluation results in Table 1. Our proposed method performs competitively on SSIM and PSNR. In addition, our synthesized views exhibit better perceptual quality, as reflected in the superior LPIPS score.
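For reference, the per-view scoring described here can be reproduced with scikit-image for SSIM and PSNR as sketched below (LPIPS would additionally require the third-party `lpips` package); the array layout and data range are assumptions.

```python
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def evaluate_view(pred, gt):
    """Score one synthesized target view against ground truth with SSIM and PSNR.

    `pred` and `gt` are HxWx3 float arrays in [0, 1]. LPIPS, used above for perceptual
    quality, would need the third-party `lpips` package and is not computed here.
    """
    ssim = structural_similarity(gt, pred, channel_axis=-1, data_range=1.0)  # multichannel=True on older scikit-image
    psnr = peak_signal_noise_ratio(gt, pred, data_range=1.0)
    return {"ssim": ssim, "psnr": psnr}
```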
# 4.4. Ablation study
We conduct ablation studies to see how each of our proposed components contributes to the final performance. We first verify the effectiveness of edge-guided depth inpainting. We sample 130 triplets from our testing sequences, evaluate the inpainted color on both the entire image and the disoccluded regions, and report the numbers in Table 2. The results show that our proposed edge-guided inpainting leads to a minor improvement in the numerical metrics.
Figure 11. Color inpainting leads to better visual quality. (Panels: Input, (a) Disocclusion, (b) Diffusion, (c) w/o Dilation, (d) w/ Dilation.)

Figure 12. Our method works with various sources of depth map. We show the depth estimates on the top-left of novel views. (Panels: Input, MegaDepth, MiDas, Kinect.)
Next, we examine the efficacy of our color inpainting model following the same procedure described above. We present the performance on both the entire image and the occluded regions in Table 3. We observe that our proposed model yields better perceptual quality. Figure 11 shows an example.
# 4.5. Handling different depth maps
We test our method using depth maps generated by different approaches (Figure 12). We select images from the SUNRGBD [50] dataset and obtain the corresponding depth maps from three different sources: 1) depth estimated with MegaDepth [30], 2) depth estimated with MiDas [28], and 3) a Kinect depth sensor. We present the resulting 3D photos in Figure 12. The results show that our method can handle depth maps from different sources reasonably well.
# 5. Conclusions
In this paper, we present an algorithm for creating compelling 3D photography from a single RGB-D image. Our core technical novelty lies in creating a completed layered depth image representation through context-aware color and depth inpainting. We validate our method on a wide variety of everyday scenes. Our experimental results show that our algorithm produces considerably fewer visual artifacts when compared with the state-of-the-art novel view synthesis techniques. We believe that such technology can bring 3D photography to a broader community, allowing people to easily capture scenes for immersive viewing.
Acknowledgement. This project is supported in part by NSF (#1755785) and MOST-108-2634-F-007-006 and MOST-109-2634-F-007-016.
# References
[1] Connelly Barnes, Eli Shechtman, Adam Finkelstein, and Dan B Goldman. Patchmatch: A randomized correspondence algorithm for structural image editing. In ACM Transactions on Graphics, volume 28, page 24, 2009. 2
[2] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, 2001. 2
[3] Weifeng Chen, Zhao Fu, Dawei Yang, and Jia Deng. Single-image depth perception in the wild. In NeurIPS, 2016. 3
[4] Inchang Choi, Orazio Gallo, Alejandro Troccoli, Min H Kim, and Jan Kautz. Extreme view synthesis. In ICCV, 2019. 2, 7, 8
[5] Soheil Darabi, Eli Shechtman, Connelly Barnes, Dan B Goldman, and Pradeep Sen. Image melding: Combining inconsistent images using patch-based synthesis. ACM Transactions on Graphics, 31(4):82-1, 2012. 2
[6] Helisa Dhamo, Nassir Navab, and Federico Tombari. Object-driven multi-layer scene decomposition from a single image. In ICCV, 2019. 2
[7] Helisa Dhamo, Keisuke Tateno, Iro Laina, Nassir Navab, and Federico Tombari. Peeking behind objects: Layered depth prediction from a single image. In ECCV, 2018. 2
[8] Alexei A Efros and William T Freeman. Image quilting for texture synthesis and transfer. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 341-346, 2001. 2
|
| 423 |
+
[9] Alexei A Efros and Thomas K Leung. Texture synthesis by non-parametric sampling. In ICCV, 1999. 2
|
| 424 |
+
[10] David Eigen and Rob Fergus. Predicting depth, surface normals and semantic labels with a common multi-scale convolutional architecture. In ICCV, 2015. 3
|
| 425 |
+
[11] SM Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S Morcos, Marta Garnelo, Avraham Ruderman, Andrei A Rusu, Ivo Danihelka, Karol Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-1210, 2018. 2
|
| 426 |
+
[12] John Flynn, Ivan Neulander, James Philbin, and Noah Snavely. Deepstereo: Learning to predict new views from the world's imagery. In CVPR, 2016. 1, 2
|
| 427 |
+
[13] Clément Godard, Oisin Mac Aodha, Michael Firman, and Gabriel J Brostow. Digging into self-supervised monocular depth estimation. In ICCV, pages 3828-3838, 2019. 2
|
| 428 |
+
[14] Clément Godard, Oisin Mac Aodha, and Gabriel J Brostow. Unsupervised monocular depth estimation with left-right consistency. In CVPR, 2017. 3
|
| 429 |
+
[15] Steven J Gortler, Radek Grzeszczuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In SIGGRAPH, volume 96, pages 43-54, 1996. 2
|
| 430 |
+
[16] Kaiming He and Jian Sun. Image completion approaches using the statistics of similar patches. TPAMI, 36(12):2423-2435, 2014. 2
|
| 431 |
+
[17] Peter Hedman, Suhib Alsisan, Richard Szeliski, and Johannes Kopf. Casual 3d photography. ACM Transactions on Graphics, 36(6):234, 2017. 1, 2
|
| 432 |
+
[18] Peter Hedman and Johannes Kopf. Instant 3d photography. ACM Transactions on Graphics, 37(4):101, 2018. 1, 2
|
| 433 |
+
|
| 434 |
+
[19] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics, page 257, 2018. 1, 2
|
| 435 |
+
[20] Jia-Bin Huang, Sing Bing Kang, Narendra Ahuja, and Johannes Kopf. Image completion using planar structure guidance. ACM Transactions on graphics, 33(4):129, 2014. 2
|
| 436 |
+
[21] Satoshi Iizuka, Edgar Simo-Serra, and Hiroshi Ishikawa. Globally and locally consistent image completion. TOG, 36(4):107, 2017. 2
|
| 437 |
+
[22] Sunghoon Im, Hae-Gon Jeon, Steve Lin, and In So Kweon. Dpsnet: End-to-end deep plane sweep stereo. In ICLR, 2019. 6, 8
|
| 438 |
+
[23] Nima Khademi Kalantari, Ting-Chun Wang, and Ravi Ramamoorthi. Learning-based view synthesis for light field cameras. ACM Transactions on Graphics, 35(6):193, 2016. 2
|
| 439 |
+
[24] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In ICLR, 2015. 6
|
| 440 |
+
[25] Nikos Komodakis and Georgios Tziritas. Image completion using efficient belief propagation via priority scheduling and dynamic pruning. TIP, 16(11):2649-2661, 2007. 2
|
| 441 |
+
[26] Johannes Kopf, Fabian Langguth, Daniel Scharstein, Richard Szeliski, and Michael Goesele. Image-based rendering in the gradient domain. ACM Transactions on Graphics, 32(6):199, 2013. 1, 2
|
| 442 |
+
[27] Vivek Kwatra, Arno Schödl, Irfan Essa, Greg Turk, and Aaron Bobick. Graphcut textures: image and video synthesis using graph cuts. ACM Transactions on Graphics, 22(3):277-286, 2003. 2
|
| 443 |
+
[28] Katrin Lasinger, René Ranftl, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. arXiv preprint arXiv:1907.01341, 2019. 2, 3, 8
|
| 444 |
+
[29] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 31-42, 1996. 2
|
| 445 |
+
[30] Zhengqi Li and Noah Snavely. Megadepth: Learning single-view depth prediction from internet photos. In CVPR, 2018. 2, 3, 5, 8
|
| 446 |
+
[31] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In ECCV, 2014. 5
|
| 447 |
+
[32] Chen Liu, Jimei Yang, Duygu Ceylan, Ersin Yumer, and Yasutaka Furukawa. Planenet: Piece-wise planar reconstruction from a single rgb image. In CVPR, 2018. 2
|
| 448 |
+
[33] Guilin Liu, Fitsum A Reda, Kevin J Shih, Ting-Chun Wang, Andrew Tao, and Bryan Catanzaro. Image inpainting for irregular holes using partial convolutions. In ECCV, 2018. 2, 5, 6
|
| 449 |
+
[34] Miaomiao Liu, Xuming He, and Mathieu Salzmann. Geometry-aware deep network for single-image novel view synthesis. In CVPR, 2018. 2
|
| 450 |
+
[35] Wei Liu, Xiaogang Chen, Jie Yang, and Qiang Wu. Robust color guided depth map restoration. TIP, 26(1):315-327, 2017. 3
|
| 451 |
+
[36] Si Lu, Xiaofeng Ren, and Feng Liu. Depth enhancement via low-rank matrix completion. In CVPR, 2014. 3
|
| 452 |
+
|
| 453 |
+
[37] Ziyang Ma, Kaiming He, Yichen Wei, Jian Sun, and Enhua Wu. Constant time weighted median filtering for stereo matching and beyond. In ICCV, pages 49-56, 2013. 3
|
| 454 |
+
[38] Leonard McMillan and Gary Bishop. Plenoptic modeling: An image-based rendering system. In Proceedings of the 22nd annual conference on Computer graphics and interactive techniques, pages 39-46. ACM, 1995. 6
|
| 455 |
+
[39] Ben Mildenhall, Pratul P. Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 38(4), July 2019. 1, 2, 7, 8
|
| 456 |
+
[40] Tai-Jiang Mu, Ju-Hong Wang, Song-Pei Du, and Shi-Min Hu. Stereoscopic image completion and depth recovery. The Visual Computer, 30(6-8):833-843, 2014. 3
|
| 457 |
+
[41] Kamyar Nazeri, Eric Ng, Tony Joseph, Faisal Qureshi, and Mehran Ebrahimi. Edgeconnect: Generative image inpainting with adversarial edge learning. arXiv preprint arXiv:1901.00212, 2019. 3, 5, 6
|
| 458 |
+
[42] Simon Niklaus, Long Mai, Jimei Yang, and Feng Liu. 3d ken burns effect from a single image. ACM Transactions on Graphics (TOG), 38(6), Nov. 2019. 2, 3
|
| 459 |
+
[43] Eunbyung Park, Jimei Yang, Ersin Yumer, Duygu Ceylan, and Alexander C Berg. Transformation-grounded image generation network for novel 3d view synthesis. In CVPR, 2017. 2
|
| 460 |
+
[44] Deepak Pathak, Philipp Krahenbuhl, Jeff Donahue, Trevor Darrell, and Alexei A Efros. Context encoders: Feature learning by inpainting. In CVPR, 2016. 2
|
| 461 |
+
[45] Eric Penner and Li Zhang. Soft 3d reconstruction for view synthesis. ACM Transactions on Graphics (TOG), 36(6):235, 2017. 1, 2
|
| 462 |
+
[46] Yael Pritch, Eitam Kav-Venaki, and Shmuel Peleg. Shift-map image editing. In CVPR, pages 151-158. IEEE, 2009. 2
|
| 463 |
+
[47] Yurui Ren, Xiaoming Yu, Ruonan Zhang, Thomas H Li, Shan Liu, and Ge Li. Structureflow: Image inpainting via structure-aware appearance flow. In ICCV, 2019. 3
|
| 464 |
+
[48] Jonathan Shade, Steven Gortler, Li-wei He, and Richard Szeliski. Layered depth images. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, pages 231–242. ACM, 1998. 2, 3
|
| 465 |
+
[49] Sudipta N Sinha, Johannes Kopf, Michael Goesele, Daniel Scharstein, and Richard Szeliski. Image-based rendering for scenes with reflections. ACM Transactions on Graphics, 31(4):100-1, 2012. 2
|
| 466 |
+
[50] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015. 8
|
| 467 |
+
[51] Yuhang Song, Chao Yang, Zhe Lin, Xiaofeng Liu, Qin Huang, Hao Li, and C-C Jay Kuo. Contextual-based image inpainting: Infer, match, and translate. arXiv preprint arXiv:1711.08590, 2017. 2
|
| 468 |
+
[52] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snavely. Pushing the boundaries of view extrapolation with multiplane images. In CVPR, 2019. 1, 2, 7, 8
|
| 469 |
+
|
| 470 |
+
[53] Pratul P Srinivasan, Tongzhou Wang, Ashwin Sreelal, Ravi Ramamoorthi, and Ren Ng. Learning to synthesize a 4d rgbd light field from a single image. In ICCV, 2017. 2
|
| 471 |
+
[54] Shao-Hua Sun, Minyoung Huh, Yuan-Hong Liao, Ning Zhang, and Joseph J Lim. Multi-view to novel view: Synthesizing novel views with self-learned confidence. In ECCV, 2018. 2
|
| 472 |
+
[55] Lech Świrski, Christian Richardt, and Neil A Dodgson. Layered photo pop-up. In ACM SIGGRAPH 2011 Posters, 2011. 2
|
| 473 |
+
[56] Shubham Tulsiani, Richard Tucker, and Noah Snavely. Layer-structured 3d scene inference via view synthesis. In ECCV, 2018. 2
|
| 474 |
+
[57] Liang Wang, Hailin Jin, Ruigang Yang, and Minglun Gong. Stereoscopic inpainting: Joint color and depth completion from stereo images. In CVPR, 2008. 3
|
| 475 |
+
[58] Yonatan Wexler, Eli Shechtman, and Michal Irani. Space-time completion of video. TPAMI, 29(3):463-476, 2007. 2
|
| 476 |
+
[59] Thomas Whelan, Michael Goesele, Steven J Lovegrove, Julian Straub, Simon Green, Richard Szeliski, Steven Butterfield, Shobhit Verma, and Richard Newcombe. Reconstructing scenes with mirror and glass surfaces. ACM Transactions on Graphics, 37(4):102, 2018. 1, 2
|
| 477 |
+
[60] Olivia Wiles, Georgia Gkioxari, Richard Szeliski, and Justin Johnson. Synsin: End-to-end view synthesis from a single image. In CVPR, 2020. 2
|
| 478 |
+
[61] Junyuan Xie, Ross Girshick, and Ali Farhadi. Deep3d: Fully automatic 2d-to-3d video conversion with deep convolutional neural networks. In ECCV, 2016. 2
|
| 479 |
+
[62] Wei Xiong, Zhe Lin, Jimei Yang, Xin Lu, Connelly Barnes, and Jiebo Luo. Foreground-aware image inpainting. In CVPR, 2019. 3, 5
|
| 480 |
+
[63] Zexiang Xu, Sai Bi, Kalyan Sunkavalli, Sunil Hadap, Hao Su, and Ravi Ramamoorthi. Deep view synthesis from sparse photometric images. ACM Transactions on Graphics (TOG), 38(4):76, 2019. 2
|
| 481 |
+
[64] Zhaoyi Yan, Xiaoming Li, Mu Li, Wangmeng Zuo, and Shiguang Shan. Shift-net: Image inpainting via deep feature rearrangement. In ECCV, September 2018. 2
|
| 482 |
+
[65] Chao Yang, Xin Lu, Zhe Lin, Eli Shechtman, Oliver Wang, and Hao Li. High-resolution image inpainting using multiscale neural patch synthesis. In CVPR, volume 1, page 3, 2017. 2
|
| 483 |
+
[66] Jiahui Yu, Zhe Lin, Jimei Yang, Xiaohui Shen, Xin Lu, and Thomas S Huang. Generative image inpainting with contextual attention. In CVPR, 2018. 2
|
| 484 |
+
[67] Jiahui Yu, Zhe Lin, Jimei Yang, Xiaohui Shen, Xin Lu, and Thomas S Huang. Free-form image inpainting with gated convolution. In ICCV, 2019. 2
|
| 485 |
+
[68] Qiong Zeng, Wenzheng Chen, Huan Wang, Changhe Tu, Daniel Cohen-Or, Dani Lischinski, and Baoquan Chen. Hallucinating stereoscopy from a single image. In Computer Graphics Forum, volume 34, pages 1–12, 2015. 2
|
| 486 |
+
[69] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 4, 8
|
| 487 |
+
[70] Yinda Zhang and Thomas Funkhouser. Deep depth completion of a single rgb-d image. In CVPR, 2018. 3
|
| 488 |
+
|
| 489 |
+
[71] Tinghui Zhou, Matthew Brown, Noah Snavely, and David G Lowe. Unsupervised learning of depth and ego-motion from video. In CVPR, volume 2, page 7, 2017. 3
|
| 490 |
+
[72] Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images. ACM Transactions on Graphics, 2018. 1, 2, 5, 7, 8
|
| 491 |
+
[73] Tinghui Zhou, Shubham Tulsiani, Weilun Sun, Jitendra Malik, and Alexei A Efros. View synthesis by appearance flow. In ECCV, 2016. 2
|
3dphotographyusingcontextawarelayereddepthinpainting/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d8dd7fb42b3befc1f783567f56f871feb51d35640d7017b7af0a1986e90ac30f
|
| 3 |
+
size 818330
|
3dphotographyusingcontextawarelayereddepthinpainting/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b23e2c644a4a7d268b9a59bc9cceb1ba5f16ffca66efa84e411da97ea6e40f8f
|
| 3 |
+
size 521494
|
3dregnetadeepneuralnetworkfor3dpointregistration/a5f4920e-2130-4def-a31a-c357b9131bbf_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9fce90a25e16496cfe0d1d2abf75c36d949fb8c3a17e2aba8cec8884b5001314
|
| 3 |
+
size 88756
|
3dregnetadeepneuralnetworkfor3dpointregistration/a5f4920e-2130-4def-a31a-c357b9131bbf_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:43f06eb313b8993eba5564ef96b2c93e1c68d7f081fcc726a0410c9d29348495
|
| 3 |
+
size 113174
|
3dregnetadeepneuralnetworkfor3dpointregistration/a5f4920e-2130-4def-a31a-c357b9131bbf_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1c745c2ed48512334433eff3cdca5d9420e9a2327017e9b879ed4cb0db14f458
|
| 3 |
+
size 1629132
|
3dregnetadeepneuralnetworkfor3dpointregistration/full.md
ADDED
|
@@ -0,0 +1,394 @@
|
| 1 |
+
# 3DRegNet: A Deep Neural Network for 3D Point Registration
|
| 2 |
+
|
| 3 |
+
G. Dias Pais<sup>1</sup>, Srikumar Ramalingam<sup>2</sup>, Venu Madhav Govindu<sup>3</sup>, Jacinto C. Nascimento<sup>1</sup>, Rama Chellappa<sup>4</sup>, and Pedro Miraldo<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ Instituto Superior Técnico, Lisboa $^{2}$ Google Research, NY $^{3}$ Indian Institute of Science, Bengaluru $^{4}$ University of Maryland, College Park
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
We present 3DRegNet, a novel deep learning architecture for the registration of 3D scans. Given a set of 3D point correspondences, we build a deep neural network to address the following two challenges: (i) classification of the point correspondences into inliers/outliers, and (ii) regression of the motion parameters that align the scans into a common reference frame. With regard to regression, we present two alternative approaches: (i) a Deep Neural Network (DNN) registration and (ii) a Procrustes approach using SVD to estimate the transformation. Our correspondence-based approach achieves a higher speedup compared to competing baselines. We further propose the use of a refinement network, which consists of a smaller 3DRegNet as a refinement to improve the accuracy of the registration. Extensive experiments on two challenging datasets demonstrate that we outperform other methods and achieve state-of-the-art results. The code is available at https://github.com/3DVisionISR/3DRegNet.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
We address the problem of 3D registration, which is one of the classical and fundamental problems in geometrical computer vision due to its wide variety of vision, robotics, and medical applications. In 3D registration, the 6 Degrees of Freedom (DoF) motion parameters between two scans are computed given noisy point correspondences that contain outliers. The standard approach is to use minimal solvers that employ three-point correspondences (see [48, 39]) in a RANSAC [17] framework, followed by refinement techniques such as the Iterative Closest Point (ICP) [6]. In this paper, we investigate if the registration problem can be solved using a deep neural methodology. Specifically, we study if deep learning methods can bring any complementary advantages over classical registration methods. In particular, we wish to achieve speedup without compromising the registration accuracy in the presence of outliers.
|
| 14 |
+
|
| 15 |
+
3DRegNet
|
| 16 |
+

|
| 17 |
+
(a) Inliers/outliers classification using the proposed 3DRegNet vs. a RANSAC approach. Green and red colors indicate the inliers and outliers, respectively.
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
RANSAC
|
| 21 |
+
|
| 22 |
+
3DRegNet
|
| 23 |
+

|
| 24 |
+
(b) Results of the estimation of the transformation that aligns two point clouds, 3DRegNet vs. the current state-of-the-art Fast Global Registration method (FGR) [65].
|
| 25 |
+
|
| 26 |
+
FGR
|
| 27 |
+

|
| 28 |
+
Figure 1: Given a set of 3D point correspondences from two scans with outliers, our proposed network 3DRegNet simultaneously classifies the point correspondences into inliers and outliers (see (a)), and also computes the transformation (rotation, translation) for the alignment of the scans (see (b)). 3DRegNet is significantly faster and outperforms other standard geometric methods.
|
| 29 |
+
|
| 30 |
+
In other words, the challenge is not in computing the pose given the point correspondences, but in handling the outliers efficiently. Figure 1 illustrates the main goals of this paper. Figure 1(a) depicts the classification of noisy point correspondences into inliers and outliers using 3DRegNet (left) and RANSAC (right) for aligning two scans. Figure 1(b) shows the estimation of the transformation that aligns two point clouds using the proposed 3DRegNet (left) and the current state-of-the-art FGR [65] (right).
|
| 31 |
+
|
| 32 |
+
In Fig. 2(a), we show our proposed architecture with two sub-blocks: classification and registration.
|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
(a) Depiction of the 3DRegNet with DNNs for Registration.
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
(b) Representation of the 3DRegNet with Procrustes.
|
| 39 |
+
|
| 40 |
+

|
| 41 |
+
(c) Classification Block
|
| 42 |
+
(d) Registration Block with DNNs.
|
| 43 |
+
Figure 2: Two proposed architectures. (a) shows our first proposal with the classification and the registration blocks. (b) shows our second proposal with the same classification block as in the first one, but with a different registration block based on the differentiable Procrustes method. (c) The classification block using $C$ ResNets, which receives a set of point correspondences as input and outputs weights classifying them as inliers/outliers. (d) The registration block (used in the architecture shown in (a)), which takes features from the classification block and estimates the transformation parameters through a DNN.
|
| 44 |
+
|
| 45 |
+

|
| 46 |
+
|
| 47 |
+
The former takes a set of noisy point correspondences between two scans and produces weight (confidence) parameters that indicate whether a given point correspondence is an inlier or an outlier. The latter directly produces the 6 DoF motion parameters for the alignment of the two 3D scans. Our main contributions are as follows. We present a novel deep neural network architecture for solving the problem of 3D scan registration, with the possibility of a refinement network that can fine-tune the results. While achieving a significant speedup, our method attains state-of-the-art registration performance.
|
| 48 |
+
|
| 49 |
+
# 2. Related Work
|
| 50 |
+
|
| 51 |
+
ICP is widely considered the gold-standard approach for point cloud registration [6, 44]. However, since ICP often gets stuck in local minima, other approaches have proposed extensions or generalizations that achieve both efficiency and robustness, e.g., [49, 40, 41, 58, 20, 31, 43, 29]. 3D registration can also be viewed as a non-rigid problem, motivating several works [67, 5, 51, 34]. A survey of rigid and non-rigid registration of 3D point clouds is available in [52]. An optimal least-squares solution can be obtained using methods such as [53, 49, 40, 38, 24, 57, 65, 7, 36]. Many of these methods require either a good initialization or the identification of inliers using RANSAC. Subsequently, the optimal pose is estimated using only the selected inliers. In contrast to the above strategies, we focus on jointly solving (i) the identification of inlier correspondences and (ii) the estimation of the transformation parameters, without requiring an initialization. We propose a unified deep learning framework to address both challenges.
|
| 52 |
+
|
| 53 |
+
Deep learning has been used to solve 3D registration problems in diverse contexts [14, 15, 23]. PointNet is a Deep Neural Network (DNN) that produces classification and segmentation results for unordered point clouds [46]. It strives to achieve results that are invariant to the order of points, rotations, and translations. To achieve invariance, PointNet applies several Multi-Layer Perceptrons (MLPs) individually to different points, and then uses a symmetric function on top of the outputs from the MLPs. PointNetLK builds on PointNet and proposes a DNN loop scheme to compute the 3D point cloud alignment [2]. In [54], the authors derive an alternative approach to ICP, i.e., alternating between finding the closest points and computing the 3D registration. The proposed method focuses on finding the closest points at each step; the registration is computed with Procrustes. [32] proposes a network that initially generates correspondences based on learned matching probabilities and then creates an aligned point cloud. In [56, 50, 25, 55], other methods are proposed for object detection and pose estimation on point clouds with 3D bounding boxes. In contrast to these methods, our registration is obtained from pre-computed 3D point matches, such as [47, 61], instead of the original point clouds, thereby achieving considerable speedup.
|
| 54 |
+
|
| 55 |
+
A well-known approach is to use point feature histograms as features for describing a 3D point [47]. The matching of 3D points can also be achieved by extracting features using convolutional neural networks [61, 12, 59, 15, 13, 19]. Some methods directly extract 3D features from the point clouds that are invariant to the 3D environment (spherical CNNs) [10, 16]. A deep network has been designed recently for computing the pose for direct image
|
| 56 |
+
|
| 57 |
+
to image registration [21]. Using graph convolutional networks and cycle consistency losses, one can train an image matching algorithm in an unsupervised manner [45].
|
| 58 |
+
|
| 59 |
+
In [60], a deep learning method for classifying 2D point correspondences into inliers/outliers is proposed. The regression of the Essential Matrix is computed separately using eigendecomposition and the inlier correspondences. The input of the network is only pixel coordinates instead of original images allowing for faster inference. The method was improved in [62], by proposing hierarchically extracted and aggregated local correspondences. The method is also insensitive to the order of correspondences. In [11], an eigendecomposition-free approach was introduced to train a deep network whose loss depends on the eigenvector corresponding to a zero eigenvalue of a matrix predicted by the network. This was also applied to 2D outlier removal. In [33], a DNN classifier was trained on a general match representation based on putative match through exploiting the consensus of local neighborhood structures and a nearest neighbor strategy. In contrast with the methods mentioned above, our technique aims at getting an end-to-end solution to the registration and outlier/inlier classification from matches of 3D point correspondences.
|
| 60 |
+
|
| 61 |
+
For 3D reconstruction using a large collection of scans, rotation averaging can be used to improve the pairwise relative pose estimates using robust methods [8]. Recently, it was shown that it would be possible to utilize deep neural networks to compute the weights for different pairwise relative pose estimates [26]. The work in [64] focuses on learning 3D match of features in three views. Our paper focuses on the problem of pairwise registration of 3D scans.
|
| 62 |
+
|
| 63 |
+
# 3. Problem Statement
|
| 64 |
+
|
| 65 |
+
Given a set of $N$ 3D point correspondences $\{(\mathbf{p}_i,\mathbf{q}_i)\}_{i = 1}^N$ , where $\mathbf{p}_i\in \mathbb{R}^3$ , $\mathbf{q}_i\in \mathbb{R}^3$ are the 3D points in the first and second scan respectively, our goal is to compute the transformation parameters (rotation matrix $\mathbf{R}\in \mathcal{SO}(3)$ and translation vector $\mathbf{t}\in \mathbb{R}^3$ ) as follows
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
\mathbf {R} ^ {*}, \mathbf {t} ^ {*} = \underset {\mathbf {R} \in \mathcal {S O} (3), \mathbf {t} \in \mathbb {R} ^ {3}} {\operatorname {a r g m i n}} \sum_ {n = 1} ^ {N} \rho (\mathbf {q} _ {n}, \mathbf {R p} _ {n} + \mathbf {t}), \tag {1}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
where $\rho (\mathbf{a},\mathbf{b})$ is some distance metric. The problem addressed in this work is shown in Fig. 1. The input consists of $N$ point correspondences, and the output consists of $N + M + 3$ variables. Specifically, the first $N$ output variables form a weight vector $W\coloneqq \{w_{i}\}_{i = 1}^{N}$, where $w_{i}\in [0,1)$ represents the confidence that the $i$-th correspondence pair $(\mathbf{p}_i,\mathbf{q}_i)$ is an inlier. By comparing $w_{i}$ with a threshold $\mathcal{T}$, i.e., $w_{i}\geq \mathcal{T}$, we can classify all the input correspondences into inliers/outliers. The next $M$ output variables represent the rotation parameters, i.e., $(v_{1},\ldots ,v_{M})$. The remaining three parameters $(t_1,t_2,t_3)$ denote the translation.
|
| 72 |
+
|
| 73 |
+
Although a 3D rotation has exactly 3 degrees of freedom, there are different possible parameterizations. As shown in [66], choosing the correct parameterization for the rotation is essential for the overall performance of these approaches. Previous methods over-parameterize the rotation (e.g., PoseNet [27] uses a four-parameter quaternion to represent the rotation, while deep PnP [11] uses nine parameters). We study different parameterizations of the rotation and evaluate their performance.
|
| 74 |
+
|
| 75 |
+
# 4. 3DRegNet
|
| 76 |
+
|
| 77 |
+
The proposed 3DRegNet architecture is shown in Fig. 2 with two blocks for classification and registration. We have two possible approaches for the registration block, either using DNNs or differentiable Procrustes. This choice does not affect the loss functions presented in Sec. 4.1.
|
| 78 |
+
|
| 79 |
+
Classification: The classification block (see the respective block in Fig. 2(c)) follows the ideas of previous works [46, 60, 11, 62]. The input is a set of 6-tuples, namely the 3D point correspondences $\{(\mathbf{p}_i,\mathbf{q}_i)\}_{i = 1}^N$ between the two scans.
|
| 80 |
+
|
| 81 |
+
Each 3D point correspondence is processed by a fully connected layer with 128 ReLU activation functions. There is a weight sharing for each of the individual $N$ point correspondences, and the output is of dimension $N \times 128$ , where we generate 128 dimensional features from every point correspondence. The $N \times 128$ output is then passed through $C$ deep ResNets [22], with weight-shared fully connected layers instead of convolutional layers. At the end, we use another fully connected layer with ReLU $(\mathrm{ReLU}(x) = \max(0, x))$ followed by $\tanh(\tanh(x) = \frac{e^x - e^{-x}}{e^x + e^{-x}} \in (-1, 1))$ units to produce the weights in the range $w_i \in [0, 1)$ . The number $C$ of deep ResNets depends on the complexity of the transformation to be estimated as is discussed in Sec. 5.
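To make the layer structure concrete, the following is a minimal PyTorch sketch of the classification block as described above: a shared fully connected embedding per correspondence, $C$ fully connected residual blocks, and a ReLU-then-tanh head producing weights in $[0,1)$. It omits the context normalization used in the full model, and all names and the toy input are illustrative, not the authors' implementation.

```python
import torch
import torch.nn as nn

class FCResNetBlock(nn.Module):
    """Fully connected residual block, shared across the N correspondences."""
    def __init__(self, dim=128):
        super().__init__()
        self.fc1 = nn.Linear(dim, dim)
        self.fc2 = nn.Linear(dim, dim)

    def forward(self, x):                       # x: (N, dim)
        return x + self.fc2(torch.relu(self.fc1(x)))

class ClassificationBlock(nn.Module):
    """Simplified 3DRegNet classification block (context normalization omitted)."""
    def __init__(self, num_resnets=8, dim=128):
        super().__init__()
        self.embed = nn.Linear(6, dim)           # one 6D correspondence -> 128 features
        self.resnets = nn.ModuleList(FCResNetBlock(dim) for _ in range(num_resnets))
        self.head = nn.Linear(dim, 1)

    def forward(self, corr):                     # corr: (N, 6) stacked (p_i, q_i)
        x = torch.relu(self.embed(corr))         # (N, 128)
        feats = [x]
        for block in self.resnets:
            x = block(x)
            feats.append(x)                      # features reused by the registration block
        w = torch.tanh(torch.relu(self.head(x))).squeeze(-1)  # weights in [0, 1)
        return w, feats

# toy usage: 3000 random correspondences
corr = torch.randn(3000, 6)
weights, features = ClassificationBlock()(corr)
```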
|
| 82 |
+
|
| 83 |
+
Registration with DNNs: The input to this block are the features extracted from the point correspondences. As shown in Fig. 2(d), we use pooling to extract meaningful features of dimension $128 \times 1$ from each layer of the classification block. We extract features at $C + 1$ stages of the classification, i.e., the first one is extracted before the first ResNet and the last one after the $C$-th ResNet. Based on our experiments, max-pooling performed best in comparison with other choices such as average pooling. After the pooling is completed, we apply context normalization, as introduced in [60], and concatenate the $C + 1$ feature maps (see Figs. 2(a) and 2(d)). This process normalizes the features and helps to extract a fixed number of features (independent of $N$) from which the transformation is obtained at the end of the registration block. The output of the context normalization is of size $(C + 1) \times 128$, which is then passed on to a convolutional layer with 8 channels.
|
| 84 |
+
|
| 85 |
+
Each filter processes a 3-by-3 patch with a stride of 2 along the columns and 1 along the rows. The output of the convolution is then fed into two fully connected layers with 256 units each, with a ReLU between them, which generate the output of $M + 3$ variables: $\mathbf{v} = (v_{1},\dots,v_{M})$ and $\mathbf{t} = (t_1,t_2,t_3)$.
|
| 86 |
+
|
| 87 |
+
Registration with Differentiable Procrustes: In contrast to the previous block, we present another alternative to perform the registration. Now, we obtain the desired transformation through the point correspondences (see Fig. 2(b)). We filter out the outliers and compute the centroid of the inliers, using this as the origin. Since the centroids of the point clouds are now at the origin, we only need to obtain the rotation between them. Note that the outlier filtering and the shift in the centroids can be seen as intermediate layers, thereby allowing end-to-end training for both classification and pose computation. This rotation is computed from the SVD of the matrix $\mathbf{M} = \mathbf{U}\boldsymbol{\Sigma}\mathbf{V}^{\mathrm{T}}$ [3], where $\mathbf{M} \in \mathbb{R}^{3 \times 3}$ is as follows:
|
| 88 |
+
|
| 89 |
+
$$
|
| 90 |
+
\mathbf {M} = \sum_ {i \in \mathcal {I}} w _ {i} \mathbf {p} _ {i} \mathbf {q} _ {i} ^ {T}, \tag {2}
|
| 91 |
+
$$
|
| 92 |
+
|
| 93 |
+
where $\mathcal{I}$ represents the set of inliers obtained from the classification block. The rotation is obtained by
|
| 94 |
+
|
| 95 |
+
$$
|
| 96 |
+
\mathbf {R} = \mathbf {U} \operatorname {d i a g} (1, 1, \det (\mathbf {U V} ^ {T})) \mathbf {V} ^ {T}. \tag {3}
|
| 97 |
+
$$
|
| 98 |
+
|
| 99 |
+
The translation parameters are given by
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\mathbf {t} = \frac {1}{N _ {\mathcal {I}}} \left(\sum_ {i \in \mathcal {I}} \mathbf {p} _ {i} - \mathbf {R} \sum_ {i \in \mathcal {I}} \mathbf {q} _ {i}\right), \tag {4}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
where $N_{\mathcal{I}}$ and $\mathcal{I}$ are the number of inliers and the inlier set, respectively.
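The sketch below is a minimal NumPy implementation of this weighted Procrustes step in the spirit of Eqs. (2)-(4). It centers both point sets at their weighted centroids and uses the standard Kabsch/Umeyama convention for the SVD-based rotation (which may differ in notation from the equations above); the function and variable names are illustrative.

```python
import numpy as np

def weighted_procrustes(p, q, w):
    """Closed-form weighted rigid alignment q ~ R p + t (Kabsch/Umeyama convention)."""
    w = w / w.sum()
    p_bar = (w[:, None] * p).sum(axis=0)                 # weighted centroids
    q_bar = (w[:, None] * q).sum(axis=0)
    M = (w[:, None] * (p - p_bar)).T @ (q - q_bar)        # 3x3 weighted cross-covariance
    U, _, Vt = np.linalg.svd(M)
    D = np.diag([1.0, 1.0, np.linalg.det(Vt.T @ U.T)])    # guard against reflections
    R = Vt.T @ D @ U.T
    t = q_bar - R @ p_bar
    return R, t

# toy check: recover a known transformation from noiseless correspondences
rng = np.random.default_rng(0)
p = rng.normal(size=(100, 3))
R_gt, _ = np.linalg.qr(rng.normal(size=(3, 3)))
R_gt *= np.sign(np.linalg.det(R_gt))                      # make it a proper rotation
t_gt = np.array([0.1, -0.2, 0.3])
q = p @ R_gt.T + t_gt
R, t = weighted_procrustes(p, q, np.ones(100))
assert np.allclose(R, R_gt, atol=1e-6) and np.allclose(t, t_gt, atol=1e-6)
```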
|
| 106 |
+
|
| 107 |
+
# 4.1. Loss Functions
|
| 108 |
+
|
| 109 |
+
Our overall loss function has two individual loss terms, namely classification and registration losses from the two blocks of the network.
|
| 110 |
+
|
| 111 |
+
Classification Loss: The classification loss penalizes incorrect correspondences using cross-entropy:
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\mathcal {L} _ {c} ^ {k} = \frac {1}{N} \sum_ {i = 1} ^ {N} \gamma_ {i} ^ {k} H \left(y _ {i} ^ {k}, \sigma \left(o _ {i} ^ {k}\right)\right), \tag {5}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
where $o_i^k$ are the network outputs before they are passed through ReLU and tanh to compute the weights $w_{i}$, and $\sigma$ denotes the sigmoid activation function. Note that the motion between each pair of scans is different, and the index $k$ denotes the associated training pair of scans. $H(.,.)$ is the cross-entropy function, and $y_{i}^{k}$ (equal to one or zero) is the ground truth, which indicates whether the $i$-th point correspondence is an inlier or an outlier. The term $\mathcal{L}_c^k$ is the classification loss for the 3D point correspondences of a particular scan pair with index $k$.
|
| 118 |
+
|
| 119 |
+
The weight $\gamma_{i}^{k}$ balances the classification loss by the number of examples of each class in the associated scan pair $k$.
|
| 120 |
+
|
| 121 |
+
Registration Loss: The registration loss penalizes misaligned points in the point cloud using the distance between the 3D points in the second scan $\mathbf{q}_i$ and the transformed points from the first 3D scan $\mathbf{p}_i$ , for $i = \{1, \dots, N\}$ . The loss function becomes
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\mathcal {L} _ {r} ^ {k} = \frac {1}{N} \sum_ {i = 1} ^ {N} \rho \left(\mathbf {q} _ {i} ^ {k}, \mathbf {R} ^ {k} \mathbf {p} _ {i} ^ {k} + \mathbf {t} ^ {k}\right), \tag {6}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
where $\rho (.,.)$ is the distance metric function. For a given scan pair $k$, the relative motion parameters obtained from the registration block are given by $\mathbf{R}^k$ and $\mathbf{t}^k$. In Sec. 7, we consider and evaluate several distance metrics: $L_{1}$, $L_{2}$, weighted least squares, and Geman-McClure [18].
|
| 128 |
+
|
| 129 |
+
Total Loss: The individual loss functions are given below:
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
\mathcal{L}_{c} = \frac{1}{K} \sum_{k = 1}^{K} \mathcal{L}_{c}^{k} \quad \text{and} \quad \mathcal{L}_{r} = \frac{1}{K} \sum_{k = 1}^{K} \mathcal{L}_{r}^{k}, \tag{7}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
where $K$ is the total number of scan pairs in the training set. The total training loss is the sum of both the classification and the registration loss terms:
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\mathcal {L} = \alpha \mathcal {L} _ {c} + \beta \mathcal {L} _ {r}, \tag {8}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where the coefficients $\alpha$ and $\beta$ are hyperparameters that are set manually for the classification and registration terms of the loss function.
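As an illustration of how the terms combine, here is a small NumPy sketch of the per-pair loss of Eqs. (5)-(8), assuming the $L_1$ distance for $\rho$ and a simple frequency-based choice for the balancing weights $\gamma_i^k$ (the exact balancing scheme is an assumption, and the names are illustrative).

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def pair_loss(o, y, p, q, R, t, alpha=0.5, beta=1e-3):
    """Per scan-pair loss: balanced cross-entropy (Eq. 5) + L1 registration loss (Eq. 6),
    combined as in Eq. (8). o: raw outputs, y: 0/1 inlier labels, p/q: (N, 3) points."""
    s = np.clip(sigmoid(o), 1e-7, 1 - 1e-7)
    # gamma balances the two classes by their frequencies in this pair (assumed scheme)
    gamma = np.where(y == 1, 0.5 / max(y.mean(), 1e-7), 0.5 / max(1 - y.mean(), 1e-7))
    L_c = np.mean(gamma * -(y * np.log(s) + (1 - y) * np.log(1 - s)))
    L_r = np.mean(np.abs(q - (p @ R.T + t)).sum(axis=1))   # L1 distance per correspondence
    return alpha * L_c + beta * L_r

# toy usage with random data and the identity transformation
rng = np.random.default_rng(1)
o = rng.normal(size=50); y = (rng.random(50) > 0.5).astype(float)
p = rng.normal(size=(50, 3)); q = p.copy()
print(pair_loss(o, y, p, q, np.eye(3), np.zeros(3)))
```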
|
| 142 |
+
|
| 143 |
+
# 5. 3DRegNet Refinement
|
| 144 |
+
|
| 145 |
+
We describe our architecture consisting of two 3DRegNets, where the second network provides a regression refinement (see Fig. 3(a)). A commonly adopted approach for 3D registration is to first obtain a rough estimate of the transformation and then apply a refinement strategy. Following this reasoning, we consider the possibility of using an additional 3DRegNet. The first 3DRegNet provides a rough estimate and is trained for larger rotation and translation values. Subsequently, a second, smaller network is used for refinement, estimating smaller transformations. This can also be seen as deep supervision, which has been shown to be useful in many applications [30]. Figure 3(a) illustrates the proposed architecture.
|
| 146 |
+
|
| 147 |
+
Architecture: As shown in Fig. 3(a), we use two 3DRegNets, where the first one is used to obtain the coarse registration, followed by the second one performing the refinement. Each 3DRegNet is characterized by the regression parameters $\{(\mathbf{R}^r,\mathbf{t}^r)\}$ and the classification weights $\{w_i^r\}_{i = 1}^N$ with $r = \{1,2\}$. We note that the loss on the second network has to consider the cumulative regression of both 3DRegNets.
|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
(a) Scheme for refinement using 3DRegNet.
|
| 151 |
+
|
| 152 |
+

|
| 153 |
+
(b) Before Refinement
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
(c) After Refinement
|
| 157 |
+
Figure 3: (a) shows the proposed architecture with two 3DRegNet blocks in sequence. (b),(c) show an improvement upon using an additional 3DRegnet to fine-tune or refine the registration from the first 3DRegNet.
|
| 158 |
+
|
| 159 |
+
Hence, the original set of point correspondences $\{(\mathbf{p}_i,\mathbf{q}_i)\}_{i = 1}^N$ is transformed by the following cumulative rotation and translation
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
\mathbf{R} = \mathbf{R}^{2} \mathbf{R}^{1} \quad \text{and} \quad \mathbf{t} = \mathbf{R}^{2} \mathbf{t}^{1} + \mathbf{t}^{2}. \tag{9}
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
Notice that, in (9), the update of the transformation parameters $\mathbf{R}$ and $\mathbf{t}$ , depends on the estimates of both 3DRegNets. The point correspondence update at the refinement network becomes
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\left\{\left(\mathbf {p} _ {i} ^ {1}, \mathbf {q} _ {i} ^ {1}\right) \right\} = \left\{\left(w _ {i} ^ {1} \left(\mathbf {R} ^ {1} \mathbf {p} _ {i} + \mathbf {t} ^ {1}\right), w _ {i} ^ {1} \mathbf {q} _ {i}\right) \right\}, \tag {10}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
forcing the second network to obtain smaller transformations that corrects for any residual transformation following the first 3DRegNet block.
|
| 172 |
+
|
| 173 |
+
Loss Functions: The classification and registration losses are computed as in (5) and (6) for each of the two networks and then averaged to form the total loss:
|
| 174 |
+
|
| 175 |
+
$$
|
| 176 |
+
\mathcal{L}_{c} = \frac{1}{K} \sum_{k = 1}^{K} \frac{1}{2} \sum_{r = 1}^{2} \mathcal{L}_{c}^{k,r} \quad \text{and} \quad \mathcal{L}_{r} = \frac{1}{K} \sum_{k = 1}^{K} \frac{1}{2} \sum_{r = 1}^{2} \mathcal{L}_{r}^{k,r}. \tag{11}
|
| 177 |
+
$$
|
| 178 |
+
|
| 179 |
+
We then apply (8) as before.
|
| 180 |
+
|
| 181 |
+
# 6. Datasets and 3DRegNet Training
|
| 182 |
+
|
| 183 |
+
Datasets: We use two datasets, the synthetic augmented ICL-NUIM Dataset [9] and the SUN3D [63] consisting of real images. The former consists of 4 scenes with a total of about 25000 different pairs of connected point clouds. The latter is composed of 13 randomly selected scenes,
|
| 184 |
+
|
| 185 |
+
with a total of around 3700 different connected pairs. Using FPFH [47], we extract about 3000 3D point correspondences for each pair of scans in both datasets. Based on the ground-truth transformations and the 3D distances between the transformed 3D points, correspondences are labeled as inliers/outliers (i.e., $y_{n}^{k}$ is set to one or zero) using a predefined threshold. The threshold is set such that the number of outliers is about $50\%$ of the total matches. We select $70\%$ of the pairs for training and $30\%$ for testing on the ICL-NUIM Dataset. For the SUN3D Dataset, we select 10 scenes for training and 3 scenes, completely unseen during training, for testing.
|
| 186 |
+
|
| 187 |
+
Training: The proposed architecture is implemented in Tensorflow [1]. We used $C = 8$ for the first 3DRegNet and $C = 4$ for the refinement 3DRegNet<sup>1</sup>. The other values for the registration blocks are detailed in Sec. 4. The network was trained for 1000 epochs with 1092 steps for the ICL-NUIM dataset and for 1000 epochs with 200 steps for the SUN3D dataset. The learning rate was $10^{-4}$ , while using the Adam Optimizer [28]. A cross-validation strategy is used during training. We used a batch size of 16. The coefficients of the classification and registration terms are given by $\alpha = 0.5$ and $\beta = 10^{-3}$ . The network was trained using an INTEL i7-7600 and a NVIDIA GEFORCE GTX 1070. For a fair comparison to the classical methods, all run times were obtained using CPU, only.
|
| 188 |
+
|
| 189 |
+
Data Augmentation: To generalize to unseen rotations, we augment the training dataset by applying random rotations. Taking inspiration from [4, 37, 42], we propose the use of Curriculum Learning (CL) data augmentation. The idea is to start small [4] (i.e., with easier tasks containing small rotations) and to order the tasks by increasing difficulty. Training only proceeds to harder tasks after the easier ones are completed. However, we adopt an interesting alternative to traditional CL. Let $\theta$ denote the magnitude of the augmented rotation applied during training, and let $\tau \in [0,1]$ denote the normalized progress within an epoch. In CL, we should start small at the beginning of each epoch. However, this breaks the smoothness of the $\theta$ values, since the maximum value $\theta_{\mathrm{Max}}$ has already been reached at the end of the previous epoch. This can easily be tackled by progressively increasing $\theta$ up to $\theta_{\mathrm{Max}}$ at $\tau = 0.5$ and decreasing $\theta$ afterwards (see the sketch below).
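A small sketch of this schedule; the triangular shape peaking at $\tau = 0.5$ is one simple realization consistent with the description, not necessarily the exact schedule used.

```python
def rotation_magnitude(tau, theta_max=50.0):
    """Augmentation angle (degrees) at normalized epoch progress tau in [0, 1]:
    increases to theta_max at tau = 0.5 and decreases back to 0 afterwards."""
    return theta_max * (1.0 - abs(2.0 * tau - 1.0))

# theta is 0 at the start and end of an epoch and peaks mid-epoch
assert rotation_magnitude(0.0) == 0.0
assert rotation_magnitude(0.5) == 50.0
assert rotation_magnitude(1.0) == 0.0
```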
|
| 190 |
+
|
| 191 |
+
# 7. Experimental Results
|
| 192 |
+
|
| 193 |
+
In this section, we start by defining the evaluation metrics used throughout the experiments. Then, we present ablation studies considering: 1) the use of different distance metrics; 2) different parameterizations for the rotation; 3) the use of Procrustes vs. DNNs for estimating the transformation parameters;
|
| 194 |
+
|
| 195 |
+
<table><tr><td rowspan="2">Distance Function</td><td colspan="2">Rotation [deg]</td><td colspan="2">Translation [m]</td><td rowspan="2">Time [s]</td><td rowspan="2">Classification Accuracy</td></tr><tr><td>Mean</td><td>Median</td><td>Mean</td><td>Median</td></tr><tr><td>L2-norm</td><td>2.44</td><td>1.64</td><td>0.087</td><td>0.067</td><td>0.0295</td><td>0.95</td></tr><tr><td>L1-norm</td><td>1.37</td><td>0.90</td><td>0.054</td><td>0.042</td><td>0.0281</td><td>0.96</td></tr><tr><td>Weighted L2-norm</td><td>1.89</td><td>1.33</td><td>0.070</td><td>0.056</td><td>0.0294</td><td>0.95</td></tr><tr><td>Geman-McClure</td><td>2.45</td><td>1.59</td><td>0.089</td><td>0.068</td><td>0.0300</td><td>0.95</td></tr></table>
|
| 196 |
+
|
| 197 |
+
4) the sensitivity to the number of point correspondences; 5) the use of data augmentation in training; and 6) the use of the refinement network. The ablation studies are performed on the ICL-NUIM dataset. We conclude the experiments with a comparison with previous methods and the application of our method to unseen scenes.
|
| 198 |
+
|
| 199 |
+
Evaluation Metrics: We defined the following metrics for accuracy. For rotation, we use
|
| 200 |
+
|
| 201 |
+
$$
|
| 202 |
+
\delta \left(\mathbf {R}, \mathbf {R} _ {\mathrm {G T}}\right) = \operatorname {a c o s} \left(\frac {\operatorname {t r a c e} \left(\mathbf {R} ^ {- 1} \mathbf {R} _ {\mathrm {G T}}\right) - 1}{2}\right), \tag {12}
|
| 203 |
+
$$
|
| 204 |
+
|
| 205 |
+
where $\mathbf{R}$ and $\mathbf{R}_{\mathrm{GT}}$ are the estimated and ground-truth rotation matrices, respectively. We refer to [35] for more details. For measuring the accuracy of translation, we use
|
| 206 |
+
|
| 207 |
+
$$
|
| 208 |
+
\delta \left(\mathbf {t}, \mathbf {t} _ {\mathrm {G T}}\right) = \left\| \mathbf {t} - \mathbf {t} _ {\mathrm {G T}} \right\|. \tag {13}
|
| 209 |
+
$$
|
| 210 |
+
|
| 211 |
+
For the classification accuracy, we used the standard classification error. The computed weights $w_{i} \in [0,1)$ will be rounded to 0 or 1 based on a threshold $(\mathcal{T} = 0.5)$ before measuring the classification error.
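For reference, Eqs. (12) and (13) can be computed as in the following NumPy sketch (the clipping of the cosine is a numerical safeguard added here; the names are illustrative).

```python
import numpy as np

def rotation_error_deg(R, R_gt):
    """Angular distance between rotations, Eq. (12), in degrees."""
    cos = (np.trace(R.T @ R_gt) - 1.0) / 2.0       # R^{-1} = R^T for rotation matrices
    return np.degrees(np.arccos(np.clip(cos, -1.0, 1.0)))

def translation_error(t, t_gt):
    """Euclidean distance between translations, Eq. (13)."""
    return np.linalg.norm(t - t_gt)

# identical rotations give zero error
assert np.isclose(rotation_error_deg(np.eye(3), np.eye(3)), 0.0)
```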
|
| 212 |
+
|
| 213 |
+
# 7.1. Ablation Studies
|
| 214 |
+
|
| 215 |
+
Distance Metrics: We start these experiments by evaluating the 3DRegNet training using different types of distance metrics in the regression loss function. Namely, we use: 1) the $L_{2}$-norm; 2) the $L_{1}$-norm; 3) the weighted $L_{2}$-norm, with the weights obtained from the classification block; and 4) the Geman-McClure distance. For all the pairwise correspondences in the testing phase, we compute the rotation and translation errors obtained by the 3DRegNet. The results are reported in Tab. 1, in which we use the minimal Lie algebra representation for the rotation.
|
| 216 |
+
|
| 217 |
+
As can be seen from these results (see Tab. 1), the $L_{1}$-norm gives the best results on all the evaluation criteria. It is interesting to note that the weighted $L_{2}$-norm, despite using the weights from the classification block, did not perform as well as the $L_{1}$-norm. This is plausible since the registration block also utilizes the outputs from some of the intermediate layers of the classification block. Based on these results, the remaining evaluations are conducted using the $L_{1}$-norm.
|
| 218 |
+
|
| 219 |
+
Parameterization of R: We study the following three parameterizations for the rotation: 1) the minimal Lie algebra representation (three parameters); 2) quaternions (four parameters); and 3) the linear matrix form (nine parameters).
|
| 220 |
+
|
| 221 |
+
Table 1: Evaluation of the different distance functions on the training of the proposed architecture.
|
| 222 |
+
|
| 223 |
+
<table><tr><td></td><td colspan="2">Rotation [deg]</td><td colspan="2">Translation [m]</td><td rowspan="2">Time [s]</td><td rowspan="2">Classification Accuracy</td></tr><tr><td>Representation</td><td>Mean</td><td>Median</td><td>Mean</td><td>Median</td></tr><tr><td>Lie Algebra</td><td>1.37</td><td>0.90</td><td>0.054</td><td>0.042</td><td>0.0281</td><td>0.96</td></tr><tr><td>Quaternions</td><td>1.55</td><td>1.11</td><td>0.067</td><td>0.054</td><td>0.0284</td><td>0.95</td></tr><tr><td>Linear</td><td>5.78</td><td>4.78</td><td>0.059</td><td>0.042</td><td>0.0275</td><td>0.95</td></tr><tr><td>Procrustes</td><td>1.65</td><td>1.52</td><td>0.235</td><td>0.233</td><td>0.0243</td><td>0.52</td></tr></table>
|
| 224 |
+
|
| 225 |
+
Table 2: Evaluation of different representations for the rotations.
|
| 226 |
+
|
| 227 |
+
<table><tr><td rowspan="2">Matches</td><td colspan="2">Rotation [deg]</td><td colspan="2">Translation [m]</td><td rowspan="2">Time [s]</td><td rowspan="2">Classification Accuracy</td></tr><tr><td>Mean</td><td>Median</td><td>Mean</td><td>Median</td></tr><tr><td>10%</td><td>2.40</td><td>1.76</td><td>0.089</td><td>0.073</td><td>0.0106</td><td>0.94</td></tr><tr><td>25%</td><td>1.76</td><td>1.22</td><td>0.068</td><td>0.054</td><td>0.0149</td><td>0.95</td></tr><tr><td>50%</td><td>1.51</td><td>1.01</td><td>0.060</td><td>0.047</td><td>0.0188</td><td>0.95</td></tr><tr><td>75%</td><td>1.41</td><td>0.92</td><td>0.056</td><td>0.044</td><td>0.0241</td><td>0.96</td></tr><tr><td>90%</td><td>1.38</td><td>0.90</td><td>0.055</td><td>0.043</td><td>0.0267</td><td>0.96</td></tr><tr><td>100%</td><td>1.37</td><td>0.90</td><td>0.054</td><td>0.042</td><td>0.0281</td><td>0.96</td></tr></table>
|
| 228 |
+
|
| 229 |
+
Table 3: Evaluation of different number of correspondences.
|
| 230 |
+
|
| 231 |
+
The results are shown in Tab. 2. We observe that the minimal parameterization using the Lie algebra provides the best results. In the experimental results that follow, we use the three-parameter Lie algebra representation (see the sketch of the corresponding exponential map below). While the Lie algebra performs better for the problem at hand, we cannot generalize this conclusion to other problems such as human pose estimation, as shown in [66].
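One standard way to realize the three-parameter Lie algebra representation is the exponential map (Rodrigues' formula), mapping a vector $\mathbf{v} \in \mathbb{R}^3$ to a rotation matrix; the sketch below is illustrative and not taken from the authors' code.

```python
import numpy as np

def so3_exp(v):
    """Rodrigues' formula: map a 3-vector in so(3) to a rotation matrix in SO(3)."""
    theta = np.linalg.norm(v)
    if theta < 1e-12:
        return np.eye(3)
    k = v / theta
    K = np.array([[0, -k[2], k[1]],
                  [k[2], 0, -k[0]],
                  [-k[1], k[0], 0]])            # skew-symmetric matrix of the unit axis
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

# a rotation of pi/2 about the z-axis maps the x-axis to the y-axis
R = so3_exp(np.array([0.0, 0.0, np.pi / 2]))
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
```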
|
| 232 |
+
|
| 233 |
+
Regression with DNNs vs. Procrustes: We evaluate the merits of using DNNs vs. Procrustes to obtain the 3D registration, as shown in Fig. 2(a) and Fig. 2(b). From Tab. 2, we conclude that the differentiable Procrustes method does not solve the problem as accurately as the DNNs. Its run time is lower than that of the DNNs with the Lie algebra representation, but the difference is small and can be neglected. On the other hand, the classification accuracy degrades significantly. From now on, we use the DNNs for the regression.
|
| 234 |
+
|
| 235 |
+
Sensitivity to the number of correspondences: Instead of considering all the correspondences in each of the pairwise scans of the testing examples, we select a percentage of the total number of matches ranging from $10\%$ to $100\%$ (recall that the total number of correspondences per pair is around 3000). The results are shown in Tab. 3.
|
| 236 |
+
|
| 237 |
+
As expected, the accuracy of the regression degrades as the number of input correspondences decreases. The classification, however, is not affected. The inlier/outlier classification should not depend on the number of input correspondences, whereas a larger number of inliers should lead to a better estimate of the transformation.
|
| 238 |
+
|
| 239 |
+
Data Augmentation: Using the 3DRegNet trained in the previous sections, we select a pair of 3D scans from the training data and rotate the original point clouds to increase the rotation angle between them.
|
| 240 |
+
|
| 241 |
+

|
| 242 |
+
Figure 4: Training with and without data augmentation. An improvement in the test results is observed when perturbations are applied. The data augmentation regularizes the network for rotations that were not included in the original dataset.
|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
|
| 246 |
+
<table><tr><td></td><td colspan="2">Rotation [deg]</td><td colspan="2">Translation [m]</td><td rowspan="2">Time [s]</td><td rowspan="2">Classification Accuracy</td></tr><tr><td>Refinement</td><td>Mean</td><td>Median</td><td>Mean</td><td>Median</td></tr><tr><td>without</td><td>1.37</td><td>0.90</td><td>0.054</td><td>0.042</td><td>0.0281</td><td>0.96</td></tr><tr><td>with</td><td>1.19</td><td>0.89</td><td>0.053</td><td>0.044</td><td>0.0327</td><td>0.94</td></tr></table>
|
| 247 |
+
|
| 248 |
+
We vary the magnitude of this rotation $(\theta)$ from 0 to 50 degrees, and the results for the rotation error and accuracy in testing are shown in Fig. 4 (green curve). Afterward, we train the network a second time using the data augmentation strategy proposed in Sec. 6. At each step, the pair of examples is perturbed by a rotation in increasing steps of $2^{\circ}$, with the maximum value set to $\theta = 50^{\circ}$. We run the test as before, and the results are shown in Fig. 4 (blue curve).
|
| 249 |
+
|
| 250 |
+
From this experiment we can conclude that, by training only with the original dataset, the network is constrained to the rotations contained in that dataset. On the other hand, by performing a smooth regularization (CL data augmentation), we can overcome this drawback. Since the datasets at hand are sequences of small motions, there is no benefit in generalizing the results over larger rotations. If all the involved transformations are small, the network should be trained as such. We do not carry out data augmentation in the following experiments.
|
| 251 |
+
|
| 252 |
+
3DRegNet refinement: We consider the use of the extra 3DRegNet presented in Sec. 5 for regression refinement. This composition of two similar networks was developed to improve the accuracy of the results. From Tab. 4, we observe an overall improvement in the transformation estimation, without compromising the run time significantly. The classification accuracy decreases by $2\%$, but this does not affect the final regression. This improvement in the estimation can also be seen in Fig. 3, where the estimate using only one 3DRegNet (Fig. 3(b)) is still somewhat far from the true alignment, whereas the 3DRegNet with refinement, shown in Fig. 3(c), is closer to the correct alignment. For the remainder of the paper, when we refer to 3DRegNet, we mean the network with refinement.
|
| 253 |
+
|
| 254 |
+
Table 4: Evaluation of the use of 3DRegNet refinement.
|
| 255 |
+
|
| 256 |
+
<table><tr><td></td><td colspan="2">Rotation [deg]</td><td colspan="2">Translation [m]</td><td rowspan="2">Time [s]</td></tr><tr><td>Method</td><td>Mean</td><td>Median</td><td>Mean</td><td>Median</td></tr><tr><td>FGR</td><td>1.39</td><td>0.53</td><td>0.045</td><td>0.024</td><td>0.2669</td></tr><tr><td>ICP</td><td>3.78</td><td>0.43</td><td>0.121</td><td>0.023</td><td>0.1938</td></tr><tr><td>RANSAC</td><td>1.89</td><td>1.45</td><td>0.063</td><td>0.051</td><td>0.8441</td></tr><tr><td>3DRegNet</td><td>1.19</td><td>0.89</td><td>0.053</td><td>0.044</td><td>0.0327</td></tr><tr><td>FGR + ICP</td><td>1.01</td><td>0.38</td><td>0.038</td><td>0.021</td><td>0.3422</td></tr><tr><td>RANSAC + U</td><td>1.42</td><td>1.02</td><td>0.050</td><td>0.042</td><td>0.8441</td></tr><tr><td>3DRegNet + ICP</td><td>0.55</td><td>0.34</td><td>0.030</td><td>0.021</td><td>0.0691</td></tr><tr><td>3DRegNet + U</td><td>0.28</td><td>0.22</td><td>0.014</td><td>0.011</td><td>0.0327</td></tr></table>
|
| 257 |
+
|
| 258 |
+
(a) Baselines results on the ICL-NUIM Dataset.
|
| 259 |
+
|
| 260 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Rotation [deg]</td><td colspan="2">Translation [m]</td><td rowspan="2">Time [s]</td></tr><tr><td>Mean</td><td>Median</td><td>Mean</td><td>Median</td></tr><tr><td>FGR</td><td>2.57</td><td>1.92</td><td>0.121</td><td>0.067</td><td>0.1623</td></tr><tr><td>ICP</td><td>3.18</td><td>1.50</td><td>0.146</td><td>0.079</td><td>0.0596</td></tr><tr><td>RANSAC</td><td>3.00</td><td>1.73</td><td>0.148</td><td>0.074</td><td>2.6156</td></tr><tr><td>3DRegNet</td><td>1.84</td><td>1.69</td><td>0.087</td><td>0.078</td><td>0.0398</td></tr><tr><td>FGR + ICP</td><td>1.49</td><td>1.10</td><td>0.070</td><td>0.046</td><td>0.1948</td></tr><tr><td>RANSAC + U</td><td>2.74</td><td>1.48</td><td>0.134</td><td>0.061</td><td>2.6157</td></tr><tr><td>3DRegNet + ICP</td><td>1.26</td><td>1.14</td><td>0.066</td><td>0.048</td><td>0.0852</td></tr><tr><td>3DRegNet + U</td><td>1.16</td><td>1.10</td><td>0.053</td><td>0.050</td><td>0.0398</td></tr></table>
|
| 261 |
+
|
| 262 |
+
(b) Results on unseen sequences (SUN3D Dataset).
|
| 263 |
+
|
| 264 |
+
Table 5: Comparison with the baselines: FGR [65]; RANSAC-based approaches [17, 48]; and ICP [6].
|
| 265 |
+
|
| 266 |
+
# 7.2. Baselines
|
| 267 |
+
|
| 268 |
+
We use three baselines. The first is the Fast Global Registration (FGR) [65] geometric method, which aims to provide a global solution for a set of 3D correspondences. The second baseline is the classical RANSAC method [17]. The third baseline is ICP [6]. Note that we are comparing our technique against both correspondence-free (ICP) and correspondence-based methods (FGR, RANSAC). For this test, we use the ICL-NUIM dataset. To ascertain which strategy provides the best registration prior for ICP, we apply two methods, termed FGR + ICP and 3DRegNet + ICP, where the initialization for ICP is given by the transformations estimated by FGR and 3DRegNet, respectively. Also, to evaluate the quality of the classification, we take the inliers given by 3DRegNet and RANSAC and feed them into the nonlinear least-squares Umeyama refinement technique presented in [53]. These methods are denoted as 3DRegNet + U and RANSAC + U, respectively. The results are shown in Tab. 5(a).
|
| 269 |
+
|
| 270 |
+
The cumulative distribution function of the rotation error (read similarly to a precision-recall curve) is shown in Fig. 6(a) to better illustrate the performance of both 3DRegNet and FGR. For each error angle, the curve gives the fraction of test pairs whose rotation error is below that angle. It can be seen that FGR performs better than 3DRegNet up to a $2^{\circ}$ error; afterward, 3DRegNet provides better results. This implies that FGR does better on the easier problems.
|
| 271 |
+
|
| 272 |
+

|
| 273 |
+
Harvard MIT
|
| 274 |
+
|
| 275 |
+

|
| 276 |
+
3DRegNet
|
| 277 |
+
|
| 278 |
+

|
| 279 |
+
|
| 280 |
+

|
| 281 |
+
3DRegNet + ICP
|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
|
| 285 |
+

|
| 286 |
+
FGR
|
| 287 |
+
|
| 288 |
+

|
| 289 |
+
|
| 290 |
+

|
| 291 |
+
FGR + ICP
|
| 292 |
+
|
| 293 |
+
On a larger number of harder cases, however, FGR has high error (also higher than that of 3DRegNet). In other words, FGR has a heavier tail, hence a lower median error and a higher mean error compared to 3DRegNet, as is evident from Tab. 5. As the complexity of the problem increases, 3DRegNet becomes the better algorithm. This is further illustrated when we compare their performance in combination with ICP. Here, we can see that the initial estimates provided by 3DRegNet (3DRegNet + ICP) outperform those of FGR + ICP. It is particularly noteworthy that even though ICP is local, 3DRegNet + ICP converges to a better minimum than FGR + ICP. This means that a deep learning approach performs better when the pairwise correspondences are of lower quality, which makes the problem harder. In terms of computation time, we are at least $8\mathrm{x}$ faster than FGR and $25\mathrm{x}$ faster than RANSAC. For a fair comparison across all methods, all computation timings are obtained using the CPU.
When considering the use of the ICP and Umeyama refinement techniques, in terms of accuracy both 3DRegNet + ICP and 3DRegNet + U outperform all other methods. From the 3DRegNet + ICP results, we conclude that the transformation provided by our network leads ICP to a lower minimum than FGR + ICP. From 3DRegNet + U, we conclude that our classification selects inliers better than RANSAC does. In terms of computation time, we can draw the same conclusions as before.
# 7.3. Results in Unseen Sequences
For this test, we use the SUN3D dataset and run the same tests as in the previous section. However, while in Sec. 7.2 we used all the pairs from the sequences and split them into training and testing, here we run our tests on held-out sequences that were not used for training. The results are shown in Tab. 5(b) and Fig. 6(b). The conclusions are similar to those of the previous section. We observe that the results from 3DRegNet do not degrade significantly, which means that the network is able to generalize the classification and registration to unseen sequences. Some snapshots are shown in Fig. 5.

Figure 5: Two examples of 3D point-cloud alignment using the 3DRegNet, 3DRegNet + ICP, FGR, and FGR + ICP methods. The pairs of 3D scans were taken from the MIT and Harvard sequences of the SUN3D dataset; these sequences were not used in the training of the network.

Figure 6: Cumulative distribution function of the rotation errors of 3DRegNet vs. FGR: (a) ICL-NUIM; (b) SUN3D.
# 8. Discussion
We propose 3DRegNet, a deep neural network that solves the scan registration problem by jointly rejecting outliers among 3D point correspondences and computing the pose that aligns the scans. We show that our approach is extremely efficient: it performs as well as the current baselines while being significantly faster. Additional tests and visualizations of 3D registrations are provided in the Supplementary Materials.
# Acknowledgements
This work was supported by the Portuguese National Funding Agency for Science, Research and Technology project PTDC/EEI-SII/4698/2014, and the LARSyS - FCT Plurianual funding 2020-2023.
# References
[1] Martin Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dandelion Mané, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. Software available from tensorflow.org.

[2] Yasuhiro Aoki, Hunter Goforth, Rangaprasad Arun Srivatsan, and Simon Lucey. Pointnetlk: Robust & efficient point cloud registration using pointnet. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 7163-7172, 2019.

[3] K Somani Arun, Thomas S Huang, and Steven D Blostein. Least-squares fitting of two 3-d point sets. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 9(5):698-700, 1987.

[4] Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. Curriculum learning. In Int'l Conf. Machine Learning (ICML), pages 41-48, 2009.

[5] Florian Bernard, Frank R. Schmidt, Johan Thunberg, and Daniel Cremers. A combinatorial solution to non-rigid 3d shape-to-image matching. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 1436-1445, 2017.

[6] Paul J. Besl and Neil D. McKay. A method for registration of 3-d shapes. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 14(2):239-256, 1992.

[7] Alvaro Parra Bustos and Tat-Jun Chin. Guaranteed outlier removal for point cloud registration with correspondences. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 40(12):2868-2882, 2018.

[8] Avishek Chatterjee and Venu Madhav Govindu. Robust relative rotation averaging. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 40(4):958-972, 2018.

[9] Sungjoon Choi, Qian-Yi Zhou, and Vladlen Koltun. Robust reconstruction of indoor scenes. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 5556-5565, 2015.

[10] Taco S. Cohen, Mario Geiger, Jonas Koehler, and Max Welling. Spherical cnns. In Int'l Conf. Learning Representations (ICLR), 2018.

[11] Zheng Dang, Kwang Moo Yi, Yinlin Hu, Fei Wang, Pascal Fua, and Mathieu Salzmann. Eigendecomposition-free training of deep networks with zero eigenvalue-based losses. In European Conf. Computer Vision (ECCV), pages 792-807, 2018.

[12] Haowen Deng, Tolga Birdal, and Slobodan Ilic. Ppfnet: Global context aware local features for robust 3d point matching. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 195-205, 2018.

[13] Haowen Deng, Tolga Birdal, and Slobodan Ilic. 3d local features for direct pairwise registration. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 3239-3248, 2019.

[14] Li Ding and Chen Feng. Deepmapping: Unsupervised map estimation from multiple point clouds. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 8650-8659, 2019.

[15] Gil Elbaz, Tamar Avraham, and Anath Fischer. 3d point cloud registration for localization using a deep neural network auto-encoder. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 2472-2481, 2017.

[16] Carlos Esteves, Christine Allen-Blanchette, Ameesh Makadia, and Kostas Daniilidis. Learning so(3) equivariant representations with spherical cnns. In European Conf. Computer Vision (ECCV), pages 52-68, 2018.

[17] Martin A. Fischler and Robert C. Bolles. Random sample consensus: A paradigm for model fitting with applications to image analysis and automated cartography. Commun. ACM, 24(6):381-395, 1981.

[18] Stuart Geman and Donald E. McClure. Bayesian image analysis: An application to single photon emission tomography. In Proc. American Statistical Association, pages 12-18, 1985.

[19] Zan Gojcic, Caifa Zhou, Jan D. Wegner, and Andreas Wieser. The perfect match: 3d point cloud matching with smoothed densities. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 5545-5554, 2019.

[20] Venu Madhav Govindu and A. Pooja. On averaging multiview relations for 3d scan registration. IEEE Trans. Image Processing (T-IP), 23(3):1289-1302, 2014.

[21] Lei Han, Mengqi Ji, Lu Fang, and Matthias Niessner. Regnet: Learning the optimization of direct image-to-image pose registration. arXiv:1812.10212, 2018.

[22] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016.

[23] Joao F. Henriques and Andrea Vedaldi. Mapnet: An allocentric spatial memory for mapping environments. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 8476-8484, 2018.

[24] Dirk Holz, Alexandru E. Ichim, Federico Tombari, Radu B. Rusu, and Sven Behnke. Registration with the point cloud library: A modular framework for aligning in 3-d. IEEE Robotics Automation Magazine (RA-M), 22(4):110-124, 2015.

[25] Ji Hou, Angela Dai, and Matthias Niessner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 4416-4425, 2019.

[26] Xiangru Huang, Zhenxiao Liang, Xiaowei Zhou, Yao Xie, Leonidas Guibas, and Qixing Huang. Learning transformation synchronization. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 8082-8091, 2019.

[27] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-dof camera relocalization. In IEEE Int'l Conf. Computer Vision (ICCV), pages 2938-2946, 2015.

[28] Diederik P. Kingma and Jimmy Lei Ba. Adam: A method for stochastic optimization. In Int'l Conf. Learning Representations (ICLR), 2015.

[29] Huu M. Le, Thanh-Toan Do, Tuan Hoang, and Ngai-Man Cheung. Sdrsac: Semidefinite-based randomized approach for robust point cloud registration without correspondences. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 124-133, 2019.

[30] Chen-Yu Lee, Saining Xie, Patrick Gallagher, Zhengyou Zhang, and Zhuowen Tu. Deeply-supervised nets, 2014.

[31] Hongdong Li and Richard Hartley. The 3d-3d registration problem revisited. In IEEE Int'l Conf. Computer Vision (ICCV), pages 1-8, 2007.

[32] Weixin Lu, Guowei Wan, Yao Zhou, Xiangyu Fu, Pengfei Yuan, and Shiyu Song. Deepvcp: An end-to-end deep neural network for point cloud registration. In IEEE Int'l Conf. Computer Vision (ICCV), pages 3523-3532, 2019.

[33] Jiayi Ma, Xingyu Jiang, Junjun Jiang, Ji Zhao, and Xiaojie Guo. Lmr: Learning a two-class classifier for mismatch removal. IEEE Trans. Image Processing (T-IP), 28(8):4045-4059, 2019.

[34] Lingni Ma, Jorg Stuckler, Christian Kerl, and Daniel Cremers. Multi-view deep learning for consistent semantic mapping with rgb-d cameras. In IEEE/RSJ Int'l Conf. Intelligent Robots and Systems (IROS), pages 598-605, 2017.

[35] Yi Ma, Stefano Soatto, Jana Kosecka, and S. Shankar Sastry. An Invitation to 3-D Vision. Springer-Verlag New York, 2004.

[36] Andre Mateus, Srikumar Ramalingam, and Pedro Miraldo. Minimal solvers for 3d scan alignment with pairs of intersecting lines. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), 2020.

[37] Tambet Matiisen, Avital Oliver, Taco Cohen, and John Schulman. Teacher-student curriculum learning. IEEE Trans. Neural Networks and Learning Systems (T-NNLS), 2019.

[38] Nicolas Mellado, Niloy Mitra, and Dror Aiger. Super 4pcs: Fast global pointcloud registration via smart indexing. Computer Graphics Forum (Proc. EUROGRAPHICS), 33(5):205-215, 2014.

[39] Pedro Miraldo, Surojit Saha, and Srikumar Ramalingam. Minimal solvers for mini-loop closures in 3d multi-scan alignment. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 9699-9708, 2019.

[40] Andriy Myronenko and Xubo Song. Point set registration: Coherent point drift. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 32(12):2262-2275, 2010.

[41] Richard A. Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J. Davison, Pushmeet Kohli, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. Kinectfusion: Real-time dense surface mapping and tracking. In IEEE Int'l Symposium on Mixed and Augmented Reality (ISMAR), pages 127-136, 2011.

[42] Ilkay Oksuz, Bram Ruijsink, Esther Puyol-Antón, James R. Clough, Gastao Cruz, Aurelien Bustin, Claudia Prieto, René Botnar, Daniel Rueckert, Julia A. Schnabel, and Andrew P. King. Automatic cnn-based detection of cardiac mr motion artefacts using k-space data augmentation and curriculum learning. Medical Image Analysis, 55:136-147, 2019.

[43] Jaesik Park, Qian-Yi Zhou, and Vladlen Koltun. Colored point cloud registration revisited. In IEEE Int'l Conf. Computer Vision (ICCV), pages 143-152, 2017.

[44] Graeme P. Penney, Philip J. Edwards, Andrew P. King, Jane M. Blackall, Philipp G. Batchelor, and David J. Hawkes. A stochastic iterative closest point algorithm (stochastICP). In Medical Image Computing and Computer-Assisted Intervention (MICCAI), pages 762-769, 2001.

[45] Stephen Phillips and Kostas Daniilidis. All graphs lead to rome: Learning geometric and cycle-consistent representations with graph convolutional networks. arXiv:1901.02078, 2019.

[46] Charles R Qi, Hao Su, Kaichun Mo, and Leonidas J Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 652-660, 2017.

[47] Radu Bogdan Rusu, Nico Blodow, and Michael Beetz. Fast point feature histograms (fpfh) for 3d registration. In IEEE Int'l Conf. Robotics and Automation (ICRA), pages 3212-3217, 2009.

[48] Peter H. Schonemann. A generalized solution of the orthogonal procrustes problem. Psychometrika, 31(1):1-10, 1966.

[49] Aleksandr V. Segal, Dirk Haehnel, and Sebastian Thrun. Generalized-icp. In Robotics: Science and Systems (RSS), 2009.

[50] Shaoshuai Shi, Xiaogang Wang, and Hongsheng Li. Pointrcnn: 3d object proposal generation and detection from point cloud. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 770-779, 2019.

[51] Miroslava Slavcheva, Maximilian Baust, Daniel Cremers, and Slobodan Ilic. Killingfusion: Non-rigid 3d reconstruction without correspondences. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 5474-5483, 2017.

[52] Gary K.L. Tam, Zhi-Quan Cheng, Yu-Kun Lai, Frank C. Langbein, Yonghuai Liu, David Marshall, Ralph R. Martin, Xian-Fang Sun, and Paul L. Rosin. Registration of 3d point clouds and meshes: A survey from rigid to nonrigid. IEEE Trans. Visualization and Computer Graphics (T-VCG), 19(7):1199-1217, 2013.

[53] Shinji Umeyama. Least-squares estimation of transformation parameters between two point patterns. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 13(4):376-380, 1991.

[54] Yue Wang and Justin Solomon. Deep closest point: Learning representations for point cloud registration. In IEEE Int'l Conf. Computer Vision (ICCV), pages 3522-3531, 2019.

[55] Xinshuo Weng and Kris Kitani. Monocular 3d object detection with pseudo-lidar point cloud. In ICCV Workshops, 2019.

[56] Jay M. Wong, Vincent Kee, Tiffany Le, Syler Wagner, Gian-Luca Mariottini, Abraham Schneider, Lei Hamilton, Rahul Chipalkatty, Mitchell Hebert, David M.S. Johnson, Jimmy Wu, Bolei Zhou, and Antonio Torralba. Segicp: Integrated deep semantic segmentation and pose estimation. In IEEE/RSJ Int'l Conf. Intelligent Robots and Systems (IROS), pages 5784-5789, 2017.

[57] Jiaolong Yang, Hongdong Li, Dylan Campbell, and Yunde Jia. Go-icp: Solving 3d registration efficiently and globally optimally. IEEE Trans. Pattern Analysis and Machine Intelligence (T-PAMI), 38(11):2241-2254, 2016.

[58] Jiaolong Yang, Hongdong Li, and Yunde Jia. Go-icp: Solving 3d registration efficiently and globally optimally. In IEEE Int'l Conf. Computer Vision (ICCV), pages 1457-1464, 2013.

[59] Zi Jian Yew and Gim Hee Lee. 3dfeat-net: Weakly supervised local 3d features for point cloud registration. In European Conf. Computer Vision (ECCV), pages 630-646, 2018.

[60] Kwang Moo Yi, Eduard Trulls, Yuki Ono, Vincent Lepetit, Mathieu Salzmann, and Pascal Fua. Learning to find good correspondences. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 2666-2674, 2018.

[61] Andy Zeng, Shuran Song, Matthias Niessner, Matthew Fisher, Jianxiong Xiao, and Thomas Funkhouser. 3dmatch: Learning local geometric descriptors from rgb-d reconstructions. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 199-208, 2017.

[62] Chen Zhao, Zhiguo Cao, Chi Li, Xin Li, and Jiaqi Yang. Nm-net: Mining reliable neighbors for robust feature correspondences. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 215-224, 2019.

[63] Bolei Zhou, Agata Lapedriza, Jianxiong Xiao, Antonio Torralba, and Aude Oliva. Learning deep features for scene recognition using places database. In Advances in Neural Information Processing Systems (NIPS), pages 487-495, 2014.

[64] Lei Zhou, Siyu Zhu, Zixin Luo, Tianwei Shen, Runze Zhang, Mingmin Zhen, Tian Fang, and Long Quan. Learning and matching multi-view descriptors for registration of point clouds. In European Conf. Computer Vision (ECCV), pages 527-544, 2018.

[65] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Fast global registration. In European Conf. Computer Vision (ECCV), pages 766-782, 2016.

[66] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In IEEE Conf. Computer Vision and Pattern Recognition (CVPR), pages 5745-5753, 2019.

[67] Michael Zollhofer, Matthias Niessner, Shahram Izadi, Christoph Rehmann, Christopher Zach, Matthew Fisher, Chenglei Wu, Andrew Fitzgibbon, Charles Loop, Christian Theobalt, and Marc Stamminger. Real-time non-rigid reconstruction using an rgb-d camera. ACM Trans. Graphics, 33(4), 2014.
3dregnetadeepneuralnetworkfor3dpointregistration/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7bf67bea87b7a0f734ab9d7d8a32bed2fc8afc7424266953624a15b5833afd1f
+size 549388
3dregnetadeepneuralnetworkfor3dpointregistration/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94b8d3584b6d6a66f09b096e6a16570a0403de0e3776d0f79a5ca58d636b7ac4
+size 483286
3dsketchawaresemanticscenecompletionviasemisupervisedstructureprior/21e2be0e-0fb4-4da1-b0aa-370b4456f88a_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bea2afa069c87b29349ccd0c1ddacd752d6503c24a78b0ca5057c28da6d21e48
+size 90968
3dsketchawaresemanticscenecompletionviasemisupervisedstructureprior/21e2be0e-0fb4-4da1-b0aa-370b4456f88a_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e73037fd3a7347ce99aba9e021d55157410d671b20210610da0d3b353e751fd5
+size 108451